code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from commander.Commander import Commander
from commander.Terminal import Completer
| teran/commander | commander/__init__.py | Python | gpl-2.0 | 83 |
from .db import db
| plepe/pgmapcss | pgmapcss/db/overpass/__init__.py | Python | agpl-3.0 | 19 |
import unittest
import numpy as np
import yaml
from pythainlp.benchmarks import word_tokenization
with open("./tests/data/sentences.yml", "r", encoding="utf8") as stream:
TEST_DATA = yaml.safe_load(stream)
class TestBenchmarksPackage(unittest.TestCase):
    """Unit tests for pythainlp.benchmarks.word_tokenization helpers.

    Fixtures come from tests/data/sentences.yml, loaded at module import
    time into TEST_DATA.
    """

    def test_preprocessing(self):
        """preprocessing() on Thai text with markup should return a value."""
        self.assertIsNotNone(
            word_tokenization.preprocessing(
                txt="ทดสอบ การ ทำ ความสะอาด ข้อมูล<tag>ok</tag>"
            )
        )

    def test_benchmark_not_none(self):
        """benchmark() should produce a result for a mismatched tokenization pair."""
        self.assertIsNotNone(
            word_tokenization.benchmark(
                ["วัน", "จัน", "ทร์", "สี", "เหลือง"],
                ["วัน", "จันทร์", "สี", "เหลือง"],
            )
        )

    def test_binary_representation(self):
        """_binary_representation() marks word-start characters with 1, others 0."""
        sentence = "อากาศ|ร้อน|มาก|ครับ"
        rept = word_tokenization._binary_representation(sentence)
        self.assertEqual(
            [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0], rept.tolist()
        )

    def test_compute_stats(self):
        """compute_stats() runs without error for every fixture pair."""
        for pair in TEST_DATA["sentences"]:
            exp, act = pair["expected"], pair["actual"]
            result = word_tokenization.compute_stats(
                word_tokenization.preprocessing(exp),
                word_tokenization.preprocessing(act),
            )
            self.assertIsNotNone(result)

    def test_benchmark(self):
        """benchmark() over the whole fixture set returns a non-None result."""
        expected = []
        actual = []
        for pair in TEST_DATA["sentences"]:
            expected.append(pair["expected"])
            actual.append(pair["actual"])
        df = word_tokenization.benchmark(expected, actual)
        self.assertIsNotNone(df)

    def test_count_correctly_tokenised_words(self):
        """Count of correctly tokenised words matches the fixture's expected_count."""
        for d in TEST_DATA["binary_sentences"]:
            # fixture strings are bitmaps like "1001..."; cast to int arrays
            sample = np.array(list(d["actual"])).astype(int)
            ref_sample = np.array(list(d["expected"])).astype(int)
            sb = list(word_tokenization._find_word_boudaries(sample))
            rb = list(word_tokenization._find_word_boudaries(ref_sample))
            # in binary [{0, 1}, ...]
            correctly_tokenized_words = word_tokenization._find_words_correctly_tokenised(
                rb, sb
            )
            self.assertEqual(
                np.sum(correctly_tokenized_words), d["expected_count"]
            )

    def test_words_correctly_tokenised(self):
        """Only boundary spans present in both lists are labelled correct (1)."""
        r = [(0, 2), (2, 10), (10, 12)]
        s = [(0, 10), (10, 12)]
        expected = "01"
        labels = word_tokenization._find_words_correctly_tokenised(r, s)
        self.assertEqual(expected, "".join(np.array(labels).astype(str)))

    def test_flatten_result(self):
        """_flatten_result() joins nested dict keys with ':'."""
        result = dict(key1=dict(v1=6), key2=dict(v2=7))
        actual = word_tokenization._flatten_result(result)
        self.assertEqual(actual, {"key1:v1": 6, "key2:v2": 7})
| PyThaiNLP/pythainlp | tests/test_benchmarks.py | Python | apache-2.0 | 2,931 |
# Demo script for the ``fibo`` module (Python tutorial "Modules" chapter).
import fibo

# fib() prints the Fibonacci series up to the bound; fib2() returns a list.
fibo.fib(1000)
# print() with a single argument is valid on both Python 2 and Python 3;
# the original bare ``print expr`` statement is a SyntaxError on Python 3.
print(fibo.fib2(100))
print(fibo.__name__)
# Bind the function to a local name and call it directly.
fib = fibo.fib
fib(500)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class DataBoxEdgeManagementClientConfiguration(Configuration):
    """Configuration for DataBoxEdgeManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The subscription ID.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Validate required parameters before touching any base-class state.
        for param_name, param_value in (("credential", credential),
                                        ("subscription_id", subscription_id)):
            if param_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(param_name))
        super(DataBoxEdgeManagementClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2020-12-01"
        # Default to the ARM public-cloud scope unless the caller overrides it.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-databoxedge/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Each pipeline policy may be injected via kwargs; otherwise fall
        # back to the standard azure-core / azure-mgmt-core implementation.
        supplied = kwargs.get
        self.user_agent_policy = supplied('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = supplied('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = supplied('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = supplied('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = supplied('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = supplied('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = supplied('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = supplied('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = supplied('authentication_policy')
        # Build a bearer-token policy only when a credential exists and the
        # caller did not supply an authentication policy of their own.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/datab/_configuration.py | Python | mit | 3,261 |
"""
Miscellaneous utility code for VAR estimation
"""
import numpy as np
import scipy.stats as stats
import scipy.linalg as L
import scipy.linalg.decomp as decomp
import statsmodels.tsa.tsatools as tsa
from scipy.linalg import cholesky
#-------------------------------------------------------------------------------
# Auxiliary functions for estimation
def get_var_endog(y, lags, trend='c'):
    """
    Make predictor matrix for VAR(p) process

    Z := (Z_0, ..., Z_T).T (T x Kp)
    Z_t = [1 y_t y_{t-1} ... y_{t - p + 1}] (Kp x 1)

    Parameters
    ----------
    y : ndarray (nobs x K)
        Observations, one row per time point.
    lags : int
        Lag order p.
    trend : str
        Passed to ``tsa.add_trend`` unless 'nc' (no deterministic terms).

    Ref: Lutkepohl p.70 (transposed)
    """
    nobs = len(y)
    # Ravel C order, need to put lags in descending order.
    # ``range`` replaces the Python-2-only ``xrange`` so this also runs
    # on Python 3.
    Z = np.array([y[t-lags : t][::-1].ravel() for t in range(lags, nobs)])

    # Add constant, trend, etc. unless explicitly disabled ('nc').
    if trend != 'nc':
        Z = tsa.add_trend(Z, prepend=True, trend=trend)

    return Z
def get_trendorder(trend='c'):
    """Return the number of deterministic terms implied by ``trend``.

    'nc' -> 0 (none), 'c' -> 1 (constant), 'ct' -> 2 (constant + trend),
    'ctt' -> 3 (constant + trend + trend**2).

    Raises
    ------
    ValueError
        For an unrecognized trend code.  (The original fell through all
        branches and raised an opaque UnboundLocalError instead.)
    """
    trendorders = {'c': 1, 'nc': 0, 'ct': 2, 'ctt': 3}
    try:
        return trendorders[trend]
    except KeyError:
        raise ValueError("Unknown trend specification: '%s'" % trend)
def make_lag_names(names, lag_order, trendorder=1):
    """
    Produce list of lag-variable names. Constant / trends go at the beginning

    Example
    -------
    >>> make_lag_names(['foo', 'bar'], 2, 1)
    ['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']
    """
    # ``basestring`` exists only on Python 2; fall back to ``str`` so the
    # function also works on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    lag_names = []
    if isinstance(names, string_types):
        names = [names]

    # take care of lagged endogenous names
    for i in range(1, lag_order + 1):
        for name in names:
            if not isinstance(name, string_types):
                name = str(name)  # will need consistent unicode handling
            lag_names.append('L'+str(i)+'.'+name)

    # handle the deterministic-term names; each insert goes to position 0,
    # so the final prefix order is trend**2, trend, const (highest first
    # only when trendorder > 2).
    if trendorder != 0:
        lag_names.insert(0, 'const')
    if trendorder > 1:
        lag_names.insert(0, 'trend')
    if trendorder > 2:
        lag_names.insert(0, 'trend**2')
    return lag_names
def comp_matrix(coefs):
    """
    Return companion matrix for the VAR(1) representation of a VAR(p)
    process (companion form):

        A = [A_1 A_2 ... A_p-1 A_p
             I_K 0       0     0
             0   I_K ... 0     0
             0       ... I_K   0]
    """
    p, k1, k2 = coefs.shape
    assert k1 == k2

    dim = k1 * p
    companion = np.zeros((dim, dim))

    # Top block row holds the stacked coefficient matrices [A_1 ... A_p].
    companion[:k1] = np.hstack(coefs)

    # Subdiagonal identity blocks shift the lagged state vector down.
    if p > 1:
        rows = np.arange(k1, dim)
        companion[rows, rows - k1] = 1

    return companion
#-------------------------------------------------------------------------------
# Miscellaneous stuff
def parse_lutkepohl_data(path): # pragma: no cover
    """
    Parse data files from Lutkepohl (2005) book

    Source for data files: www.jmulti.de

    Returns ``(data, date_range)`` where ``data`` is the structured array
    read by ``np.genfromtxt`` and ``date_range`` has one business-period
    entry per observation.

    NOTE(review): Python-2-era helper -- it depends on
    ``statsmodels.compatnp.py3k.asbytes`` and the long-removed
    ``pandas.core.datetools`` module; confirm before use with modern pandas.
    """
    from collections import deque
    from datetime import datetime
    import pandas
    import pandas.core.datetools as dt
    import re
    from statsmodels.compatnp.py3k import asbytes
    # Header line looks like e.g. "<NAME Q1975>"; capture the dataset name,
    # the frequency letter and the starting period within the year.
    regex = re.compile(asbytes('<(.*) (\w)([\d]+)>.*'))
    lines = deque(open(path, 'rb'))
    to_skip = 0
    # Skip the leading comment block, terminated by "*/".
    while asbytes('*/') not in lines.popleft():
        #while '*/' not in lines.popleft():
        to_skip += 1
    # Keep consuming lines until the "<... >" header is matched.
    while True:
        to_skip += 1
        line = lines.popleft()
        m = regex.match(line)
        if m:
            year, freq, start_point = m.groups()
            break
    data = np.genfromtxt(path, names=True, skip_header=to_skip+1)
    n = len(data)
    # generate the corresponding date range (using pandas for now)
    start_point = int(start_point)
    year = int(year)
    # Map the frequency letter to a business period-end offset.
    offsets = {
        asbytes('Q') : dt.BQuarterEnd(),
        asbytes('M') : dt.BMonthEnd(),
        asbytes('A') : dt.BYearEnd()
    }
    # create an instance
    offset = offsets[freq]
    # Advance from Jan 1 of the start year to the recorded starting period.
    inc = offset * (start_point - 1)
    start_date = offset.rollforward(datetime(year, 1, 1)) + inc
    offset = offsets[freq]
    try:
        from pandas import DatetimeIndex # pylint: disable=E0611
        date_range = DatetimeIndex(start=start_date, freq=offset, periods=n)
    except ImportError:
        # very old pandas exposed DateRange instead of DatetimeIndex
        from pandas import DateRange
        date_range = DateRange(start_date, offset=offset, periods=n)
    return data, date_range
def get_logdet(m):
    """Return log(det(m)); raise ValueError when the determinant's sign
    shows the matrix is singular or not positive definite."""
    from statsmodels.tools.compatibility import np_slogdet

    sign, logdet = np_slogdet(m)
    if sign == -1: # pragma: no cover
        raise ValueError("Matrix is not positive definite")
    if sign == 0: # pragma: no cover
        raise ValueError("Matrix is singular")
    return logdet
def norm_signif_level(alpha=0.05):
    """Two-sided standard-normal critical value for significance level alpha."""
    upper_tail = 1 - alpha / 2
    return stats.norm.ppf(upper_tail)
def acf_to_acorr(acf):
    """Convert an autocovariance stack to autocorrelations by scaling with
    the lag-0 standard deviations."""
    variances = np.diag(acf[0])
    scale = np.sqrt(np.outer(variances, variances))
    # numpy broadcasting handles the (nlags, K, K) stack against (K, K)
    return acf / scale
def varsim(coefs, intercept, sig_u, steps=100, initvalues=None, seed=None):
    """
    Simulate simple VAR(p) process with known coefficients, intercept, white
    noise covariance, etc.

    Parameters
    ----------
    coefs : ndarray (p x K x K)
        Lag coefficient matrices A_1 ... A_p.
    intercept : ndarray (K,) or scalar
        Deterministic term added at every step after the first p.
    sig_u : ndarray (K x K)
        Innovation covariance matrix.
    steps : int
        Number of observations to simulate.
    initvalues : unused
        NOTE(review): accepted but never used (the first p rows stay zero),
        exactly as in the original -- confirm intended.
    seed : int or None
        Seeds numpy's global RNG for reproducibility.

    Returns
    -------
    ndarray (steps x K)
    """
    if seed is not None:
        np.random.seed(seed=seed)
    from numpy.random import multivariate_normal as rmvnorm
    p, k, k2 = coefs.shape
    ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps)
    result = np.zeros((steps, k))
    result[p:] = intercept + ugen[p:]

    # add in AR terms; ``range`` replaces Python-2-only ``xrange`` so the
    # function also runs on Python 3.
    for t in range(p, steps):
        ygen = result[t]
        for j in range(p):
            ygen += np.dot(coefs[j], result[t-j-1])

    return result
def get_index(lst, name):
    """Return the position of ``name`` in ``lst``; when lookup fails and
    ``name`` is already an integer, treat it as a position itself."""
    try:
        return lst.index(name)
    except Exception:
        if isinstance(name, int):
            return name
        raise
#method used repeatedly in Sims-Zha error bands
def eigval_decomp(sym_array):
    """
    Eigendecomposition used repeatedly in Sims-Zha error bands.

    Returns
    -------
    W: array of (left) eigenvectors
    eigva: array of eigenvalues
    k: index of the largest eigenvalue
    """
    # Import from the public scipy.linalg namespace: the private
    # ``scipy.linalg.decomp`` module imported at file level is deprecated
    # and removed in modern SciPy releases.
    from scipy.linalg import eig

    #check if symmetric, do not include shock period
    eigva, W = eig(sym_array, left=True, right=False)
    k = np.argmax(eigva)
    return W, eigva, k
def vech(A):
    """
    Simple vech operator: stack the elements on and below the main diagonal
    of ``A``, column by column.

    Returns
    -------
    vechvec: 1-d ndarray of all elements on and below the diagonal
    """
    length = A.shape[1]
    vechvec = []
    # ``range`` replaces the Python-2-only ``xrange`` so this also runs on
    # Python 3.
    for col in range(length):
        for row in range(col, length):
            vechvec.append(A[row, col])
    return np.asarray(vechvec)
| yarikoptic/pystatsmodels | statsmodels/tsa/vector_ar/util.py | Python | bsd-3-clause | 6,347 |
#
# Copyright (c) 2012, Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# GNU Lesser General Public License version 3 (see the file LICENSE).
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 8, 0, 'final', 0)

__all__ = [
    'Compiler',
    'log',
    'strlist',
    'Scope',
    'PybarsError'
]

from lib.pybars._compiler import (
    Compiler,
    strlist,
    Scope,
    PybarsError
)


def log(value):
    """No-op logging hook; rebind to a real callable to capture compiler output.

    A ``def`` replaces the original lambda assignment (PEP 8 E731): behavior
    is identical (returns None) but the function gets a proper __name__ for
    tracebacks.
    """
    return None
| Monofraps/glove | scripts/lib/pybars/__init__.py | Python | apache-2.0 | 1,681 |
#!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User Agent Parser Unit Tests."""
__author__ = 'slamm@google.com (Stephen Lamm)'
import unittest
from uaParser.lib import user_agent_parser
CHROME_UA_STRING = (
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.1 '
'(KHTML, like Gecko) Chrome/2.0.169.1 Safari/530.1')
TEST_STRINGS = (
# ((family, v1, v2, v3), user_agent_string)
#(('', '', '', '').
# '', {}),
(('RockMelt', '0', '8', '34'),
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.3 '
'(KHTML, like Gecko) RockMelt/0.8.34.841 Chrome/6.0.472.63 '
'Safari/534.3,gzip(gfe),gzip(gfe)', {}),
(('Firefox Beta', '4', '0', 'b4'),
'Mozilla/5.0 (X11; Linux i686 (x86_64); rv:2.0b4) Gecko/20100818 '
'Firefox/4.0b4', {}),
(('Firefox', '3', '6', '12'),
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) '
'Gecko/20101027 Ubuntu/10.04 (lucid) Firefox/3.6.12', {}),
(('Firefox (Shiretoko)', '3', '5', '1pre'),
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.1pre) '
'Gecko/20090717 Ubuntu/9.04 (jaunty) Shiretoko/3.5.1pre', {}),
(('Firefox Beta', '4', '0', 'b8pre'),
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b8pre) Gecko/20101031 '
'Firefox-4.0/4.0b8pre', {}),
(('Konqueror', '4', '3', '1'),
'Mozilla/5.0 (X11; U; Linux; de-DE) AppleWebKit/527 '
'(KHTML, like Gecko, Safari/419.3) konqueror/4.3.1,gzip(gfe)', {}),
(('Other', None, None, None),
'SomethingWeNeverKnewExisted', {}),
(('Chrome Frame (Sleipnir 2)', '2', '0', '169'),
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; '
'chromeframe; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR '
'3.5.30729; Sleipnir 2.8.5),gzip(gfe),gzip(gfe)',
{'js_user_agent_string': CHROME_UA_STRING}),
(('Chrome Frame (IE 8)', '2', '0', '169'),
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; '
'chromeframe; SLCC1; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR '
'3.0.30729),gzip(gfe),gzip(gfe)',
{'js_user_agent_string': CHROME_UA_STRING}),
# Chrome Frame installed but not enabled
(('IE', '8', '0', None),
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6; '
'chromeframe; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR '
'3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR '
'3.5.30729),gzip(gfe),gzip(gfe)',
{'js_user_agent_string': 'Mozilla/4.0 (compatible; MSIE 8.0; '
'Windows NT 5.1; Trident/4.0; chromeframe; .NET CLR 2.0.50727; '
'.NET CLR 1.1.4322; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; '
'.NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)'}),
(('IE Platform Preview', '9', '0', '1'),
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6; '
'.NET CLR 2.0.50727; .NET CLR 1.1.4322),gzip(gfe),gzip(gfe)',
{'js_user_agent_string': 'Mozilla/4.0 (compatible; MSIE 8.0; '
'Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 1.1.4322)',
'js_user_agent_family': 'IE Platform Preview',
'js_user_agent_v1': '9',
'js_user_agent_v2': '0',
'js_user_agent_v3': '1'}),
(('Midori', '0', '2', None),
'Midori/0.2 (X11; Linux; U; en-us) WebKit/531.2 ,gzip(gfe),gzip(gfe)',
{}),
(('MozillaDeveloperPreview', '3', '7', 'a1'),
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.3a1) '
'Gecko/20100208 MozillaDeveloperPreview/3.7a1 '
'(.NET CLR 3.5.30729),gzip(gfe),gzip(gfe)', {}),
(('Opera', '10', '53', None),
'Opera/9.80 (Windows NT 5.1; U; ru) Presto/2.5.24 Version/10.53',
{}),
(('Opera Mobile', '10', '00', None),
'Opera/9.80 (S60; SymbOS; Opera Mobi/275; U; es-ES) '
'Presto/2.4.13 Version/10.00,gzip(gfe),gzip(gfe)', {}),
(('Palm webOS', '1', '2', None),
'Mozilla/5.0 (webOS/1.2; U; en-US) AppleWebKit/525.27.1 '
'(KHTML, like Gecko) Version/1.0 Safari/525.27.1 '
'Desktop/1.0,gzip(gfe),gzip(gfe)', {}),
(('iPad', '3', '2', None),
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) '
'AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B367 '
'Safari/531.21.10,gzip(gfe),gzip(gfe)', {}),
(('Dolfin', '2', '0', None),
'Mozilla/5.0 (SAMSUNG; SAMSUNG-GT-S8500/S8500XXJEE; U; Bada/1.0; nl-nl) '
'AppleWebKit/533.1 (KHTML, like Gecko) Dolfin/2.0 Mobile WVGA '
'SMM-MMS/1.2.0 OPN-B', {}),
(('BOLT', '2', '101', None),
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; BOLT/2.101) '
'AppleWebKit/530 (KHTML, like Gecko) Version/4.0 '
'Safari/530.17,gzip(gfe),gzip(gfe)', {}),
(('Blackberry', '6', '0', '0'),
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en-GB) AppleWebKit/534.1+ '
'(KHTML, like Gecko) Version/6.0.0.141 Mobile '
'Safari/534.1+,gzip(gfe),gzip(gfe)', {}),
)
class ParseTest(unittest.TestCase):
  """Checks user_agent_parser.Parse against every canned TEST_STRINGS case."""

  def testStrings(self):
    for expected, ua_string, extra_kwds in TEST_STRINGS:
      parsed = user_agent_parser.Parse(ua_string, **extra_kwds)
      self.assertEqual(expected, parsed)
class GetFiltersTest(unittest.TestCase):
  """Exercises user_agent_parser.GetFilters keyword pass-through behavior."""

  def testGetFiltersNoMatchesGiveEmptyDict(self):
    filters = user_agent_parser.GetFilters('foo', js_user_agent_string=None)
    self.assertEqual({}, filters)

  def testGetFiltersJsUaPassedThrough(self):
    filters = user_agent_parser.GetFilters('foo', js_user_agent_string='bar')
    self.assertEqual({'js_user_agent_string': 'bar'}, filters)

  def testGetFiltersJsUserAgentFamilyAndVersions(self):
    ua_string = ('Mozilla/4.0 (compatible; MSIE 8.0; '
                 'Windows NT 5.1; Trident/4.0; GTB6; .NET CLR 2.0.50727; '
                 '.NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)')
    filters = user_agent_parser.GetFilters(
        ua_string, js_user_agent_string='bar', js_user_agent_family='foo')
    expected = {'js_user_agent_string': 'bar',
                'js_user_agent_family': 'foo'}
    self.assertEqual(expected, filters)
if __name__ == '__main__':
unittest.main()
| dinomite/uaParser | uaParser/test/test_user_agent_parser.py | Python | apache-2.0 | 6,889 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' filestatshandler.py '''
import json
import os
import stat
import tornado.web
from heron.shell.src.python import utils
class FileStatsHandler(tornado.web.RequestHandler):
  """
  Get the file stats in JSON format given the path.
  """
  @tornado.web.asynchronous
  def get(self, path):
    ''' get method: respond with a JSON map of filename -> stat info '''
    path = tornado.escape.url_unescape(path)
    if not path:
      path = "."

    # User should not be able to access anything outside
    # of the dir that heron-shell is running in. This ensures
    # sandboxing. So we don't allow absolute paths and parent
    # accessing.
    if not utils.check_path(path):
      self.write("Only relative paths are allowed")
      self.set_status(403)
      self.finish()
      return

    listing = utils.get_listing(path)
    file_stats = {}
    for fn in listing:
      try:
        is_dir = False
        formatted_stat = utils.format_prefix(fn, utils.get_stat(path, fn))
        if stat.S_ISDIR(utils.get_stat(path, fn).st_mode):
          is_dir = True
        file_stats[fn] = {
            "formatted_stat": formatted_stat,
            "is_dir": is_dir,
            "path": tornado.escape.url_escape(os.path.join(path, fn)),
        }
        if fn == "..":
          # Parent entry links one level up (but never above the sandbox root).
          path_fragments = path.split("/")
          if not path_fragments:
            file_stats[fn]["path"] = "."
          else:
            file_stats[fn]["path"] = tornado.escape.url_escape("/".join(path_fragments[:-1]))
      except Exception:
        # Entry may have vanished or be unreadable; skip it rather than
        # failing the whole listing.  The original bare ``except:`` also
        # swallowed KeyboardInterrupt/SystemExit, so catch Exception only.
        continue
    self.write(json.dumps(file_stats))
    self.finish()
| mycFelix/heron | heron/shell/src/python/handlers/filestatshandler.py | Python | apache-2.0 | 2,385 |
# this is a virtual module that is entirely implemented server side
DOCUMENTATION = '''
---
module: raw
short_description: Executes a low-down and dirty SSH command
options:
free_form:
description:
- the raw module takes a free form command to run
required: true
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem. This is useful and should only be done in two cases. The
first case is installing C(python-simplejson) on older (Python 2.4 and
before) hosts that need it as a dependency to run modules, since nearly
all core modules require it. Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(shell) or M(command) module is much more appropriate. Arguments
given to M(raw) are run directly through the configured remote shell.
Standard output, error output and return code are returned when
available. There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(script) module.
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(command) module instead. Best practices when writing
playbooks will follow the trend of using M(command) unless M(shell) is
explicitly required. When running ad-hoc commands, use your best
judgement.
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Bootstrap a legacy python 2.4 host
- raw: yum -y install python-simplejson
'''
| pahaz/ansible-modules-core | commands/raw.py | Python | gpl-3.0 | 1,817 |
#!/usr/bin/env python
# Convert to and from CoNLL-U format.
import os
import sys
import codecs
from conllu import conllu
def argparser():
    """Build the command-line argument parser for the converter."""
    import argparse

    ap = argparse.ArgumentParser(description="Convert CoNLL-U data.")
    ap.add_argument('-o', '--output', metavar='DIR', default=None,
                    help='Output directory.')
    ap.add_argument('file', nargs='+', help='Source file(s).')
    return ap
def output_document_text(document, output, options=None):
    """Write the document's text to ``output``, followed by a newline.

    Uses ``output.write`` instead of the Python-2-only ``print >> output``
    statement so the function also runs on Python 3.
    """
    output.write(u'%s\n' % (document.text(),))
def output_document_annotations(document, output, options=None):
    """Write each brat standoff annotation to ``output``, one per line.

    The ``u'%s'`` formatting replaces the Python-2-only ``print >> output``
    statement and the ``unicode()`` builtin so the function also runs on
    Python 3 while still producing text (unicode) output on Python 2.
    """
    for annotation in document.to_brat_standoff():
        output.write(u'%s\n' % (annotation,))
def output_document(document, options=None):
    """Output given document according to given options.

    With no ``--output`` directory, both the text and the standoff
    annotations go to stdout; otherwise a brat file pair
    ``<basename>.txt`` / ``<basename>.ann`` is written (UTF-8) into the
    output directory.
    """
    if options is None or options.output is None:
        # If no output directory is specified, output both to stdout
        output_document_text(document, sys.stdout, options)
        output_document_annotations(document, sys.stdout, options)
    else:
        # Derive the output pair's base name from the source file name.
        basefn = os.path.splitext(os.path.basename(document.filename))[0]
        txtfn = os.path.join(options.output, basefn+'.txt')
        annfn = os.path.join(options.output, basefn+'.ann')
        with codecs.open(txtfn, 'wt', encoding='utf-8') as txtout:
            output_document_text(document, txtout, options)
        with codecs.open(annfn, 'wt', encoding='utf-8') as annout:
            output_document_annotations(document, annout, options)
def convert(source, options=None):
    """Convert one CoNLL-U file, emitting output for each document in it."""
    # TODO: support conversions other than CoNLL-U to brat.
    for document in conllu.read_documents(source):
        output_document(document, options)
def main(argv):
    """Entry point: convert every file named on the command line."""
    options = argparser().parse_args(argv[1:])
    for source_file in options.file:
        convert(source_file, options)
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| spyysalo/conllu.py | convert.py | Python | mit | 1,897 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.backends.abstract import FunctionsList as AbstractFunctionsList
from rbnics.backends.abstract import ProperOrthogonalDecomposition as AbstractProperOrthogonalDecomposition
from rbnics.backends.basic import ProperOrthogonalDecompositionBase as BasicProperOrthogonalDecomposition
from rbnics.backends.online.numpy.eigen_solver import EigenSolver
from rbnics.backends.online.numpy.functions_list import FunctionsList
from rbnics.backends.online.numpy.matrix import Matrix
from rbnics.backends.online.numpy.snapshots_matrix import SnapshotsMatrix
from rbnics.backends.online.numpy.transpose import transpose
from rbnics.backends.online.numpy.wrapping import get_mpi_comm
from rbnics.utils.decorators import BackendFor, ModuleWrapper
backend = ModuleWrapper(transpose)
wrapping = ModuleWrapper(get_mpi_comm)
online_backend = ModuleWrapper(OnlineEigenSolver=EigenSolver)
online_wrapping = ModuleWrapper()
ProperOrthogonalDecomposition_Base = BasicProperOrthogonalDecomposition(
backend, wrapping, online_backend, online_wrapping, AbstractProperOrthogonalDecomposition, SnapshotsMatrix,
FunctionsList)
@BackendFor("numpy", inputs=(AbstractFunctionsList, Matrix.Type(), (str, None)))
class ProperOrthogonalDecomposition(ProperOrthogonalDecomposition_Base):
    """NumPy online-backend POD; the generated base class (from
    BasicProperOrthogonalDecomposition above) supplies the compression
    logic, this subclass only wires in snapshot storage."""

    def __init__(self, basis_functions, inner_product, component=None):
        # Delegate setup of basis functions / inner product to the base.
        ProperOrthogonalDecomposition_Base.__init__(self, basis_functions, inner_product, component)

    def store_snapshot(self, snapshot, component=None, weight=None):
        # Append a (possibly weighted) snapshot to the snapshots matrix.
        self.snapshots_matrix.enrich(snapshot, component, weight)
| mathLab/RBniCS | rbnics/backends/online/numpy/proper_orthogonal_decomposition.py | Python | lgpl-3.0 | 1,710 |
import json
import unittest
from unittest.mock import patch, Mock, MagicMock
from botocore.exceptions import ClientError
from datetime import datetime
from batch_transform.src import batch_transform
from common import _utils
from . import test_utils
# TODO : Errors out if model_name doesn't contain '-'
# fix model_name '-' bug
required_args = [
'--region', 'us-west-2',
'--model_name', 'model-test',
'--input_location', 's3://fake-bucket/data',
'--output_location', 's3://fake-bucket/output',
'--instance_type', 'ml.c5.18xlarge',
'--output_location_file', 'tmp/'
]
class BatchTransformTestCase(unittest.TestCase):
  """Unit tests for the SageMaker batch-transform request builder."""

  @classmethod
  def setUpClass(cls):
    # Build the component's argument parser once for all tests.
    parser = batch_transform.create_parser()
    cls.parser = parser

  def test_sample(self):
    """The --output_location flag flows into TransformOutput.S3OutputPath."""
    args = self.parser.parse_args(required_args)
    response = _utils.create_transform_job_request(vars(args))
    self.assertEqual(response['TransformOutput']['S3OutputPath'], 's3://fake-bucket/output')

  def test_empty_string(self):
    """No empty-string values may appear anywhere in the generated request."""
    args = self.parser.parse_args(required_args)
    response = _utils.create_transform_job_request(vars(args))
    test_utils.check_empty_string_values(response)
###PYTHON#############
#Purpose : Build a DB of mutations found in the MS-tryspin/chemotrypsin digests
#Author : Aditya Ambati
#date : May 26 2017
#update : version 1
#####import libraries
from itertools import chain
import pandas as pd
import numpy as np
# NOTE: "%matplotlib" is an IPython magic command and is a SyntaxError when
# this file is executed as a plain Python script.  Run the script inside
# IPython/Jupyter, or enable interactive plotting programmatically with
# ``import matplotlib.pyplot as plt; plt.ion()`` instead.
import re
from Bio import Entrez
from Bio import SeqIO
######################make a list of files
arp_tryp = ['AFLPA319BB_ARP_TRYPSIN.csv','AFLPA328AA_ARP_TRYPSIN.csv','AFLPA359AA_ARP_TRYPSIN.csv','AFLPA373BA_ARP_TRYPSIN.csv','SF1B045CL_ARP_TRYPSIN.csv']
arp_chymo = ['AFLPA319BB_ARP_CHYMO.csv','AFLPA328AA_ARP_CHYMO.csv','AFLPA359AA_ARP_CHYMO.csv','AFLPA373BA_ARP_CHYMO.csv','SF1B0454CL_ARP_CHYMO.csv']
pan = ['AFLSA096AA_PAN_TRYPSIN.csv','AFLSA097AA_PAN_TRYPSIN.csv','AFLSA167AB_PAN_TRYPSIN.csv','AFLSA208A_PAN_TRYPSIN.csv', 'AFLSFDA280_PAN_TRYPSIN.csv', 'AFLSA174AA_PAN_TRYPSIN.csv']
pan_chymo = ['AFLSA208A_PAN_CHYMO.csv', 'AFLSFDA280_PAN_CHYMO.csv', 'AFLSA174AA_PAN_CHYMO.csv']
######### make a list of references influenza proteins used in the MS analysis
###############HA NP NA M1 PB2
references =['ADE29095.1', 'ADE29096.1', 'ADE29097.1', 'ADE29098.1', 'ADE29092.1'] ## to add in more id of references
### define subroutines
def reference_retreive(references):
    """Fetch each protein accession from NCBI Entrez and return a list of
    DataFrames, one per protein, with one row per residue and columns
    position (1-based), refseq (residue letter) and strain_protein
    (record description).

    The (misspelled) function name is kept because later script lines call
    it by this name.
    """
    reference_proteins = []
    for accession in references:
        Entrez.email = "ambati@stanford.edu"
        handle = Entrez.efetch(db="protein", rettype="fasta", retmode="text", id=accession)
        seq_record = SeqIO.read(handle, "fasta")
        handle.close()  # release the network handle once the record is parsed
        # str(Seq) replaces Seq.tostring(), which was removed from Biopython.
        sequence = str(seq_record.seq)
        tempref = pd.DataFrame(np.arange(1, len(sequence) + 1))
        tempref.loc[:, ('refseq')] = list(sequence)
        tempref.columns = ['position', 'refseq']
        tempref.loc[:, ('strain_protein')] = seq_record.description
        reference_proteins.append(tempref)
    return reference_proteins
################ this function will take a list with filenames and return a MS output spectra file with trimmed columns and calculated peptide length
def peponly(pepseq):
    """Strip every character that is not an uppercase letter, leaving only
    the raw amino-acid sequence (drops modification tags, digits, dots)."""
    return re.sub('[^A-Z]*', '', pepseq)
def spectra_processing(spectra):
    """Load each MS spectra CSV (semicolon-separated), keep the identification
    columns, drop 'Reverse' decoy hits, and derive per-peptide columns
    (untagged sequence, its length, and templen = start_pos + length).

    Returns a list of DataFrames, one per input file.
    """
    dfs =[]
    for i in range(0, len(spectra)):
        temp = pd.read_csv(spectra[i], header = 0, sep = ";")
        # Column headers contain literal newlines, as exported by Byonic.
        temp1 = temp[['Protein\nRank','Peptide\n< ProteinMetrics Confidential >','Modification Type(s)', 'Starting\nposition', 'Protein Name', '|Log Prob|']]
        ## rename the columns
        temp1.columns = ['protein_rank', 'peptide', 'modtype', 'start_pos', 'protein_name', 'log_prob']
        # Drop decoy ('Reverse') database matches.
        temp2=temp1.loc[~temp1['protein_name'].str.contains('Reverse')]
        temp2.reset_index(drop=True, inplace=True)
        temp2.loc[:, ('filename')] = spectra[i]
        # NOTE(review): re.findall returns a *list* per peptide, so
        # pep_wt_tags holds lists and peponly() is then handed a list --
        # re.sub cannot process that; confirm whether findall(...)[0] (or
        # re.search) was intended before trusting tag_rem_pep downstream.
        temp2.loc[:, ('pep_wt_tags')]=[re.findall('\..*\.', i) for i in temp2.peptide] ## take only characters between the first and the last dot character
        temp2.loc[:, ('tag_rem_pep')] = [peponly(i) for i in temp2.pep_wt_tags] ### assign to a new column the raw peptide without tags
        temp2.loc[:, ('tag_rm_len')] = [len(i) for i in temp2.tag_rem_pep] ## get the raw peptide length
        temp2.loc[:, ('templen')] = temp2.tag_rm_len + temp2.start_pos ### add the startingpos + untagged peptide length
        #temp2.loc[:, 'org_length'] = [len(i) for i in temp2.peptide]
        dfs.append(temp2)
    return(dfs)
################################################### this function will update the reference files
####################
def mutcheck(spectraDF, reference, stringquery):
    """Compare MS peptides for one protein against its reference sequence.

    Parameters:
        spectraDF   -- processed spectra DataFrame (see spectra_processing)
        reference   -- per-residue reference DataFrame (see reference_retreive)
        stringquery -- regex matched against protein_name to select this protein

    Returns [flagged_rows, updated_reference]: the reference gains per-residue
    occurrence/mutation counters and log-prob sums; flagged_rows are the
    spectra rows with more than one mismatching residue.
    """
    tempdf = spectraDF[spectraDF['protein_name'].str.contains(stringquery)]
    tempdf.reset_index(drop=False, inplace =True)
    # initialise the per-residue bookkeeping columns on the reference
    reference.loc[:, 'mutated_AA'] = 'Nan'
    reference.loc[:, 'total_occurences'] =0
    reference.loc[:, 'mutated_occurence']=0
    reference.loc[:, 'occu_log_prob'] =0
    reference.loc[:, 'mut_log_prob']=0
    # NOTE(review): assumes all selected rows share one filename; assigning
    # .unique() raises if several distinct filenames are present — confirm.
    reference.loc[:, 'fileID'] = tempdf.filename.unique()
    mutcheck =[]
    for i in xrange(0, len(tempdf.start_pos)):
        # 0-based residue indexes covered by this peptide in the reference
        HAindex = np.arange(tempdf.start_pos[i]-1, tempdf.templen[i]-1)
        reference.loc[HAindex, 'total_occurences'] = reference.loc[HAindex, 'total_occurences'] +1
        reference.loc[HAindex, 'occu_log_prob'] = reference.loc[HAindex, 'occu_log_prob'] + tempdf.log_prob[i]
        # NOTE(review): .ix was removed in pandas >= 1.0 — this requires an old pandas.
        q=reference.ix[HAindex, ('refseq')]
        MSpep = pd.Series(list(tempdf.tag_rem_pep[i]), index=q.index)
        mutindex=q.index[q != list(tempdf.tag_rem_pep[i])]
        # NOTE(review): only peptides with >1 mismatching residue are flagged
        # for manual review — confirm `> 1` (vs `>= 1`) is intentional.
        if len(mutindex) > 1:
            mutcheck.append(i)
        reference.loc[mutindex, ('mutated_occurence')] = reference.loc[mutindex, ('mutated_occurence')] +1
        reference.loc[mutindex, ('mut_log_prob')] = reference.loc[mutindex, ('mut_log_prob')] + tempdf.log_prob[i]
        reference.loc[mutindex, ('mutated_AA')] = MSpep[MSpep != q]
    return([tempdf.ix[mutcheck], reference])
##### process the spectra files
# NOTE(review): pan, pan_chymo, arp_tryp, arp_chymo and references are assumed
# to be defined earlier in this script (lists of CSV paths / NCBI accessions)
# — confirm against the full file.
pan_tryp_dfs = spectra_processing(pan)
pan_chymo_dfs= spectra_processing(pan_chymo)
arp_tryp_dfs = spectra_processing(arp_tryp)
arp_chymo_dfs = spectra_processing(arp_chymo)
######retreive these protein sequences
ref_proteins = reference_retreive(references) ### feed a list of protein IDs to retreive them from database amd get them in a database format
###################################################################
# slice-copies of each per-protein reference table (order fixed by `references`)
HA_ref = ref_proteins[0][:]
NP_ref = ref_proteins[1][:]
NA_ref = ref_proteins[2][:]
MA_ref = ref_proteins[3][:]
PB2_ref = ref_proteins[4][:]
# NOTE(review): the five calls below use `pand_all` (not defined in this view)
# and their results are recomputed per-file in the loops that follow — these
# look like leftover exploratory calls; verify before relying on them.
PB2check=mutcheck(pand_all, reference = PB2_ref[:], stringquery ='>PB2-|ACQ63273.1')
HAcheck=mutcheck(pand_all, reference = HA_ref[:], stringquery = '>HA-|ACQ55359.1|ADE29085.1')
NAcheck=mutcheck(pand_all, reference = NA_ref[:], stringquery = '>NA-|ADE29087.1')
MAcheck=mutcheck(pand_all, reference = MA_ref[:], stringquery = '>MA1-|ACP44184.1')
NPcheck=mutcheck(pand_all, reference = NP_ref[:], stringquery = '>NP-|ACP44183.1')
##### run the per-protein mutation checks for every spectra file and persist the results
def _collect_checks(dfs):
    # For each spectra DataFrame run mutcheck() against every vaccine protein
    # reference and return two parallel lists:
    #   db_list    - per-file reference DBs (concatenated in PB2/HA/NA/MA1/NP order)
    #   check_list - per-file rows flagged by mutcheck for manual review
    protein_queries = [
        (PB2_ref, '>PB2-|ACQ63273.1'),
        (HA_ref, '>HA-|ACQ55359.1|ADE29085.1'),
        (NA_ref, '>NA-|ADE29087.1'),
        (MA_ref, '>MA1-|ACP44184.1'),
        (NP_ref, '>NP-|ACP44183.1'),
    ]
    db_list = []
    check_list = []
    for df in dfs:
        # mutcheck returns [flagged_rows, updated_reference]; pass a fresh
        # copy of each reference so counters never accumulate across files
        results = [mutcheck(df, reference=ref[:], stringquery=query)
                   for ref, query in protein_queries]
        db_list.append(pd.concat([r[1] for r in results], ignore_index=True))
        check_list.append(pd.concat([r[0] for r in results], ignore_index=True))
    return db_list, check_list

pan_tryp_DB, pan_checkids = _collect_checks(pan_tryp_dfs)
arp_tryp_DB, arp_checkids = _collect_checks(arp_tryp_dfs)
## concatenate per-file results and write the reference DBs to disk
arp_db = pd.concat(arp_tryp_DB, ignore_index=True)
pan_db = pd.concat(pan_tryp_DB, ignore_index=True)
arp_db.to_csv('/home/labcomp/Desktop/Vaccine_MS_May16/Arepanrix_trypsin_june16.csv')
pan_db.to_csv('/home/labcomp/Desktop/Vaccine_MS_May16/Pandemrix_trypsin_june16.csv')
## gather every flagged row (both vaccines) for manual verification in excel
arp_check = pd.concat(arp_checkids, ignore_index=True)
pan_check = pd.concat(pan_checkids, ignore_index=True)
all_tryp_check = pd.concat([arp_check, pan_check], ignore_index=True)
all_tryp_check.to_csv('/home/labcomp/Desktop/Vaccine_MS_May16/check_back_excel raw_june16.csv')
############################################## post processing the DB files ########################################
# Stage 2: reload the CSVs written above (this section can be run standalone,
# hence the repeated imports) and normalise protein names / build lookup keys.
import pandas as pd
import numpy as np
arp = pd.read_csv('Arepanrix_trypsin_june16.csv')
pan = pd.read_csv('Pandemrix_trypsin_june16.csv')
prot=pan.strain_protein.unique()
# accession id = first token of the description, before the strain bracket
refprot=[i.split('[')[0] for i in prot]
refid=[i.split(' ')[0] for i in refprot]
refprotname= ['PB2', 'HA', 'NA', 'MA1', 'NP']
### try to simplify the protein ids
# NOTE(review): the bare list literal below is a no-op; it is kept only as
# documentation of the accession ids being mapped.
['ADE29092.1 polymerase PB2 ',
'ADE29095.1 hemagglutinin ',
'ADE29097.1 neuraminidase ',
'ADE29098.1 matrix protein 1 ',
'ADE29096.1 nucleocapsid protein ']
#### change the long protein name to short ones for e.g NucleoProtein > NP etc.
arp.loc[:, 'prot'] = 'NaN'
pan.loc[:, 'prot'] = 'NaN'
for i, j in enumerate(refid):
    arp.loc[arp.strain_protein.str.contains(j), ('prot')] = refprotname[i]
    pan.loc[pan.strain_protein.str.contains(j), ('prot')] = refprotname[i]
## drop uneceassary cols
arp2=arp.drop(['Unnamed: 0', 'strain_protein'], axis=1)
pan2=pan.drop(['Unnamed: 0', 'strain_protein'], axis=1)
#### add keys column to these files by combining the proteinname and the position of that peptide in the ref protein
pan2.loc[:,('keys')]=[str(pan2.prot[i]) + '_' + str(pan2.position[i]) for i in xrange(0, len(pan2))]
arp2.loc[:,('keys')]=[str(arp2.prot[i]) + '_' + str(arp2.position[i]) for i in xrange(0, len(arp2))]
######## groupby pandas if a rows have the same common 'prot','position', 'refseq', 'mutated_AA' then mean the total_occurences', 'mutated_occurence', 'mut_log_prob'
# Collapse replicate lots: identical (prot, position, refseq, mutated_AA)
# rows are averaged over their counters / log-prob sums.
pan2_grp2=pan2[['prot','position', 'refseq','mutated_AA','total_occurences', 'mutated_occurence', 'mut_log_prob']].groupby(['prot','position', 'refseq', 'mutated_AA']).mean()
pan2_reduced=pan2_grp2.reset_index()
#### further if certain rows satisfy conditions total_occurences > 0, mutated_occurence > = 3 and a mut_log prob of > 2, take these as valid rows
pan2_final=pan2_reduced.loc[(pan2_reduced['total_occurences'] != 0) & (pan2_reduced['mutated_occurence'] >= 3) & (pan2_reduced['mut_log_prob'] > 2)]
pan2_sort=pan2_final.sort_values(['mutated_occurence'], ascending=False)
#pan2_sort.loc[:, ('percentage')]=(pan2_sort.mutated_occurence/pan2_sort.total_occurences)*100
## do the same with arpanrix
arp2_grp2=arp2[['prot','position', 'refseq','mutated_AA','total_occurences', 'mutated_occurence', 'mut_log_prob']].groupby(['prot','position', 'refseq', 'mutated_AA']).mean()
arp2_reduced=arp2_grp2.reset_index()
arp2_final=arp2_reduced.loc[(arp2_reduced['total_occurences'] != 0) & (arp2_reduced['mutated_occurence'] >= 3) & (arp2_reduced['mut_log_prob'] > 2)]
arp2_sort=arp2_final.sort_values(['mutated_occurence'], ascending=False)
#arp2_sort.loc[:, ('percentage')]=(arp2_sort.mutated_occurence/arp2_sort.total_occurences)*100
### reset index on these files but keep the column structure
pan2_sort.reset_index(drop=True, inplace=True)
arp2_sort.reset_index(drop=True, inplace=True)
#### get all key mutations from each of the vaccine for e.g HA 146 becomes HA_146
pan2_keys = [str(pan2_sort.prot[i]) + '_' + str(pan2_sort.position[i]) for i in xrange(0, len(pan2_sort))]
arp2_keys = [str(arp2_sort.prot[i]) + '_' + str(arp2_sort.position[i]) for i in xrange(0, len(arp2_sort))]
common_keys= set(pan2_keys + arp2_keys) #### from the selected rows in above sort files pan2_sort and arp2_sort, make a big list containing common unique keys
### make these keys from the orginal database file *_reduced.
pan2_reduced.loc[:, ('keys')] = [str(pan2_reduced.prot[i]) + '_' + str(pan2_reduced.position[i]) for i in xrange(0, len(pan2_reduced))]
arp2_reduced.loc[:, ('keys')] = [str(arp2_reduced.prot[i]) + '_' + str(arp2_reduced.position[i]) for i in xrange(0, len(arp2_reduced))]
###get all the positions from the main DB *_reduced present in the common keys
panf=pan2_reduced[pan2_reduced['keys'].isin(common_keys)]
arpf=arp2_reduced[arp2_reduced['keys'].isin(common_keys)]
#### here we find that certain mutations maybe different across same vaccine lots (for e.g within pandemrix at same position , batch1 might have a mutation at HA 146 as D whereas batch might have mutatation ar HA 146 as F)
### further, if this is the case this will showup in above at groupby as two rows because of the difference in mutated_AA. we are merely gathering all these empty keys and removing them
# 'Nan' is the literal placeholder written by mutcheck() for unmutated rows.
arpf2=arpf.index[(arpf.duplicated(['keys'], keep=False)) & (arpf.mutated_AA.str.contains('Nan'))]
panf2=panf.index[(panf.duplicated(['keys'], keep=False)) & (panf.mutated_AA.str.contains('Nan'))]
### drop these indexes from the mutated files arp_mut/pan_mut
arp_mut=arpf.drop(arpf2, axis=0)
pan_mut=panf.drop(panf2, axis=0)
#### merge the tow vaccines on keys and get suffixes for each of them.
all_mut=pd.merge(arp_mut, pan_mut, on='keys', how='outer', suffixes=['_ARP', '_PAN'])
#all_mut.to_csv('/Users/adityaambati/Desktop/All_mutations.csv')
#all_mut.loc[(all_mut.mutated_occurence_ARP >= 3) | (all_mut.mutated_occurence_PAN >=3)]
#all_mut[(all_mut.mutated_occurence_ARP >= 3) & (all_mut.mutated_occurence_PAN >=3)]
#all_mut.loc[(all_mut.mut_log_prob_ARP >= 2) & (all_mut.mut_log_prob_PAN >=2)]
##make a copy of the file
# NOTE(review): this is a rebinding, not a copy — mutating all_mut2 below
# also mutates all_mut; use all_mut.copy() if a real copy was intended.
all_mut2 = all_mut
#### calculate the ratios mut/total *100 - gives %
all_mut2.loc[:, ('mutated_occurence_ARP')] = np.round(all_mut2.mutated_occurence_ARP)
all_mut2.loc[:, ('mutated_occurence_PAN')] = np.round(all_mut2.mutated_occurence_PAN)
all_mut2.loc[:, ('total_occurences_ARP')] = np.round(all_mut2.total_occurences_ARP)
all_mut2.loc[:, ('total_occurences_PAN')] = np.round(all_mut2.total_occurences_PAN)
all_mut2.loc[:, ('percent_ARP')]=np.round((all_mut2.mutated_occurence_ARP/all_mut2.total_occurences_ARP)*100, decimals=2)
all_mut2.loc[:, ('percent_PAN')]=np.round((all_mut2.mutated_occurence_PAN/all_mut2.total_occurences_PAN)*100, decimals=2)
### clean up cols
all_mut3=all_mut2.drop(['prot_PAN', 'position_PAN','mut_log_prob_ARP', 'mut_log_prob_PAN'], axis=1)
all_mut3.reset_index(drop=True, inplace=True)
# human-readable "ref > mutated" labels per vaccine
all_mut3.loc[:, ('mut_pos_PAN')] = [all_mut3.refseq_PAN[i] + ' > ' + all_mut3.mutated_AA_PAN[i] for i in xrange(0, len(all_mut3))]
all_mut3.loc[:, ('mut_pos_ARP')] = [all_mut3.refseq_ARP[i] + ' > ' + all_mut3.mutated_AA_ARP[i] for i in xrange(0, len(all_mut3))]
########## get 10 AA before and after the mutated motif from the reference file
### reach into the reference seq to get 20 aa around the mutations from above line 109
HA_seq=''.join(ref_proteins[0].refseq.tolist())
NP_seq = ''.join(ref_proteins[1].refseq.tolist())
NA_seq = ''.join(ref_proteins[2].refseq.tolist())
MA_seq = ''.join(ref_proteins[3].refseq.tolist())
PB2_seq = ''.join(ref_proteins[4].refseq.tolist())
### make a dirty DF of these proteins
all_seq=pd.DataFrame([HA_seq, NP_seq, NA_seq, MA_seq, PB2_seq], columns=['seq'])
all_seq.loc[:, ('prots')] = ['HA', 'NP', 'NA', 'MA1', 'PB2'] ### assign the protein names to them
### write a loop to reach into the all_seq to get the sequences
all_mut3.loc[:, ('20AA_mut_ARP')] = 'NaN' ## intialize the mut and ref_columns
all_mut3.loc[:, ('20AA_mut_PAN')] = 'NaN'
all_mut3.loc[:, ('20AA_X179A')] = 'NaN'
for i in xrange(0, len(all_mut3)):
    # pos is the 0-based index of the mutated residue in the reference protein
    pos = all_mut3.position_ARP[i]-1
    refprot = ''.join(all_seq.loc[all_seq.prots.str.contains(all_mut3.prot_ARP[i]), ('seq')].tolist())
    #print refprot[pos-10:pos] + '['+all_mut3.mut_pos_ARP[i]+']' + refprot[pos+1:pos+10], refprot[pos-10:pos] + '['+all_mut3.mut_pos_PAN[i]+']' + refprot[pos+1:pos+10], refprot[pos-10:pos+10]
    # NOTE(review): refprot[pos+1:pos+10] yields only 9 residues after the
    # site (and [pos-10:pos+10] 9 after as well), not the 10 the comments
    # claim — confirm whether pos+11 was intended.
    all_mut3.loc[i, ('20AA_mut_ARP')] = refprot[pos-10:pos] + '['+all_mut3.mut_pos_ARP[i]+']' + refprot[pos+1:pos+10]
    all_mut3.loc[i, ('20AA_mut_PAN')] = refprot[pos-10:pos] + '['+all_mut3.mut_pos_PAN[i]+']' + refprot[pos+1:pos+10]
    all_mut3.loc[i, ('20AA_X179A')] = refprot[pos-10:pos+10]
#### write the file to disk
all_mut3.to_csv('TRYPSIN_mutated_counts.csv', index=False)
#############Debugging !!!!!!! ####### Debugging
### debugging and verifying mutations in the main DF raw files
pan_raw=pd.concat(pan_tryp_dfs, ignore_index=True)
arp_raw=pd.concat(arp_tryp_dfs, ignore_index=True)
#############Debugging !!!!!!! ####### Debugging
# NOTE(review): the two .loc expressions below are bare (their result is
# discarded) — interactive/REPL inspection leftovers; the pattern is a regex
# alternation matching any of the HA accession ids or 'hema'.
arp_raw.loc[(arp_raw.protein_name.str.contains('>HA-|ACQ55359.1|ADE29085.1|hema')) & (arp_raw.start_pos == 137)]
pan_raw.loc[(pan_raw.protein_name.str.contains('>HA-|ACQ55359.1|ADE29085.1|hema')) & (pan_raw.start_pos == 137)]
################### go to orginal DB files and retreive raw counts
pan2_mut_raw_counts=pan2.loc[pan2['keys'].isin(common_keys)]
arp2_mut_raw_counts=arp2.loc[arp2['keys'].isin(common_keys)]
#############Debugging !!!!!!! ####### Debugging
arp2_mut_raw_counts=arp2_mut_raw_counts.sort_values(['keys'])
pan2_mut_raw_counts=pan2_mut_raw_counts.sort_values(['keys'])
###### old function debugging !!
# Legacy single-row variant of mutcheck(): updates *reference* in place for
# spectra row *i* of *df*. Kept for debugging only — note it says "below" but
# mutcheck is actually defined above in this file.
def db_reference(df, reference, i): #### this is an old function, use the one below called mutcheck!
    i = int(i)
    index_df = np.arange(df.start_pos[i]-1, df.templen[i]-1) ## get index from parent for loop
    reference.loc[index_df, 'total_occurences'] = reference.loc[index_df, 'total_occurences']+1 # if index from spectra is present in reference index add 1 each time to all index positions
    # NOTE(review): .ix was removed in pandas >= 1.0; this only runs on old pandas.
    ref_index = reference.ix[index_df, ('refseq')] ## get the reference index peptides
    df_pep = pd.Series(list(df.tag_rem_pep[i]), index=ref_index.index) ### get the spectra index peptides
    ref_mutindex = ref_index.index[ref_index != list(df.tag_rem_pep[i])] ## if df spectra index peptides are not equal to the reference index peptides, get that ref_mut index
    reference.loc[ref_mutindex, ('mutated_occurence')] = reference.loc[ref_mutindex, ('mutated_occurence')] +1 ## if there appears to be a mutation add 1 each time it finds one
    reference.loc[ref_mutindex, ('mutated_AA')] = df_pep[df_pep != ref_index] ### finally retreive the mutated pos from spectra index peptide and add it into mutated pos in the reference file
| adiamb/BYONIC-readout-analysis | Byonic_analysis.py | Python | gpl-3.0 | 17,581 |
import datetime
from sqlalchemy import create_engine, Column, Date, DateTime, Float, ForeignKey, Integer, String
from sqlalchemy.dialects.mysql import TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation, relationship
# Shared declarative base that every ORM model below inherits from.
Base = declarative_base()
class CityModel(Base):
    """ORM mapping for the ``city`` table: a city with coordinates and status."""
    __tablename__ = 'city'
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    # NOTE(review): plain Integer, not ForeignKey("state.id") like VisitModel
    # uses — confirm whether the constraint was intentionally omitted.
    state_id = Column(Integer, nullable=False)
    status = Column(String(10), nullable=False)
    latitude = Column(Float, nullable=False)
    longitude = Column(Float, nullable=False)
    date_added = Column(TIMESTAMP, nullable=False)
    date_time_added = Column(TIMESTAMP, nullable=False)
    last_updated = Column(TIMESTAMP, nullable=False)
    def to_json(self):
        """Return this row as a dict of column name -> string value."""
        return model_to_json(self)
class StateModel(Base):
    """ORM mapping for the ``state`` table: a state and its abbreviation."""
    __tablename__ = 'state'
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    abbreviation = Column(String(3), nullable=False)
    date_added = Column(TIMESTAMP, nullable=False)
    date_time_added = Column(TIMESTAMP, nullable=False)
    last_updated = Column(TIMESTAMP, nullable=False)
    def to_json(self):
        """Return this row as a dict of column name -> string value."""
        return model_to_json(self)
class UserModel(Base):
    """ORM mapping for the ``user`` table."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    first_name = Column(String(50), nullable=False)
    last_name = Column(String(50), nullable=False)
    date_added = Column(TIMESTAMP, nullable=False)
    date_time_added = Column(TIMESTAMP, nullable=False)
    last_updated = Column(TIMESTAMP, nullable=False)
    def to_json(self):
        """Return this row as a dict of column name -> string value."""
        return model_to_json(self)
class VisitModel(Base):
    """ORM mapping for the ``visit`` table: a user's visit to a city/state.

    Carries relationships to the user, state and city rows so callers can
    navigate ``visit.user`` / ``visit.state`` / ``visit.city`` directly.
    """
    __tablename__ = 'visit'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("user.id"), nullable=False)
    state_id = Column(Integer, ForeignKey("state.id"), nullable=False)
    city_id = Column(Integer, ForeignKey("city.id"), nullable=False)
    date_added = Column(TIMESTAMP, nullable=False)
    date_time_added = Column(TIMESTAMP, nullable=False)
    last_updated = Column(TIMESTAMP, nullable=False)
    user = relationship("UserModel", foreign_keys='VisitModel.user_id')
    state = relationship("StateModel", foreign_keys='VisitModel.state_id')
    city = relationship("CityModel", foreign_keys='VisitModel.city_id')
    def to_json(self):
        """Return this row as a dict of column name -> string value."""
        return model_to_json(self)
def model_to_json(model):
    """Serialize a SQLAlchemy model instance to a JSON-ready dict.

    Maps every mapped column name to ``str(value)`` so dates, timestamps and
    floats become plain strings without needing a custom JSON encoder.
    (The previous experiments with dir()/isinstance dispatch were dead code
    and have been removed.)
    """
    # __mapper__.c is the mapped-column collection; its keys() are the
    # column attribute names defined on the model class.
    return {key: str(getattr(model, key)) for key in model.__mapper__.c.keys()}
| boxofgoobers/rv-rest-api | models.py | Python | mit | 3,071 |
"""
Signal Handlers for External User Ids to be created and maintainer
"""
from logging import getLogger
from django.db.models.signals import post_save
from django.dispatch import receiver
from openedx.core.djangoapps.catalog.utils import get_programs
from .models import ExternalId, ExternalIdType
# Module-level logger. NOTE(review): not referenced anywhere below in this
# module — confirm whether it is still needed.
LOGGER = getLogger(__name__)
def _user_needs_external_id(instance, created):
return (
created and
instance.user and
not ExternalId.user_has_external_id(
user=instance.user,
type_name=ExternalIdType.MICROBACHELORS_COACHING)
)
@receiver(post_save, sender='student.CourseEnrollment')
def create_external_id_for_microbachelors_program(
    sender, instance, created, **kwargs  # pylint: disable=unused-argument
):
    """
    Handle post_save creates on the CourseEnrollment table.

    When a brand-new enrollment's course belongs to a program with coaching
    support (a MicroBachelors program), mint a MICROBACHELORS_COACHING
    external id for the enrolling user.
    """
    if not _user_needs_external_id(instance, created):
        return
    coaching_supported = any(
        program.get('type_attrs', {}).get('coaching_supported')
        for program in get_programs(course=instance.course_id)
    )
    if coaching_supported:
        ExternalId.add_new_user_id(
            user=instance.user,
            type_name=ExternalIdType.MICROBACHELORS_COACHING
        )
@receiver(post_save, sender='entitlements.CourseEntitlement')
def create_external_id_for_microbachelors_program_entitlement(
    sender, instance, created, **kwargs  # pylint: disable=unused-argument
):
    """
    Handle post_save creates on the CourseEntitlement table.

    When a brand-new entitlement's course belongs to a program with coaching
    support (a MicroBachelors program), mint a MICROBACHELORS_COACHING
    external id for the entitled user.
    """
    if not _user_needs_external_id(instance, created):
        return
    coaching_supported = any(
        program.get('type_attrs', {}).get('coaching_supported')
        for program in get_programs(catalog_course_uuid=instance.course_uuid)
    )
    if coaching_supported:
        ExternalId.add_new_user_id(
            user=instance.user,
            type_name=ExternalIdType.MICROBACHELORS_COACHING
        )
| edx/edx-platform | openedx/core/djangoapps/external_user_ids/signals.py | Python | agpl-3.0 | 2,154 |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from decimal import Decimal
import random
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_block, msg_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_witness_block, msg_witness_blocktxn, MSG_WITNESS_FLAG, NODE_NETWORK, NODE_WITNESS, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript, OP_TRUE, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_bip9_status, satoshi_round, sync_blocks, wait_until
# TestP2PConn: A peer we use to send messages to bitcoind, and store responses.
class TestP2PConn(P2PInterface):
    """P2P test peer that records sendcmpct offers and block announcements
    (via cmpctblock, headers or inv) so tests can synchronize on them."""
    def __init__(self):
        super().__init__()
        self.last_sendcmpct = []        # every sendcmpct received, in order
        self.block_announced = False    # any announcement since last clear
        # Store the hashes of blocks we've seen announced.
        # This is for synchronizing the p2p message traffic,
        # so we can eg wait until a particular block is announced.
        self.announced_blockhashes = set()
    def on_sendcmpct(self, message):
        self.last_sendcmpct.append(message)
    def on_cmpctblock(self, message):
        self.block_announced = True
        self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
        self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
    def on_headers(self, message):
        self.block_announced = True
        for x in self.last_message["headers"].headers:
            x.calc_sha256()
            self.announced_blockhashes.add(x.sha256)
    def on_inv(self, message):
        for x in self.last_message["inv"].inv:
            if x.type == 2:  # MSG_BLOCK inventory entries only
                self.block_announced = True
                self.announced_blockhashes.add(x.hash)
    # Requires caller to hold mininode_lock
    def received_block_announcement(self):
        return self.block_announced
    def clear_block_announcement(self):
        # Reset the announcement flag and drop any stale announcement messages.
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
            self.last_message.pop("cmpctblock", None)
    def get_headers(self, locator, hashstop):
        # Send a getheaders request with the given locator/hashstop.
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.send_message(msg)
    def send_header_for_blocks(self, new_blocks):
        # Announce new_blocks to the node via a headers message.
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)
    def request_headers_and_sync(self, locator, hashstop=0):
        # Request headers and wait for the resulting announcement, leaving
        # the peer in a cleared state for the next check.
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
        self.clear_block_announcement()
    # Block until a block announcement for a particular block hash is
    # received.
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.announced_blockhashes)
        wait_until(received_hash, timeout=timeout, lock=mininode_lock)
    def send_await_disconnect(self, message, timeout=30):
        """Sends a message to the node and wait for disconnect.
        This is used when we want to send a message into the node that we expect
        will get us disconnected, eg an invalid block."""
        self.send_message(message)
        wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
    def set_test_params(self):
        # Framework hook: configure chain and node topology before startup.
        self.setup_clean_chain = True
        # Node0 = pre-segwit, node1 = segwit-aware
        self.num_nodes = 2
        # This test was written assuming SegWit is activated using BIP9 at height 432 (3x confirmation window).
        # TODO: Rewrite this test to support SegWit being always active.
        self.extra_args = [["-vbparams=segwit:0:0"], ["-vbparams=segwit:0:999999999999", "-txindex"]]
        # Spendable anyone-can-spend outputs, populated by make_utxos().
        self.utxos = []
    def skip_test_if_missing_module(self):
        # The test funds transactions via the node wallet; skip if unavailable.
        self.skip_if_no_wallet()
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
    # Create 10 more anyone-can-spend utxo's for testing.
    def make_utxos(self):
        """Mine a coinbase, mature it, then split it into 10 OP_TRUE outputs
        recorded in self.utxos as [txid, vout_index, value] entries."""
        # Doesn't matter which node we use, just use node0.
        block = self.build_block_on_tip(self.nodes[0])
        self.test_node.send_and_ping(msg_block(block))
        assert int(self.nodes[0].getbestblockhash(), 16) == block.sha256
        # 100 confirmations so the coinbase above becomes spendable.
        self.nodes[0].generate(100)
        total_value = block.vtx[0].vout[0].nValue
        out_value = total_value // 10
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
        for i in range(10):
            tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
        tx.rehash()
        block2 = self.build_block_on_tip(self.nodes[0])
        block2.vtx.append(tx)
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.solve()
        self.test_node.send_and_ping(msg_block(block2))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
        self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
        return
    # Test "sendcmpct" (between peers preferring the same version):
    # - No compact block announcements unless sendcmpct is sent.
    # - If sendcmpct is sent with version > preferred_version, the message is ignored.
    # - If sendcmpct is sent with boolean 0, then block announcements are not
    # made with compact blocks.
    # - If sendcmpct is then sent with boolean 1, then new block announcements
    # are made with compact blocks.
    # If old_node is passed in, request compact blocks with version=preferred-1
    # and verify that it receives block announcements via compact block.
    def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
        """Exercise BIP 152 sendcmpct negotiation; see the comment block above."""
        # Make sure we get a SENDCMPCT message from our peer
        def received_sendcmpct():
            return (len(test_node.last_sendcmpct) > 0)
        wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
        with mininode_lock:
            # Check that the first version received is the preferred one
            assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
            # And that we receive versions down to 1.
            assert_equal(test_node.last_sendcmpct[-1].version, 1)
            test_node.last_sendcmpct = []
        tip = int(node.getbestblockhash(), 16)
        def check_announcement_of_new_block(node, peer, predicate):
            # Mine one block and assert predicate(peer) about how it was announced.
            peer.clear_block_announcement()
            block_hash = int(node.generate(1)[0], 16)
            peer.wait_for_block_announcement(block_hash, timeout=30)
            assert peer.block_announced
            with mininode_lock:
                assert predicate(peer), (
                    "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
                        block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
        # We shouldn't get any block announcements via cmpctblock yet.
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
        # Try one more time, this time after requesting headers.
        test_node.request_headers_and_sync(locator=[tip])
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
        # Test a few ways of using sendcmpct that should NOT
        # result in compact block announcements.
        # Before each test, sync the headers chain.
        test_node.request_headers_and_sync(locator=[tip])
        # Now try a SENDCMPCT message with too-high version
        sendcmpct = msg_sendcmpct()
        sendcmpct.version = preferred_version + 1
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])
        # Now try a SENDCMPCT message with valid version, but announce=False
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])
        # Finally, try a SENDCMPCT message with announce=True
        sendcmpct.version = preferred_version
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
        # Try one more time (no headers sync should be needed!)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
        # Try one more time, after turning on sendheaders
        test_node.send_and_ping(msg_sendheaders())
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
        # Try one more time, after sending a version-1, announce=false message.
        sendcmpct.version = preferred_version - 1
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
        # Now turn off announcements
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
        if old_node is not None:
            # Verify that a peer using an older protocol version can receive
            # announcements from this node.
            sendcmpct.version = preferred_version - 1
            sendcmpct.announce = True
            old_node.send_and_ping(sendcmpct)
            # Header sync
            old_node.request_headers_and_sync(locator=[tip])
            check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
    # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
    def test_invalid_cmpctblock_message(self):
        """Send a cmpctblock with an out-of-range prefilled index and check
        the node disconnects us without advancing its tip."""
        self.nodes[0].generate(101)
        block = self.build_block_on_tip(self.nodes[0])
        cmpct_block = P2PHeaderAndShortIDs()
        cmpct_block.header = CBlockHeader(block)
        cmpct_block.prefilled_txn_length = 1
        # This index will be too high
        prefilled_txn = PrefilledTransaction(1, block.vtx[0])
        cmpct_block.prefilled_txn = [prefilled_txn]
        self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
        # Tip must still be the invalid block's parent.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
    # Compare the generated shortids to what we expect based on BIP 152, given
    # bitcoind's choice of nonce.
    def test_compactblock_construction(self, node, test_node, version, use_witness_address):
        """Mine a block full of (optionally segwit) transactions and verify the
        compact block the node produces, both fast-announced and via getdata."""
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        if use_witness_address:
            # Want at least one segwit spend, so move all funds to
            # a witness address.
            address = node.getnewaddress(address_type='bech32')
            value_to_send = node.getbalance()
            node.sendtoaddress(address, satoshi_round(value_to_send - Decimal(0.1)))
            node.generate(1)
        segwit_tx_generated = False
        for i in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = FromHex(CTransaction(), hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True
        if use_witness_address:
            assert segwit_tx_generated  # check that our test is not broken
        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        test_node.wait_for_block_announcement(tip)
        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node, version)
        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)
        # Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
        for tx in block.vtx:
            tx.calc_sha256()
        block.rehash()
        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert "cmpctblock" in test_node.last_message
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert "cmpctblock" in test_node.last_message
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
    """Validate a received cmpctblock against the actual block.

    Args:
        version: compact block protocol version (1 = txids, no witnesses;
            2 = wtxids, witnesses included in prefilled txs).
        header_and_shortids: HeaderAndShortIDs already converted to
            absolute prefilled indexes by the caller.
        block_hash: expected block hash as an int.
        block: the full CBlock to compare against.

    Note: consumes (pops from) header_and_shortids.prefilled_txn and
    header_and_shortids.shortids while checking.
    """
    # Check that we got the right block!
    header_and_shortids.header.calc_sha256()
    assert_equal(header_and_shortids.header.sha256, block_hash)
    # Make sure the prefilled_txn appears to have included the coinbase
    assert len(header_and_shortids.prefilled_txn) >= 1
    assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
    # Check that all prefilled_txn entries match what's in the block.
    for entry in header_and_shortids.prefilled_txn:
        entry.tx.calc_sha256()
        # This checks the non-witness parts of the tx agree
        assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
        # And this checks the witness
        wtxid = entry.tx.calc_sha256(True)
        if version == 2:
            assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
        else:
            # Shouldn't have received a witness
            assert entry.tx.wit.is_null()
    # Check that the cmpctblock message announced all the transactions.
    assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
    # And now check that all the shortids are as expected as well.
    # Determine the siphash keys to use.
    [k0, k1] = header_and_shortids.get_siphash_keys()
    index = 0
    while index < len(block.vtx):
        if (len(header_and_shortids.prefilled_txn) > 0 and
                header_and_shortids.prefilled_txn[0].index == index):
            # Already checked prefilled transactions above
            header_and_shortids.prefilled_txn.pop(0)
        else:
            # Shortid is computed over the wtxid for version 2, txid for v1.
            tx_hash = block.vtx[index].sha256
            if version == 2:
                tx_hash = block.vtx[index].calc_sha256(True)
            shortid = calculate_shortid(k0, k1, tx_hash)
            assert_equal(shortid, header_and_shortids.shortids[0])
            header_and_shortids.shortids.pop(0)
        index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
    """Announce a block via inv and via headers, check that the node
    requests a compact block (getdata type 4), then complete the block
    by answering the resulting getblocktxn with the coinbase and verify
    the tip advances.
    """
    # Try announcing a block with an inv or header, expect a compactblock
    # request
    for announce in ["inv", "header"]:
        block = self.build_block_on_tip(node, segwit=segwit)
        with mininode_lock:
            test_node.last_message.pop("getdata", None)
        if announce == "inv":
            test_node.send_message(msg_inv([CInv(2, block.sha256)]))
            # An inv for an unknown block triggers a getheaders first.
            wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
            test_node.send_header_for_blocks([block])
        else:
            test_node.send_header_for_blocks([block])
        wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
        assert_equal(len(test_node.last_message["getdata"].inv), 1)
        assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
        assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
        # Send back a compactblock message that omits the coinbase
        comp_block = HeaderAndShortIDs()
        comp_block.header = CBlockHeader(block)
        comp_block.nonce = 0
        [k0, k1] = comp_block.get_siphash_keys()
        coinbase_hash = block.vtx[0].sha256
        if version == 2:
            coinbase_hash = block.vtx[0].calc_sha256(True)
        comp_block.shortids = [calculate_shortid(k0, k1, coinbase_hash)]
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        # Tip must not have advanced: the coinbase wasn't delivered yet.
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
        # Expect a getblocktxn message.
        with mininode_lock:
            assert "getblocktxn" in test_node.last_message
            absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [0])  # should be a coinbase request
        # Send the coinbase, and verify that the tip advances.
        if version == 2:
            msg = msg_witness_blocktxn()
        else:
            msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = [block.vtx[0]]
        test_node.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
    """Build a block on the current tip containing a chain of
    num_transactions transactions, each spending the previous one,
    starting from the given utxo ([txid, vout index, value])."""
    block = self.build_block_on_tip(node)
    spendable = utxo  # output to spend next: [txid, vout index, value]
    anyone_can_spend = CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])
    for _ in range(num_transactions):
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(spendable[0], spendable[1]), b''))
        # Spend everything minus a 1000-satoshi fee.
        tx.vout.append(CTxOut(spendable[2] - 1000, anyone_can_spend))
        tx.rehash()
        block.vtx.append(tx)
        spendable = [tx.sha256, 0, tx.vout[0].nValue]
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
    """Check that the node requests (via getblocktxn) only transactions it
    doesn't already have, and that answering the request — or pre-seeding
    the mempool — lets it reconstruct the announced compact block.
    """
    with_witness = (version == 2)

    def test_getblocktxn_response(compact_block, peer, expected_result):
        # Announce a compact block and check which absolute indexes the
        # node asks for in its getblocktxn reply.
        msg = msg_cmpctblock(compact_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert "getblocktxn" in peer.last_message
            absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, expected_result)

    def test_tip_after_message(node, peer, msg, tip):
        # Deliver msg and confirm the chain tip becomes `tip`.
        peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), tip)

    # First try announcing compactblocks that won't reconstruct, and verify
    # that we receive getblocktxn messages back.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, use_witness=with_witness)
    # Only the coinbase is prefilled, so txs 1-5 should be requested.
    test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
    msg_bt = msg_blocktxn()
    if with_witness:
        msg_bt = msg_witness_blocktxn()  # serialize with witnesses
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Now try interspersing the prefilled transactions
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)
    # Now try giving one transaction ahead of time.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    test_node.send_and_ping(msg_tx(block.vtx[1]))
    assert block.vtx[1].hash in node.getrawmempool()
    # Prefill 4 out of the 6 transactions, and verify that only the one
    # that was not in the mempool is requested.
    comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [5])
    msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)
    # Now provide all transactions to the node before the block is
    # announced and verify reconstruction happens immediately.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    for tx in block.vtx[1:]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert tx.hash in mempool
    # Clear out last request.
    with mininode_lock:
        test_node.last_message.pop("getblocktxn", None)
    # Send compact block
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
    test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
    with mininode_lock:
        # Shouldn't have gotten a request for any transaction
        assert "getblocktxn" not in test_node.last_message
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
    """Respond to a getblocktxn with the wrong transactions and verify the
    node falls back to requesting the full block (rather than permanently
    marking the block as failed), then accepts the real block.
    """
    if (len(self.utxos) == 0):
        self.make_utxos()
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Relay the first 5 transactions from the block in advance
    for tx in block.vtx[1:6]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:6]:
        assert tx.hash in mempool
    # Send compact block
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    absolute_indexes = []
    with mininode_lock:
        assert "getblocktxn" in test_node.last_message
        absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
    assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
    # Now give an incorrect response.
    # Note that it's possible for bitcoind to be smart enough to know we're
    # lying, since it could check to see if the shortid matches what we're
    # sending, and eg disconnect us for misbehavior. If that behavior
    # change was made, we could just modify this test by having a
    # different peer provide the block further down, so that we're still
    # verifying that the block isn't marked bad permanently. This is good
    # enough for now.
    msg = msg_blocktxn()
    if version == 2:
        msg = msg_witness_blocktxn()
    # Deliberately wrong: repeats tx 5 and omits tx 6.
    msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
    test_node.send_and_ping(msg)
    # Tip should not have updated
    assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
    # We should receive a getdata request
    wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
    assert_equal(len(test_node.last_message["getdata"].inv), 1)
    # Type 2 == MSG_BLOCK; segwit nodes may OR in MSG_WITNESS_FLAG.
    assert test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2 | MSG_WITNESS_FLAG
    assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
    # Deliver the block
    if version == 2:
        test_node.send_and_ping(msg_witness_block(block))
    else:
        test_node.send_and_ping(msg_block(block))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
    """Exercise the node's getblocktxn handler: it serves blocktxn replies
    (witness-stripped for version-1 peers) for recent blocks, and sends a
    full block instead once the requested block is deeper than
    MAX_GETBLOCKTXN_DEPTH.
    """
    # bitcoind will not send blocktxn responses for blocks whose height is
    # more than 10 blocks deep.
    MAX_GETBLOCKTXN_DEPTH = 10
    chain_height = node.getblockcount()
    current_height = chain_height
    while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
        block_hash = node.getblockhash(current_height)
        block = FromHex(CBlock(), node.getblock(block_hash, False))
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
        # Request a random non-empty subset of the block's transactions.
        num_to_request = random.randint(1, len(block.vtx))
        msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
        test_node.send_message(msg)
        wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
        [tx.calc_sha256() for tx in block.vtx]
        with mininode_lock:
            assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
            all_indices = msg.block_txn_request.to_absolute()
            for index in all_indices:
                tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
                tx.calc_sha256()
                assert_equal(tx.sha256, block.vtx[index].sha256)
                if version == 1:
                    # Witnesses should have been stripped
                    assert tx.wit.is_null()
                else:
                    # Check that the witness matches
                    assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
            test_node.last_message.pop("blocktxn", None)
        current_height -= 1
    # Next request should send a full block response, as we're past the
    # allowed depth for a blocktxn response.
    block_hash = node.getblockhash(current_height)
    msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
    with mininode_lock:
        test_node.last_message.pop("block", None)
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
        assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
    """Verify compact-block service limits away from the tip: cmpctblock
    getdata only works within MAX_CMPCTBLOCK_DEPTH of the tip (a full
    block is returned beyond it), a stale compact block announcement is
    treated as headers-only, and getblocktxn for such a block is silently
    ignored to avoid fingerprinting.
    """
    # Test that requesting old compactblocks doesn't work.
    MAX_CMPCTBLOCK_DEPTH = 5
    new_blocks = []
    for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
        test_node.clear_block_announcement()
        new_blocks.append(node.generate(1)[0])
        wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
    test_node.clear_block_announcement()
    # new_blocks[0] is exactly at the depth limit: still served compactly.
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
    test_node.clear_block_announcement()
    node.generate(1)
    wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
    test_node.clear_block_announcement()
    with mininode_lock:
        test_node.last_message.pop("block", None)
    # new_blocks[0] is now one past the limit: expect a full block instead.
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
    # Generate an old compactblock, and verify that it's not accepted.
    cur_height = node.getblockcount()
    hashPrevBlock = int(node.getblockhash(cur_height - 5), 16)
    block = self.build_block_on_tip(node)
    block.hashPrevBlock = hashPrevBlock
    block.solve()
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block)
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    tips = node.getchaintips()
    found = False
    for x in tips:
        if x["hash"] == block.hash:
            assert_equal(x["status"], "headers-only")
            found = True
            break
    assert found
    # Requesting this block via getblocktxn should silently fail
    # (to avoid fingerprinting attacks).
    msg = msg_getblocktxn()
    msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
    with mininode_lock:
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
    """Mine enough blocks for the BIP9 segwit deployment to activate."""
    blocks_to_mine = 144 * 3  # three regtest BIP9 periods
    node.generate(blocks_to_mine)
    status = get_bip9_status(node, "segwit")["status"]
    assert_equal(status, 'active')
def test_end_to_end_block_relay(self, node, listeners):
    """Submit a block to the node and verify every listener peer receives
    a cmpctblock announcement whose header matches the block."""
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    for listener in listeners:
        listener.clear_block_announcement()
    # ToHex() won't serialize with witness, but this block has no witnesses
    # anyway. TODO: repeat this test with witness tx's to a segwit node.
    node.submitblock(ToHex(block))
    for listener in listeners:
        wait_until(listener.received_block_announcement, timeout=30, lock=mininode_lock)
    with mininode_lock:
        for listener in listeners:
            assert "cmpctblock" in listener.last_message
            announced_header = listener.last_message["cmpctblock"].header_and_shortids.header
            announced_header.calc_sha256()
            assert_equal(announced_header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
    """Relay a compact block whose header is valid but whose transaction
    list is invalid (one tx of the spend chain was removed), and verify
    the node rejects the block without disconnecting us.
    """
    assert len(self.utxos)
    utxo = self.utxos[0]
    block = self.build_block_with_transactions(node, utxo, 5)
    # Invalidate the block: tx 4 now spends a parent that is no longer in
    # the block; recommit the merkle root so the header stays consistent.
    del block.vtx[3]
    block.hashMerkleRoot = block.calc_merkle_root()
    if use_segwit:
        # If we're testing with segwit, also drop the coinbase witness,
        # but include the witness commitment.
        add_witness_commitment(block)
        block.vtx[0].wit.vtxinwit = []
        block.solve()
    # Now send the compact block with all transactions prefilled, and
    # verify that we don't get disconnected.
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
    msg = msg_cmpctblock(comp_block.to_p2p())
    test_node.send_and_ping(msg)
    # Check that the tip didn't advance.
    # Bug fix: the original used "is not" (object identity), which only
    # passes by accident for large ints; compare by value instead.
    assert int(node.getbestblockhash(), 16) != block.sha256
    test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
    """Ask the node (through this peer) to announce new blocks as compact
    blocks of the given version, after syncing headers to the tip."""
    best_hash = int(node.getbestblockhash(), 16)
    peer.get_headers(locator=[best_hash], hashstop=0)
    sendcmpct = msg_sendcmpct()
    sendcmpct.announce = True
    sendcmpct.version = version
    peer.send_and_ping(sendcmpct)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
    """Announce a compact block through one peer and complete it through
    another, including recovering (via the stalling peer's blocktxn)
    after an invalid compact block delivery.
    """
    assert len(self.utxos)

    def announce_cmpct_block(node, peer):
        # Build a fresh block and announce it as a compact block; the
        # node should ask for the missing transactions via getblocktxn.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        msg = msg_cmpctblock(cmpct_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert "getblocktxn" in peer.last_message
        return block, cmpct_block

    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert tx.hash in mempool
    # With every tx in the mempool, the compact block reconstructs directly.
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Now test that delivering an invalid compact block won't break relay
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    # Corrupt the prefilled coinbase with a bogus witness so that
    # reconstruction yields an invalid block.
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()]
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
    cmpct_block.use_witness = True
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert int(node.getbestblockhash(), 16) != block.sha256
    # The stalling peer can still complete the block via blocktxn.
    msg = msg_blocktxn()
    msg.block_transactions.blockhash = block.sha256
    msg.block_transactions.transactions = block.vtx[1:]
    stalling_peer.send_and_ping(msg)
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Top-level test sequence: exercise compact block behavior against a
    pre-segwit node (version 1 peers) and a segwit node (version 2 and
    version 1 peers), both before and after segwit activation."""
    # Setup the p2p connections
    self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
    self.segwit_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
    self.old_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
    # We will need UTXOs to construct transactions in later tests.
    self.make_utxos()
    self.log.info("Running tests, pre-segwit activation:")
    self.log.info("Testing SENDCMPCT p2p message... ")
    self.test_sendcmpct(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn requests...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn handler...")
    self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests/announcements not at chain tip...")
    self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
    sync_blocks(self.nodes)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
    sync_blocks(self.nodes)
    self.log.info("Testing handling of incorrect blocktxn responses...")
    self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    # End-to-end block relay tests
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
    self.log.info("Testing reconstructing compact blocks from all peers...")
    self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
    sync_blocks(self.nodes)
    # Advance to segwit activation
    self.log.info("Advancing to segwit activation")
    self.activate_segwit(self.nodes[1])
    self.log.info("Running tests, post-segwit activation...")
    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests (unupgraded node)... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
    self.log.info("Testing getblocktxn requests (unupgraded node)...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    # Need to manually sync node0 and node1, because post-segwit activation,
    # node1 will not download blocks from node0.
    self.log.info("Syncing nodes...")
    assert self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()
    while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
        block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount() + 1)
        self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
    assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
    self.log.info("Testing compactblock requests (segwit node)... ")
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
    self.log.info("Testing getblocktxn requests (segwit node)...")
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    # Test that if we submitblock to node1, we'll get a compact block
    # announcement to all peers.
    # (Post-segwit activation, blocks won't propagate from node0 to node1
    # automatically, so don't bother testing a block announced to node0.)
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
    self.log.info("Testing invalid index in cmpctblock message...")
    self.test_invalid_cmpctblock_message()
# Script entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    CompactBlocksTest().main()
| r8921039/bitcoin | test/functional/p2p_compactblocks.py | Python | mit | 44,206 |
from . import connection
from . import chat_room
from . import utils
import datetime
class StandardRoom(chat_room.ChatRoom):
    """
    A room with basic utilities already implemented as instructed by
    https://github.com/jedevc/botrulez
    """

    def __init__(self, roomname, password=None, attempts=None):
        super().__init__(roomname, password, attempts)
        # Default responses; subclasses may override these attributes.
        self.ping_text = "Pong!"
        self.short_help_text = None
        self.help_text = None

    def handle_message(self, data):
        """Answer the standard botrulez commands (!ping, !help, !uptime);
        everything else is deferred to the parent class."""
        payload = data["data"]
        content = payload["content"]
        reply = payload["id"]
        mention = "@" + self.nickname
        if content in ("!ping", "!ping " + mention):
            self.send_chat(self.ping_text, reply)
        elif content == "!help":
            if self.short_help_text is not None:
                self.send_chat(self.short_help_text, reply)
        elif content == "!help " + mention:
            if self.help_text is not None:
                self.send_chat(self.help_text, reply)
        elif content == "!uptime " + mention:
            started = datetime.datetime.strftime(self.start_utc, "%Y-%m-%d %H:%M:%S")
            elapsed = utils.extract_time(self.uptime())
            self.send_chat("/me has been up since " + started + " UTC (" + elapsed + ")", reply)
        else:
            super().handle_message(data)
| ArkaneMoose/EuPy | euphoria/standard_room.py | Python | mit | 1,415 |
from __future__ import unicode_literals, division, absolute_import
import logging
import re
from itertools import chain
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.soup import get_soup
log = logging.getLogger('myepisodes')
URL = 'http://www.myepisodes.com/'
class MyEpisodesList(object):
    """Creates an entry for each item in your myepisodes.com show list.
    Syntax:
    myepisodes_list:
    username: <value>
    password: <value>
    strip_dates: <yes|no>
    include_ignored: <yes|no>
    Options username and password are required.
    """
    # JSON schema validating the task config; username/password required.
    schema = {
        'type': 'object',
        'properties': {
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'strip_dates': {'type': 'boolean', 'default': False},
            'include_ignored': {'type': 'boolean', 'default': False},
        },
        'required': ['username', 'password'],
        'additionalProperties': False,
    }

    @cached('myepisodes_list')
    @plugin.internet(log)
    def on_task_input(self, task, config):
        """Log in to myepisodes.com (once per session — the login POST is
        skipped when the requests session already has cookies), scrape the
        'manage shows' page, and return one Entry per show.

        Raises plugin.PluginWarning on failed login and plugin.PluginError
        when the page cannot be parsed.
        """
        if not task.requests.cookies:
            username = config['username']
            password = config['password']
            log.debug("Logging in to %s ..." % URL)
            params = {
                'username': username,
                'password': password,
                'action': 'Login'
            }
            # A successful login page echoes the username back.
            # NOTE(review): .content is bytes on Python 3, which would make
            # this membership test raise TypeError; appears to rely on
            # Python 2 str semantics — confirm before porting.
            loginsrc = task.requests.post(URL + 'login.php', data=params).content
            if str(username) not in loginsrc:
                raise plugin.PluginWarning(('Login to myepisodes.com failed, please check '
                                            'your account data or see if the site is down.'), log)
        page = task.requests.get(URL + "shows.php?type=manage").content
        try:
            soup = get_soup(page)
        except Exception as e:
            raise plugin.PluginError("Unable to parse myepisodes.com page: %s" % (e,))
        entries = []

        def show_list(select_id):
            # Each show list is rendered as the <option>s of a <select>.
            return soup.find('select', {'id': select_id}).findAll('option')

        options = show_list('shows')
        if config['include_ignored']:
            options = chain(options, show_list('ignored_shows'))
        for option in options:
            name = option.text
            if config.get('strip_dates'):
                # Remove year from end of name if present
                name = re.sub(r'\s+\(\d{4}\)$', '', name)
            showid = option.get('value')
            url = '%sviews.php?type=epsbyshow&showid=%s' % (URL, showid)
            entry = Entry()
            entry['title'] = name
            entry['url'] = url
            entry['series_name'] = name
            entry['myepisodes_id'] = showid
            if entry.isvalid():
                entries.append(entry)
            else:
                log.debug('Invalid entry created? %s' % entry)
        if not entries:
            log.warn("No shows found on myepisodes.com list. Maybe you need to add some first?")
        return entries
@event('plugin.register')
def register_plugin():
    # Register under the 'myepisodes_list' config keyword; api_ver=2
    # selects the plugin API variant that passes config to task handlers.
    plugin.register(MyEpisodesList, 'myepisodes_list', api_ver=2)
| ZefQ/Flexget | flexget/plugins/input/myepisodes_list.py | Python | mit | 3,239 |
##
## This file is part of qpOASES.
##
## qpOASES -- An Implementation of the Online Active Set Strategy.
## Copyright (C) 2007-2015 by Hans Joachim Ferreau, Andreas Potschka,
## Christian Kirches et al. All rights reserved.
##
## qpOASES is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## qpOASES is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with qpOASES; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Example adapted from examples/example1b.cpp.
## author of this file: Sebastian F. Walter
import numpy as np
from qpoases import PyQProblemB as QProblemB
from qpoases import PyBooleanType as BooleanType
from qpoases import PySubjectToStatus as SubjectToStatus
from qpoases import PyOptions as Options

# Example for qpOASES main function using the QProblemB class.
# Solves a 2-variable bound-constrained QP (min 0.5*x'Hx + g'x,
# lb <= x <= ub), then hot-starts a second QP with new data.

# Setup data of first QP.
H = np.array([1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))
g = np.array([1.5, 1.0 ])
lb = np.array([0.5, -2.0])
ub = np.array([5.0, 2.0 ])
# Setup data of second QP (same Hessian, new gradient and bounds).
g_new = np.array([1.0, 1.5])
lb_new = np.array([0.0, -1.0])
ub_new = np.array([5.0, -0.5])
# Setting up QProblemB object (2 variables, bounds only — no general
# constraints).
example = QProblemB(2)
options = Options()
options.enableFlippingBounds = BooleanType.FALSE
options.initialStatusBounds = SubjectToStatus.INACTIVE
options.numRefinementSteps = 1
example.setOptions(options)
# Solve first QP. nWSR is in/out: max working-set recalculations allowed,
# overwritten with the number actually performed.
nWSR = np.array([10])
example.init(H, g, lb, ub, nWSR)
# NOTE(review): %-formatting a 1-element ndarray with %d works but is
# deprecated in newer numpy — confirm before upgrading.
print("\nnWSR = %d\n\n"%nWSR)
# Solve second QP by hot-starting from the first solution.
nWSR = np.array([10])
example.hotstart(g_new, lb_new, ub_new, nWSR)
print("\nnWSR = %d\n\n"% nWSR)
# Get and print solution of second QP.
xOpt = np.zeros(2)
example.getPrimalSolution(xOpt)
print("\nxOpt = [ %e, %e ]; objVal = %e\n\n" %(xOpt[0], xOpt[1],
                                               example.getObjVal()))
| airballking/qpOASES | interfaces/python/examples/example1b.py | Python | lgpl-2.1 | 2,304 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
# This class appears to be machine-generated protocol buffer code (it
# mirrors the structure of other *_pb modules); prefer regenerating over
# hand-editing.
class UserServiceError(ProtocolBuffer.ProtocolMessage):
  # Error-code enum values for the user service.
  OK           =    0
  REDIRECT_URL_TOO_LONG =    1
  NOT_ALLOWED  =    2
  OAUTH_INVALID_TOKEN =    3
  OAUTH_INVALID_REQUEST =    4
  OAUTH_ERROR  =    5

  # Reverse mapping from enum value to its symbolic name.
  _ErrorCode_NAMES = {
    0: "OK",
    1: "REDIRECT_URL_TOO_LONG",
    2: "NOT_ALLOWED",
    3: "OAUTH_INVALID_TOKEN",
    4: "OAUTH_INVALID_REQUEST",
    5: "OAUTH_ERROR",
  }

  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    # Message has no fields, so merging is a no-op.
    assert x is not self

  def Equals(self, x):
    # All field-less messages compare equal.
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Skip any unknown fields; tag 0 is always malformed.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateLoginURLRequest(ProtocolBuffer.ProtocolMessage):
  """Request for UserService.CreateLoginURL.

  Fields:
    destination_url: required string, field 1.
    auth_domain: optional string, field 2.
    federated_identity: optional string, field 3.
  """
  # has_* presence flags and field defaults (class-level defaults are shadowed
  # per instance on first set_*).
  has_destination_url_ = 0
  destination_url_ = ""
  has_auth_domain_ = 0
  auth_domain_ = ""
  has_federated_identity_ = 0
  federated_identity_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def destination_url(self): return self.destination_url_

  def set_destination_url(self, x):
    self.has_destination_url_ = 1
    self.destination_url_ = x

  def clear_destination_url(self):
    if self.has_destination_url_:
      self.has_destination_url_ = 0
      self.destination_url_ = ""

  def has_destination_url(self): return self.has_destination_url_

  def auth_domain(self): return self.auth_domain_

  def set_auth_domain(self, x):
    self.has_auth_domain_ = 1
    self.auth_domain_ = x

  def clear_auth_domain(self):
    if self.has_auth_domain_:
      self.has_auth_domain_ = 0
      self.auth_domain_ = ""

  def has_auth_domain(self): return self.has_auth_domain_

  def federated_identity(self): return self.federated_identity_

  def set_federated_identity(self, x):
    self.has_federated_identity_ = 1
    self.federated_identity_ = x

  def clear_federated_identity(self):
    if self.has_federated_identity_:
      self.has_federated_identity_ = 0
      self.federated_identity_ = ""

  def has_federated_identity(self): return self.has_federated_identity_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_destination_url()): self.set_destination_url(x.destination_url())
    if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
    if (x.has_federated_identity()): self.set_federated_identity(x.federated_identity())

  def Equals(self, x):
    if x is self: return 1
    if self.has_destination_url_ != x.has_destination_url_: return 0
    if self.has_destination_url_ and self.destination_url_ != x.destination_url_: return 0
    if self.has_auth_domain_ != x.has_auth_domain_: return 0
    if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
    if self.has_federated_identity_ != x.has_federated_identity_: return 0
    if self.has_federated_identity_ and self.federated_identity_ != x.federated_identity_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_destination_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: destination_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.destination_url_))
    if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
    if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
    # Trailing + 1 is the tag byte of the always-serialized required field.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_destination_url_):
      n += 1
      n += self.lengthString(len(self.destination_url_))
    if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
    if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
    return n

  def Clear(self):
    self.clear_destination_url()
    self.clear_auth_domain()
    self.clear_federated_identity()

  def OutputUnchecked(self, out):
    # Tag bytes 10/18/26 encode fields 1/2/3 with wire type 2 (length-delimited).
    out.putVarInt32(10)
    out.putPrefixedString(self.destination_url_)
    if (self.has_auth_domain_):
      out.putVarInt32(18)
      out.putPrefixedString(self.auth_domain_)
    if (self.has_federated_identity_):
      out.putVarInt32(26)
      out.putPrefixedString(self.federated_identity_)

  def OutputPartial(self, out):
    if (self.has_destination_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.destination_url_)
    if (self.has_auth_domain_):
      out.putVarInt32(18)
      out.putPrefixedString(self.auth_domain_)
    if (self.has_federated_identity_):
      out.putVarInt32(26)
      out.putPrefixedString(self.federated_identity_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_destination_url(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_auth_domain(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_federated_identity(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_destination_url_: res+=prefix+("destination_url: %s\n" % self.DebugFormatString(self.destination_url_))
    if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
    if self.has_federated_identity_: res+=prefix+("federated_identity: %s\n" % self.DebugFormatString(self.federated_identity_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers.
  kdestination_url = 1
  kauth_domain = 2
  kfederated_identity = 3

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "destination_url",
    2: "auth_domain",
    3: "federated_identity",
  }, 3)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateLoginURLResponse(ProtocolBuffer.ProtocolMessage):
  """Response for UserService.CreateLoginURL.

  Fields:
    login_url: required string, field 1.
  """
  has_login_url_ = 0
  login_url_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def login_url(self): return self.login_url_

  def set_login_url(self, x):
    self.has_login_url_ = 1
    self.login_url_ = x

  def clear_login_url(self):
    if self.has_login_url_:
      self.has_login_url_ = 0
      self.login_url_ = ""

  def has_login_url(self): return self.has_login_url_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_login_url()): self.set_login_url(x.login_url())

  def Equals(self, x):
    if x is self: return 1
    if self.has_login_url_ != x.has_login_url_: return 0
    if self.has_login_url_ and self.login_url_ != x.login_url_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_login_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: login_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.login_url_))
    # + 1 is the tag byte of the required field.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_login_url_):
      n += 1
      n += self.lengthString(len(self.login_url_))
    return n

  def Clear(self):
    self.clear_login_url()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.login_url_)

  def OutputPartial(self, out):
    if (self.has_login_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.login_url_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_login_url(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_login_url_: res+=prefix+("login_url: %s\n" % self.DebugFormatString(self.login_url_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  klogin_url = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "login_url",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateLogoutURLRequest(ProtocolBuffer.ProtocolMessage):
  """Request for UserService.CreateLogoutURL.

  Fields:
    destination_url: required string, field 1.
    auth_domain: optional string, field 2.
  """
  has_destination_url_ = 0
  destination_url_ = ""
  has_auth_domain_ = 0
  auth_domain_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def destination_url(self): return self.destination_url_

  def set_destination_url(self, x):
    self.has_destination_url_ = 1
    self.destination_url_ = x

  def clear_destination_url(self):
    if self.has_destination_url_:
      self.has_destination_url_ = 0
      self.destination_url_ = ""

  def has_destination_url(self): return self.has_destination_url_

  def auth_domain(self): return self.auth_domain_

  def set_auth_domain(self, x):
    self.has_auth_domain_ = 1
    self.auth_domain_ = x

  def clear_auth_domain(self):
    if self.has_auth_domain_:
      self.has_auth_domain_ = 0
      self.auth_domain_ = ""

  def has_auth_domain(self): return self.has_auth_domain_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_destination_url()): self.set_destination_url(x.destination_url())
    if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())

  def Equals(self, x):
    if x is self: return 1
    if self.has_destination_url_ != x.has_destination_url_: return 0
    if self.has_destination_url_ and self.destination_url_ != x.destination_url_: return 0
    if self.has_auth_domain_ != x.has_auth_domain_: return 0
    if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_destination_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: destination_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.destination_url_))
    if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
    # + 1 is the tag byte of the required field.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_destination_url_):
      n += 1
      n += self.lengthString(len(self.destination_url_))
    if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
    return n

  def Clear(self):
    self.clear_destination_url()
    self.clear_auth_domain()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.destination_url_)
    if (self.has_auth_domain_):
      out.putVarInt32(18)
      out.putPrefixedString(self.auth_domain_)

  def OutputPartial(self, out):
    if (self.has_destination_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.destination_url_)
    if (self.has_auth_domain_):
      out.putVarInt32(18)
      out.putPrefixedString(self.auth_domain_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_destination_url(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_auth_domain(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_destination_url_: res+=prefix+("destination_url: %s\n" % self.DebugFormatString(self.destination_url_))
    if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kdestination_url = 1
  kauth_domain = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "destination_url",
    2: "auth_domain",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateLogoutURLResponse(ProtocolBuffer.ProtocolMessage):
  """Response for UserService.CreateLogoutURL.

  Fields:
    logout_url: required string, field 1.
  """
  has_logout_url_ = 0
  logout_url_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def logout_url(self): return self.logout_url_

  def set_logout_url(self, x):
    self.has_logout_url_ = 1
    self.logout_url_ = x

  def clear_logout_url(self):
    if self.has_logout_url_:
      self.has_logout_url_ = 0
      self.logout_url_ = ""

  def has_logout_url(self): return self.has_logout_url_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_logout_url()): self.set_logout_url(x.logout_url())

  def Equals(self, x):
    if x is self: return 1
    if self.has_logout_url_ != x.has_logout_url_: return 0
    if self.has_logout_url_ and self.logout_url_ != x.logout_url_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_logout_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: logout_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.logout_url_))
    # + 1 is the tag byte of the required field.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_logout_url_):
      n += 1
      n += self.lengthString(len(self.logout_url_))
    return n

  def Clear(self):
    self.clear_logout_url()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.logout_url_)

  def OutputPartial(self, out):
    if (self.has_logout_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.logout_url_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_logout_url(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_logout_url_: res+=prefix+("logout_url: %s\n" % self.DebugFormatString(self.logout_url_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  klogout_url = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "logout_url",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class GetOAuthUserRequest(ProtocolBuffer.ProtocolMessage):
  """Request message for UserService.GetOAuthUser; carries no fields."""

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def MergeFrom(self, x):
    # Nothing to copy; merging a message into itself is a programming error.
    assert x is not self

  def Equals(self, x):
    # All instances compare equal: there is no field state.
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields, so every instance is initialized.
    return 1

  def ByteSize(self):
    return 0

  def ByteSizePartial(self):
    return 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Consume and skip every tag; a zero tag marks corrupt input.
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    return ""

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify the sparse tag map into a tuple indexed by tag number.
    return tuple(sparse.get(i, default) for i in xrange(1 + maxtag))

  _TEXT = _BuildTagLookupTable({0: "ErrorCode"}, 0)
  _TYPES = _BuildTagLookupTable({0: ProtocolBuffer.Encoder.NUMERIC}, 0,
                                ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class GetOAuthUserResponse(ProtocolBuffer.ProtocolMessage):
  """Response for UserService.GetOAuthUser.

  Fields:
    email: required string, field 1.
    user_id: required string, field 2.
    auth_domain: required string, field 3.
    user_organization: optional string, field 4.
    is_admin: optional bool, field 5.
  """
  has_email_ = 0
  email_ = ""
  has_user_id_ = 0
  user_id_ = ""
  has_auth_domain_ = 0
  auth_domain_ = ""
  has_user_organization_ = 0
  user_organization_ = ""
  has_is_admin_ = 0
  is_admin_ = 0

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def email(self): return self.email_

  def set_email(self, x):
    self.has_email_ = 1
    self.email_ = x

  def clear_email(self):
    if self.has_email_:
      self.has_email_ = 0
      self.email_ = ""

  def has_email(self): return self.has_email_

  def user_id(self): return self.user_id_

  def set_user_id(self, x):
    self.has_user_id_ = 1
    self.user_id_ = x

  def clear_user_id(self):
    if self.has_user_id_:
      self.has_user_id_ = 0
      self.user_id_ = ""

  def has_user_id(self): return self.has_user_id_

  def auth_domain(self): return self.auth_domain_

  def set_auth_domain(self, x):
    self.has_auth_domain_ = 1
    self.auth_domain_ = x

  def clear_auth_domain(self):
    if self.has_auth_domain_:
      self.has_auth_domain_ = 0
      self.auth_domain_ = ""

  def has_auth_domain(self): return self.has_auth_domain_

  def user_organization(self): return self.user_organization_

  def set_user_organization(self, x):
    self.has_user_organization_ = 1
    self.user_organization_ = x

  def clear_user_organization(self):
    if self.has_user_organization_:
      self.has_user_organization_ = 0
      self.user_organization_ = ""

  def has_user_organization(self): return self.has_user_organization_

  def is_admin(self): return self.is_admin_

  def set_is_admin(self, x):
    self.has_is_admin_ = 1
    self.is_admin_ = x

  def clear_is_admin(self):
    if self.has_is_admin_:
      self.has_is_admin_ = 0
      self.is_admin_ = 0

  def has_is_admin(self): return self.has_is_admin_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_email()): self.set_email(x.email())
    if (x.has_user_id()): self.set_user_id(x.user_id())
    if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
    if (x.has_user_organization()): self.set_user_organization(x.user_organization())
    if (x.has_is_admin()): self.set_is_admin(x.is_admin())

  def Equals(self, x):
    if x is self: return 1
    if self.has_email_ != x.has_email_: return 0
    if self.has_email_ and self.email_ != x.email_: return 0
    if self.has_user_id_ != x.has_user_id_: return 0
    if self.has_user_id_ and self.user_id_ != x.user_id_: return 0
    if self.has_auth_domain_ != x.has_auth_domain_: return 0
    if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
    if self.has_user_organization_ != x.has_user_organization_: return 0
    if self.has_user_organization_ and self.user_organization_ != x.user_organization_: return 0
    if self.has_is_admin_ != x.has_is_admin_: return 0
    if self.has_is_admin_ and self.is_admin_ != x.is_admin_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_email_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: email not set.')
    if (not self.has_user_id_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: user_id not set.')
    if (not self.has_auth_domain_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: auth_domain not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.email_))
    n += self.lengthString(len(self.user_id_))
    n += self.lengthString(len(self.auth_domain_))
    if (self.has_user_organization_): n += 1 + self.lengthString(len(self.user_organization_))
    # is_admin costs 2 bytes: one tag byte plus one varint bool byte.
    if (self.has_is_admin_): n += 2
    # + 3 covers the tag bytes of the three always-serialized required fields.
    return n + 3

  def ByteSizePartial(self):
    n = 0
    if (self.has_email_):
      n += 1
      n += self.lengthString(len(self.email_))
    if (self.has_user_id_):
      n += 1
      n += self.lengthString(len(self.user_id_))
    if (self.has_auth_domain_):
      n += 1
      n += self.lengthString(len(self.auth_domain_))
    if (self.has_user_organization_): n += 1 + self.lengthString(len(self.user_organization_))
    if (self.has_is_admin_): n += 2
    return n

  def Clear(self):
    self.clear_email()
    self.clear_user_id()
    self.clear_auth_domain()
    self.clear_user_organization()
    self.clear_is_admin()

  def OutputUnchecked(self, out):
    # Tags 10/18/26/34 are string fields 1-4; tag 40 is bool field 5 (varint).
    out.putVarInt32(10)
    out.putPrefixedString(self.email_)
    out.putVarInt32(18)
    out.putPrefixedString(self.user_id_)
    out.putVarInt32(26)
    out.putPrefixedString(self.auth_domain_)
    if (self.has_user_organization_):
      out.putVarInt32(34)
      out.putPrefixedString(self.user_organization_)
    if (self.has_is_admin_):
      out.putVarInt32(40)
      out.putBoolean(self.is_admin_)

  def OutputPartial(self, out):
    if (self.has_email_):
      out.putVarInt32(10)
      out.putPrefixedString(self.email_)
    if (self.has_user_id_):
      out.putVarInt32(18)
      out.putPrefixedString(self.user_id_)
    if (self.has_auth_domain_):
      out.putVarInt32(26)
      out.putPrefixedString(self.auth_domain_)
    if (self.has_user_organization_):
      out.putVarInt32(34)
      out.putPrefixedString(self.user_organization_)
    if (self.has_is_admin_):
      out.putVarInt32(40)
      out.putBoolean(self.is_admin_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_email(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_user_id(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_auth_domain(d.getPrefixedString())
        continue
      if tt == 34:
        self.set_user_organization(d.getPrefixedString())
        continue
      if tt == 40:
        self.set_is_admin(d.getBoolean())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_email_: res+=prefix+("email: %s\n" % self.DebugFormatString(self.email_))
    if self.has_user_id_: res+=prefix+("user_id: %s\n" % self.DebugFormatString(self.user_id_))
    if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
    if self.has_user_organization_: res+=prefix+("user_organization: %s\n" % self.DebugFormatString(self.user_organization_))
    if self.has_is_admin_: res+=prefix+("is_admin: %s\n" % self.DebugFormatBool(self.is_admin_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kemail = 1
  kuser_id = 2
  kauth_domain = 3
  kuser_organization = 4
  kis_admin = 5

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "email",
    2: "user_id",
    3: "auth_domain",
    4: "user_organization",
    5: "is_admin",
  }, 5)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.NUMERIC,
  }, 5, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CheckOAuthSignatureRequest(ProtocolBuffer.ProtocolMessage):
  """Request message for UserService.CheckOAuthSignature; carries no fields."""

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def MergeFrom(self, x):
    # Nothing to copy; merging a message into itself is a programming error.
    assert x is not self

  def Equals(self, x):
    # All instances compare equal: there is no field state.
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields, so every instance is initialized.
    return 1

  def ByteSize(self):
    return 0

  def ByteSizePartial(self):
    return 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Consume and skip every tag; a zero tag marks corrupt input.
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    return ""

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify the sparse tag map into a tuple indexed by tag number.
    return tuple(sparse.get(i, default) for i in xrange(1 + maxtag))

  _TEXT = _BuildTagLookupTable({0: "ErrorCode"}, 0)
  _TYPES = _BuildTagLookupTable({0: ProtocolBuffer.Encoder.NUMERIC}, 0,
                                ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CheckOAuthSignatureResponse(ProtocolBuffer.ProtocolMessage):
  """Response for UserService.CheckOAuthSignature.

  Fields:
    oauth_consumer_key: required string, field 1.
  """
  has_oauth_consumer_key_ = 0
  oauth_consumer_key_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def oauth_consumer_key(self): return self.oauth_consumer_key_

  def set_oauth_consumer_key(self, x):
    self.has_oauth_consumer_key_ = 1
    self.oauth_consumer_key_ = x

  def clear_oauth_consumer_key(self):
    if self.has_oauth_consumer_key_:
      self.has_oauth_consumer_key_ = 0
      self.oauth_consumer_key_ = ""

  def has_oauth_consumer_key(self): return self.has_oauth_consumer_key_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_oauth_consumer_key()): self.set_oauth_consumer_key(x.oauth_consumer_key())

  def Equals(self, x):
    if x is self: return 1
    if self.has_oauth_consumer_key_ != x.has_oauth_consumer_key_: return 0
    if self.has_oauth_consumer_key_ and self.oauth_consumer_key_ != x.oauth_consumer_key_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_oauth_consumer_key_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: oauth_consumer_key not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.oauth_consumer_key_))
    # + 1 is the tag byte of the required field.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_oauth_consumer_key_):
      n += 1
      n += self.lengthString(len(self.oauth_consumer_key_))
    return n

  def Clear(self):
    self.clear_oauth_consumer_key()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.oauth_consumer_key_)

  def OutputPartial(self, out):
    if (self.has_oauth_consumer_key_):
      out.putVarInt32(10)
      out.putPrefixedString(self.oauth_consumer_key_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_oauth_consumer_key(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_oauth_consumer_key_: res+=prefix+("oauth_consumer_key: %s\n" % self.DebugFormatString(self.oauth_consumer_key_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  koauth_consumer_key = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "oauth_consumer_key",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateFederatedLoginRequest(ProtocolBuffer.ProtocolMessage):
  """Request for UserService.CreateFederatedLogin.

  Fields:
    claimed_id: required string, field 1.
    continue_url: required string, field 2.
    authority: optional string, field 3.
  """
  has_claimed_id_ = 0
  claimed_id_ = ""
  has_continue_url_ = 0
  continue_url_ = ""
  has_authority_ = 0
  authority_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def claimed_id(self): return self.claimed_id_

  def set_claimed_id(self, x):
    self.has_claimed_id_ = 1
    self.claimed_id_ = x

  def clear_claimed_id(self):
    if self.has_claimed_id_:
      self.has_claimed_id_ = 0
      self.claimed_id_ = ""

  def has_claimed_id(self): return self.has_claimed_id_

  def continue_url(self): return self.continue_url_

  def set_continue_url(self, x):
    self.has_continue_url_ = 1
    self.continue_url_ = x

  def clear_continue_url(self):
    if self.has_continue_url_:
      self.has_continue_url_ = 0
      self.continue_url_ = ""

  def has_continue_url(self): return self.has_continue_url_

  def authority(self): return self.authority_

  def set_authority(self, x):
    self.has_authority_ = 1
    self.authority_ = x

  def clear_authority(self):
    if self.has_authority_:
      self.has_authority_ = 0
      self.authority_ = ""

  def has_authority(self): return self.has_authority_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_claimed_id()): self.set_claimed_id(x.claimed_id())
    if (x.has_continue_url()): self.set_continue_url(x.continue_url())
    if (x.has_authority()): self.set_authority(x.authority())

  def Equals(self, x):
    if x is self: return 1
    if self.has_claimed_id_ != x.has_claimed_id_: return 0
    if self.has_claimed_id_ and self.claimed_id_ != x.claimed_id_: return 0
    if self.has_continue_url_ != x.has_continue_url_: return 0
    if self.has_continue_url_ and self.continue_url_ != x.continue_url_: return 0
    if self.has_authority_ != x.has_authority_: return 0
    if self.has_authority_ and self.authority_ != x.authority_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_claimed_id_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: claimed_id not set.')
    if (not self.has_continue_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: continue_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.claimed_id_))
    n += self.lengthString(len(self.continue_url_))
    if (self.has_authority_): n += 1 + self.lengthString(len(self.authority_))
    # + 2 covers the tag bytes of the two always-serialized required fields.
    return n + 2

  def ByteSizePartial(self):
    n = 0
    if (self.has_claimed_id_):
      n += 1
      n += self.lengthString(len(self.claimed_id_))
    if (self.has_continue_url_):
      n += 1
      n += self.lengthString(len(self.continue_url_))
    if (self.has_authority_): n += 1 + self.lengthString(len(self.authority_))
    return n

  def Clear(self):
    self.clear_claimed_id()
    self.clear_continue_url()
    self.clear_authority()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.claimed_id_)
    out.putVarInt32(18)
    out.putPrefixedString(self.continue_url_)
    if (self.has_authority_):
      out.putVarInt32(26)
      out.putPrefixedString(self.authority_)

  def OutputPartial(self, out):
    if (self.has_claimed_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.claimed_id_)
    if (self.has_continue_url_):
      out.putVarInt32(18)
      out.putPrefixedString(self.continue_url_)
    if (self.has_authority_):
      out.putVarInt32(26)
      out.putPrefixedString(self.authority_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_claimed_id(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_continue_url(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_authority(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_claimed_id_: res+=prefix+("claimed_id: %s\n" % self.DebugFormatString(self.claimed_id_))
    if self.has_continue_url_: res+=prefix+("continue_url: %s\n" % self.DebugFormatString(self.continue_url_))
    if self.has_authority_: res+=prefix+("authority: %s\n" % self.DebugFormatString(self.authority_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kclaimed_id = 1
  kcontinue_url = 2
  kauthority = 3

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "claimed_id",
    2: "continue_url",
    3: "authority",
  }, 3)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateFederatedLoginResponse(ProtocolBuffer.ProtocolMessage):
  """Response for UserService.CreateFederatedLogin.

  Fields:
    redirected_url: required string, field 1.
  """
  has_redirected_url_ = 0
  redirected_url_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def redirected_url(self): return self.redirected_url_

  def set_redirected_url(self, x):
    self.has_redirected_url_ = 1
    self.redirected_url_ = x

  def clear_redirected_url(self):
    if self.has_redirected_url_:
      self.has_redirected_url_ = 0
      self.redirected_url_ = ""

  def has_redirected_url(self): return self.has_redirected_url_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_redirected_url()): self.set_redirected_url(x.redirected_url())

  def Equals(self, x):
    if x is self: return 1
    if self.has_redirected_url_ != x.has_redirected_url_: return 0
    if self.has_redirected_url_ and self.redirected_url_ != x.redirected_url_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_redirected_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: redirected_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.redirected_url_))
    # + 1 is the tag byte of the required field.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_redirected_url_):
      n += 1
      n += self.lengthString(len(self.redirected_url_))
    return n

  def Clear(self):
    self.clear_redirected_url()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.redirected_url_)

  def OutputPartial(self, out):
    if (self.has_redirected_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.redirected_url_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_redirected_url(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_redirected_url_: res+=prefix+("redirected_url: %s\n" % self.DebugFormatString(self.redirected_url_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kredirected_url = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "redirected_url",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateFederatedLogoutRequest(ProtocolBuffer.ProtocolMessage):
  """Request for UserService.CreateFederatedLogout.

  Fields:
    destination_url: required string, field 1.
  """
  has_destination_url_ = 0
  destination_url_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def destination_url(self): return self.destination_url_

  def set_destination_url(self, x):
    self.has_destination_url_ = 1
    self.destination_url_ = x

  def clear_destination_url(self):
    if self.has_destination_url_:
      self.has_destination_url_ = 0
      self.destination_url_ = ""

  def has_destination_url(self): return self.has_destination_url_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_destination_url()): self.set_destination_url(x.destination_url())

  def Equals(self, x):
    if x is self: return 1
    if self.has_destination_url_ != x.has_destination_url_: return 0
    if self.has_destination_url_ and self.destination_url_ != x.destination_url_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_destination_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: destination_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.destination_url_))
    # + 1 is the tag byte of the required field.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_destination_url_):
      n += 1
      n += self.lengthString(len(self.destination_url_))
    return n

  def Clear(self):
    self.clear_destination_url()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.destination_url_)

  def OutputPartial(self, out):
    if (self.has_destination_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.destination_url_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_destination_url(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_destination_url_: res+=prefix+("destination_url: %s\n" % self.DebugFormatString(self.destination_url_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kdestination_url = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "destination_url",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateFederatedLogoutResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message with one required string field: logout_url (tag 1).

  NOTE(review): machine-generated App Engine protocol-buffer code (Python 2
  era; note ``xrange`` below). Do not hand-edit the wire-format logic.
  """
  has_logout_url_ = 0
  logout_url_ = ""
  def __init__(self, contents=None):
    # Optionally initialize from a serialized byte string.
    if contents is not None: self.MergeFromString(contents)
  def logout_url(self): return self.logout_url_
  def set_logout_url(self, x):
    self.has_logout_url_ = 1
    self.logout_url_ = x
  def clear_logout_url(self):
    if self.has_logout_url_:
      self.has_logout_url_ = 0
      self.logout_url_ = ""
  def has_logout_url(self): return self.has_logout_url_
  def MergeFrom(self, x):
    assert x is not self
    if (x.has_logout_url()): self.set_logout_url(x.logout_url())
  def Equals(self, x):
    if x is self: return 1
    if self.has_logout_url_ != x.has_logout_url_: return 0
    if self.has_logout_url_ and self.logout_url_ != x.logout_url_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # logout_url is a required field; report when it is missing.
    initialized = 1
    if (not self.has_logout_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: logout_url not set.')
    return initialized
  def ByteSize(self):
    # The trailing +1 accounts for the one-byte field tag.
    n = 0
    n += self.lengthString(len(self.logout_url_))
    return n + 1
  def ByteSizePartial(self):
    n = 0
    if (self.has_logout_url_):
      n += 1
      n += self.lengthString(len(self.logout_url_))
    return n
  def Clear(self):
    self.clear_logout_url()
  def OutputUnchecked(self, out):
    # 10 == field number 1, length-delimited wire type.
    out.putVarInt32(10)
    out.putPrefixedString(self.logout_url_)
  def OutputPartial(self, out):
    if (self.has_logout_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.logout_url_)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_logout_url(d.getPrefixedString())
        continue
      # Tag 0 is invalid on the wire; anything else unknown is skipped.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_logout_url_: res+=prefix+("logout_url: %s\n" % self.DebugFormatString(self.logout_url_))
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Used below as a plain function during class-body construction.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
  klogout_url = 1
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "logout_url",
  }, 1)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
# Public names exported by this generated module.
__all__ = ['UserServiceError','CreateLoginURLRequest','CreateLoginURLResponse','CreateLogoutURLRequest','CreateLogoutURLResponse','GetOAuthUserRequest','GetOAuthUserResponse','CheckOAuthSignatureRequest','CheckOAuthSignatureResponse','CreateFederatedLoginRequest','CreateFederatedLoginResponse','CreateFederatedLogoutRequest','CreateFederatedLogoutResponse']
| SRabbelier/Melange | thirdparty/google_appengine/google/appengine/api/user_service_pb.py | Python | apache-2.0 | 42,563 |
#!/usr/bin/env python3
import sys
import requests
from html.parser import HTMLParser
from html.entities import name2codepoint
s1 = 'abc'  # NOTE(review): unused in this file; candidate for removal
class MyHTMLParser(HTMLParser):
    """HTML parser that echoes every parse event and records href values."""

    def __init__(self):
        # All href attribute values seen so far, in document order.
        self.href_list = []
        super().__init__()

    def handle_starttag(self, tag, attrs):
        print("Start tag:[{}]".format(self.getpos()), tag)
        for name, value in attrs:
            print("     attr:[{}]".format(self.getpos()), (name, value))
            if name == 'href':
                self.href_list.append(value)

    def handle_endtag(self, tag):
        print("End tag  :", tag)

    def handle_data(self, data):
        print("Data     :", data)

    def handle_comment(self, data):
        print("Comment  :", data)

    def handle_entityref(self, name):
        # Resolve a named entity (e.g. "amp") to its character.
        print("Named ent:", chr(name2codepoint[name]))

    def handle_charref(self, name):
        # Numeric character reference: "xHH" is hexadecimal, otherwise decimal.
        if name.startswith('x'):
            code_point = int(name[1:], 16)
        else:
            code_point = int(name)
        print("Num ent  :", chr(code_point))

    def handle_decl(self, data):
        print("Decl     :", data)
url = 'https://docs.python.org/3/library/index.html'
request_time_out = 20  # seconds
# Network side effect at import time: fetches the page before parsing it.
r = requests.get(url, timeout=request_time_out)
# NOTE(review): c1 is an inline sample document but is never fed to the
# parser -- only the downloaded page is parsed. Confirm whether it is needed.
c1 = '''<html><head><title>Test</title></head>
<body><h1>Parse me!</h1></body></html>'''
parser = MyHTMLParser()
# NOTE(review): decode() assumes UTF-8 content -- confirm for other URLs.
parser.feed(r.content.decode())
print('-' * 20)
print(type(r.content))
# [print(chr(x)) for x in r.content]
# [print(x) for x in parser.href_list]
| jingwangian/tutorial | python/html/html_st.py | Python | gpl-3.0 | 1,496 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
import runner
import template
| nicolas-petit/clouder | clouder_runner_openshift/__init__.py | Python | gpl-3.0 | 983 |
from zope import component
from zope import interface
from zope.interface.interfaces import IObjectEvent
from zope import location
from sparc.configuration.container import ISparcPyContainerConfiguredApplication
#EVENTS
# zope.interface declarations: the methods below define contracts only and
# intentionally take no self parameter.
class ISnippetAvailableForSecretsSniffEvent(IObjectEvent):
    """An object providing ISnippet is ready to be sniffed for secrets"""
#APPLICATION & FACTORIES
class IMellonApplication(ISparcPyContainerConfiguredApplication):
    """The Application"""
class IMellonFileProvider(interface.Interface):
    """Provides IFile objects that should be processed by the application"""
    def __iter__():
        """Iterator of IFile objects"""
class IMellonFileProviderFactory(component.IFactory):
    """A factory producing a IMellonFileProvider"""
    def __call__(config):
        """
        Args:
            config; factory specific data structure holding required object
            initialization information needed by factory
        """
#SNIPPETS
# A snippet is a located chunk of a file's data; its __parent__ points back
# to the IMellonFile it came from.
class ISnippet(location.ILocation):
    """A snippet of data to be sniffed for secrets
    This also implements ILocation, where __parent__ is a IMellonFile and
    __name__ indicates where in the file the snippet can be located at.
    """
    data = interface.Attribute("A Python data sequence")
class IBytesSnippet(ISnippet):
    """A snippet of bytes data to be sniffed for secrets"""
    data = interface.Attribute("A Python bytes sequence")
class IUnicodeSnippet(ISnippet):
    """A snippet of unicode data to be sniffed for secrets"""
    data = interface.Attribute("A Python unicode sequence")
class ISnippetIterator(interface.Interface):
    """Iterates data snippets"""
    def __iter__():
        """Iterator of ISnippet objects"""
#FILES
# File interfaces: an IMellonFile is itself an iterable of ISnippet chunks.
class IPath(interface.Interface):
    """Marker for text that is a formatted file system path"""
class IFile(interface.Interface):
    """Marker for file-like object providing Python's file object interface"""
class IMellonFile(ISnippetIterator):
    """A file to be processed by the application"""
    def __str__():
        """String locatable identity of file"""
class IUnicodeMellonFile(IMellonFile):
    """A Unicode (text) file to be processed by the application"""
    snippet_lines_increment = \
        interface.Attribute("Number of lines to jump after each snippet, 0 "+
                            "indicates entire data.")
    snippet_lines_coverage = \
        interface.Attribute("Number of lines to include in each snippet "+
                            "if available, 0 indicates all remaining lines.")
class IByteMellonFile(IMellonFile):
    """A byte (binary) file to be processed by the application"""
    # NOTE(review): the read_size description below appears to merge two
    # sentences (its tail repeats the increment semantics) -- confirm intent.
    read_size = interface.Attribute(\
        "Max number of bytes to include in each file read operation."+
        "Number of bytes to jump after each snippet, 0 indicates entire data.")
    snippet_bytes_increment = \
        interface.Attribute("Number of read_size data packets to jump after "+
                            "snippet return.")
    snippet_bytes_coverage = \
        interface.Attribute("Number of read_size data packets to include in "+
                            "each snippet. 0 indicates all data packets.")
class IBinaryChecker(interface.Interface):
    """Binary file checker"""
    def check():
        """True indicates the data was found to be binary"""
# SNIFFERS, SECRETS, WHITELISTS
# Sniffers yield ISecret findings for a snippet; whitelists can veto findings.
class ISecretSniffer(interface.Interface):
    """Looks for a secret"""
    def __iter__():
        """Iterator of found ISecret providers"""
class ISecret(location.ILocation):
    """A secret found within a ISnippet
    This also implements ILocation, where __parent__ is a ISnippet and
    __name__ is alias for __str__.
    """
    def __str__():
        """String details of the secret and/or how it was found"""
    def __hash__():
        """Uniquely identifies the locatable secret among other secrets"""
class IWhitelistInfo(interface.Interface):
    """Object whitelist information"""
    def __str__():
        """Detailed information on how object was whitelisted"""
class IWhitelist(interface.Interface):
    """Identifies if object is whitelisted"""
    def __iter__():
        """Iterator of found IWhitelistInfo providers"""
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from openstack.tests.functional import base
from openstack.tests.functional.image.v2.test_image import TEST_IMAGE_NAME
class TestImage(base.BaseFunctionalTest):
    """Functional tests for the compute (Nova) image proxy.

    These run against a live cloud through ``self.conn``; they require at
    least one image (besides the shared test image) to exist.
    """
    def test_images(self):
        # Listing must return at least one image with a string id.
        images = list(self.conn.compute.images())
        self.assertGreater(len(images), 0)
        for image in images:
            self.assertIsInstance(image.id, six.string_types)
    def _get_non_test_image(self):
        """Return the first listed image that is not the shared test image.

        Assumes the cloud has a second image when the first one is the
        test fixture; otherwise ``next`` raises StopIteration.
        """
        images = self.conn.compute.images()
        image = next(images)
        if image.name == TEST_IMAGE_NAME:
            image = next(images)
        return image
    def test_find_image(self):
        # find_image by id must round-trip to the same id/name.
        image = self._get_non_test_image()
        self.assertIsNotNone(image)
        sot = self.conn.compute.find_image(image.id)
        self.assertEqual(image.id, sot.id)
        self.assertEqual(image.name, sot.name)
    def test_get_image(self):
        image = self._get_non_test_image()
        self.assertIsNotNone(image)
        sot = self.conn.compute.get_image(image.id)
        self.assertEqual(image.id, sot.id)
        self.assertEqual(image.name, sot.name)
        # NOTE(review): these membership checks inspect ``image`` (from the
        # list call), not ``sot`` (from get_image) -- confirm that is intended.
        self.assertIn('links', image)
        self.assertIn('minDisk', image)
        self.assertIn('minRam', image)
        self.assertIn('metadata', image)
        self.assertIn('progress', image)
        self.assertIn('status', image)
| dudymas/python-openstacksdk | openstack/tests/functional/compute/v2/test_image.py | Python | apache-2.0 | 1,884 |
#! /usr/bin/python
"""
test_frame.py
Paul Malmsten, 2010
pmalmsten@gmail.com
Tests frame module for proper behavior
"""
import unittest
from xbee.frame import APIFrame
from xbee.python2to3 import byteToInt, intToByte
class TestAPIFrameGeneration(unittest.TestCase):
    """
    XBee class must be able to create a valid API frame given binary
    data, in byte string form.
    """
    def test_single_byte(self):
        """
        create a frame containing a single byte
        """
        payload = b'\x00'
        # Expected layout: start delimiter, two length bytes, payload, checksum.
        built_frame = APIFrame(payload).output()
        self.assertEqual(built_frame, b'\x7E\x00\x01\x00\xFF')
class TestAPIFrameParsing(unittest.TestCase):
    """
    XBee class must be able to read and validate the data contained
    by a valid API frame.
    """
    def test_remaining_bytes(self):
        """
        remaining_bytes() should provide accurate indication
        of remaining bytes required before parsing a packet
        """
        api_frame = APIFrame()
        # Frame layout: start byte, two length bytes (0x0004 here), four data
        # bytes, one checksum byte.
        frame = b'\x7E\x00\x04\x00\x00\x00\x00\xFF'
        # Until the length bytes arrive, only the 3-byte header is expected.
        self.assertEqual(api_frame.remaining_bytes(), 3)
        api_frame.fill(frame[0])
        self.assertEqual(api_frame.remaining_bytes(), 2)
        api_frame.fill(frame[1])
        self.assertEqual(api_frame.remaining_bytes(), 1)
        api_frame.fill(frame[2])
        # Length now known (4): four data bytes plus checksum remain.
        # NOTE(review): frame[0] is an int on Python 3 while test_single_byte
        # wraps bytes with intToByte -- confirm fill() accepts both forms.
        self.assertEqual(api_frame.remaining_bytes(), 5)
        api_frame.fill(frame[3])
        self.assertEqual(api_frame.remaining_bytes(), 4)
    def test_single_byte(self):
        """
        read a frame containing a single byte
        """
        api_frame = APIFrame()
        frame = b'\x7E\x00\x01\x00\xFF'
        expected_data = b'\x00'
        for byte in frame:
            api_frame.fill(intToByte(byteToInt(byte)))
        api_frame.parse()
        self.assertEqual(api_frame.data, expected_data)
    def test_invalid_checksum(self):
        """
        when an invalid frame is read, an exception must be raised
        """
        api_frame = APIFrame()
        # Same frame as above but with a corrupted checksum byte (0xF6).
        frame = b'\x7E\x00\x01\x00\xF6'
        for byte in frame:
            api_frame.fill(intToByte(byteToInt(byte)))
        self.assertRaises(ValueError, api_frame.parse)
class TestEscaping(unittest.TestCase):
    """
    APIFrame class must properly escape and unescape data
    """
    def test_escape_method(self):
        """
        APIFrame.escape() must work as expected
        """
        # Escaping the start byte yields the escape byte plus 0x7E ^ 0x20.
        escaped = APIFrame.escape(APIFrame.START_BYTE)
        self.assertEqual(escaped, APIFrame.ESCAPE_BYTE + b'\x5e')
    def test_unescape_input(self):
        """
        APIFrame must properly unescape escaped input
        """
        raw = b'\x7D\x23'
        frame = APIFrame(escaped=True)
        for index in range(len(raw)):
            frame.fill(raw[index:index + 1])
        self.assertEqual(frame.raw_data, b'\x03')
| nioinnovation/python-xbee | xbee/tests/test_frame.py | Python | mit | 3,016 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines worldcountry.id as the
    # standard auto-created AutoField primary key. Avoid editing a migration
    # once it has been applied anywhere.
    dependencies = [
        ('globenocturneapp', '0016_auto_20150701_1624'),
    ]
    operations = [
        migrations.AlterField(
            model_name='worldcountry',
            name='id',
            field=models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True),
            preserve_default=True,
        ),
    ]
| qliu/globe_nocturne | globenocturne/globenocturneapp/migrations/0017_auto_20150701_1624.py | Python | gpl-2.0 | 506 |
#python
import k3d
import testing
# Build a pipeline PolyTorus -> TriangulateFaces -> PGPRemesh, then validate
# the remeshed output against a stored reference mesh.
document = k3d.new_document()
source = k3d.plugin.create("PolyTorus", document)
triangles = k3d.plugin.create("TriangulateFaces", document)
triangles.mesh_selection = k3d.select_all()  # triangulate every face
k3d.property.connect(document, source.get_property("output_mesh"), triangles.get_property("input_mesh"))
modifier = k3d.plugin.create("PGPRemesh", document)
# PGP remeshing parameters; see the PGPRemesh plugin for their semantics.
modifier.use_smooth = False
modifier.steps = 0
modifier.omega = 1
modifier.div = 2
modifier.triangulate = True
k3d.property.connect(document, triangles.get_property("output_mesh"), modifier.get_property("input_mesh"))
#print "source output: " + repr(source.output_mesh)
#print "triangles output: " + repr(triangles.output_mesh)
#print "modifier output: " + repr(modifier.output_mesh)
# Fail the test if the output mesh is invalid or differs from the reference.
testing.require_valid_mesh(document, modifier.get_property("output_mesh"))
testing.require_similar_mesh(document, modifier.get_property("output_mesh"), "mesh.modifier.PGPRemesh", 1)
| K-3D/k3d | tests/mesh/mesh.modifier.PGPRemesh.py | Python | gpl-2.0 | 949 |
#!/usr/bin/env python3
'''
An example of three labels packed into three separate PanedWindow areas with
values set for the width of handle and sash.
'''
import tkinter
class PanedWindow(tkinter.Tk):
    """Demo window: three Labels in a PanedWindow with explicit handle/sash sizing."""
    def __init__(self):
        tkinter.Tk.__init__(self)
        self.title("Label")
        paned = tkinter.PanedWindow()
        paned.config(handlesize=10)
        paned.config(sashwidth=5)
        paned.config(sashrelief=tkinter.RAISED)
        paned.pack(fill=tkinter.BOTH, expand=1)
        # One Label per pane; widgets default to this Tk instance as master.
        for pane_text in ("Label in Pane 1", "Label in Pane 2", "Label in Pane 3"):
            paned.add(tkinter.Label(text=pane_text))
if __name__ == "__main__":
    app = PanedWindow()
    app.mainloop()
| steeleyuk/python-tkinter-examples | panedwindow.py | Python | cc0-1.0 | 866 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds Topic.reindex_at, a DateTimeField
    # defaulting to the current time (presumably a search-reindex marker --
    # name-based inference, confirm against the search app). Avoid editing a
    # migration once it has been applied anywhere.
    dependencies = [
        ('spirit_topic', '0004_update_last_commenter'),
    ]
    operations = [
        migrations.AddField(
            model_name='topic',
            name='reindex_at',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='reindex at'),
        ),
    ]
| alesdotio/Spirit | spirit/topic/migrations/0005_topic_reindex_at.py | Python | mit | 491 |
#
# Copyright (C) 2000-2005 by Yasushi Saito (yasushi.saito@gmail.com)
#
# Pychart is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Pychart is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from pychart import *
theme.get_options()  # parse PyChart's standard command-line options (output format, etc.)
# We have 10 sample points total. The first value in each tuple is
# the X value, and subsequent values are Y values for different lines.
data = [(10, 20, 30), (20, 65, 33),
        (30, 55, 30), (40, 45, 51),
        (50, 25, 27), (60, 75, 30),
        (70, 80, 42), (80, 62, 32),
        (90, 42, 39), (100, 32, 39)]
# The format attribute specifies the text to be drawn at each tick mark.
# Here, texts are rotated -60 degrees ("/a-60"), left-aligned ("/hL"),
# and numbers are printed as integers ("%d").
xaxis = axis.X(format="/a-60/hL%d", tic_interval = 20, label="Stuff")
yaxis = axis.Y(tic_interval = 20, label="Value")
# Define the drawing area. "y_range=(0,None)" tells that the Y minimum
# is 0, but the Y maximum is to be computed automatically. Without
# y_ranges, Pychart will pick the minimum Y value among the samples,
# i.e., 20, as the base value of Y axis.
ar = area.T(x_axis=xaxis, y_axis=yaxis, y_range=(0,None))
# The first plot extracts Y values from the 2nd column
# ("ycol=1") of DATA ("data=data"). X values are takes from the first
# column, which is the default.
plot = line_plot.T(label="foo", data=data, ycol=1, tick_mark=tick_mark.star)
plot2 = line_plot.T(label="bar", data=data, ycol=2, tick_mark=tick_mark.square)
ar.add_plot(plot, plot2)
# The call to ar.draw() usually comes at the end of a program. It
# draws the axes, the plots, and the legend (if any).
ar.draw()
| cecep-edu/refactory | requirements/PyChart-1.39/demos/linetest.py | Python | gpl-3.0 | 1,970 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_fold.util.proto."""
import os
# import google3
import tensorflow as tf
from tensorflow_fold.util import proto_tools
from tensorflow_fold.util import test3_pb2
from tensorflow_fold.util import test_pb2
from google.protobuf import text_format
# Make sure SerializedMessageToTree can see our proto files.
proto_tools.map_proto_source_tree_path("", os.getcwd())
# Note: Tests run in the bazel root directory, which we will use as the root for
# our source protos.
proto_tools.import_proto_file("tensorflow_fold/util/test.proto")
proto_tools.import_proto_file("tensorflow_fold/util/test3.proto")
def MakeCyclicProto(message_str):
  """Parse text-format *message_str* into a proto2 test_pb2.CyclicType."""
  return text_format.Parse(message_str, test_pb2.CyclicType())
def MakeCyclicProto3(message_str):
  """Parse text-format *message_str* into a proto3 test3_pb2.CyclicType3."""
  return text_format.Parse(message_str, test3_pb2.CyclicType3())
def MakeOneAtomProto(message_str):
  """Parse text-format *message_str* into a test_pb2.OneAtom message."""
  return text_format.Parse(message_str, test_pb2.OneAtom())
class ProtoTest(tf.test.TestCase):
  """Tests serialized_message_to_tree over proto2, proto3, oneofs and enums."""
  def testSerializedMessageToTree(self):
    """Proto2: repeated fields map to lists; unset optionals map to None."""
    example = MakeCyclicProto(
        "some_same<"
        "  many_int32: 1"
        "  many_int32: 2"
        "  some_same<"
        "    many_int32: 3"
        "    many_int32: 4"
        "    some_bool: false"
        "  >"
        ">"
        "some_enum: THAT")
    result = proto_tools.serialized_message_to_tree(
        "tensorflow.fold.CyclicType", example.SerializeToString())
    self.assertEqual(result["some_same"]["many_int32"], [1, 2])
    self.assertEqual(result["some_same"]["some_same"]["many_int32"], [3, 4])
    self.assertEqual(result["some_same"]["some_same"]["some_bool"], False)
    self.assertEqual(result["many_bool"], [])
    self.assertEqual(result["some_bool"], None)
    self.assertEqual(result["some_same"]["many_bool"], [])
    self.assertEqual(result["some_same"]["some_bool"], None)
    # Enum values expand to a dict with name/index/number keys.
    self.assertEqual(result["some_enum"]["name"], "THAT")
    self.assertEqual(result["some_enum"]["index"], 1)
    self.assertEqual(result["some_enum"]["number"], 1)
  def testSerializedMessageToTreeProto3(self):
    """Proto3: same shape, but unset scalars get defaults (False), not None."""
    example = MakeCyclicProto3(
        "some_same<"
        "  many_int32: 1"
        "  many_int32: 2"
        "  some_same<"
        "    many_int32: 3"
        "    many_int32: 4"
        "    some_bool: false"
        "  >"
        ">"
        "some_enum: THAT")
    result = proto_tools.serialized_message_to_tree(
        "tensorflow.fold.CyclicType3", example.SerializeToString())
    self.assertEqual(result["some_same"]["many_int32"], [1, 2])
    self.assertEqual(result["some_same"]["some_same"]["many_int32"], [3, 4])
    self.assertEqual(result["some_same"]["some_same"]["some_bool"], False)
    self.assertEqual(result["many_bool"], [])
    self.assertEqual(result["some_bool"], False)
    self.assertEqual(result["some_same"]["many_bool"], [])
    self.assertEqual(result["some_same"]["some_bool"], False)
    self.assertEqual(result["some_enum"]["name"], "THAT")
    self.assertEqual(result["some_enum"]["index"], 1)
    self.assertEqual(result["some_enum"]["number"], 1)
  def testSerializedMessageToTreeOneofEmpty(self):
    """An unset oneof reports atom_type None and all members None."""
    empty_proto = MakeOneAtomProto("").SerializeToString()
    empty_result = proto_tools.serialized_message_to_tree(
        "tensorflow.fold.OneAtom", empty_proto)
    self.assertEqual(empty_result["atom_type"], None)
    self.assertEqual(empty_result["some_int32"], None)
    self.assertEqual(empty_result["some_int64"], None)
    self.assertEqual(empty_result["some_uint32"], None)
    self.assertEqual(empty_result["some_uint64"], None)
    self.assertEqual(empty_result["some_double"], None)
    self.assertEqual(empty_result["some_float"], None)
    self.assertEqual(empty_result["some_bool"], None)
    self.assertEqual(empty_result["some_enum"], None)
    self.assertEqual(empty_result["some_string"], None)
  def testSerializedMessageToTreeOneof(self):
    """A set oneof reports the selected member's field name in atom_type."""
    empty_proto = MakeOneAtomProto("some_string: \"x\"").SerializeToString()
    empty_result = proto_tools.serialized_message_to_tree(
        "tensorflow.fold.OneAtom", empty_proto)
    self.assertEqual(empty_result["atom_type"], "some_string")
    self.assertEqual(empty_result["some_int32"], None)
    self.assertEqual(empty_result["some_int64"], None)
    self.assertEqual(empty_result["some_uint32"], None)
    self.assertEqual(empty_result["some_uint64"], None)
    self.assertEqual(empty_result["some_double"], None)
    self.assertEqual(empty_result["some_float"], None)
    self.assertEqual(empty_result["some_bool"], None)
    self.assertEqual(empty_result["some_enum"], None)
    self.assertEqual(empty_result["some_string"], "x")
  def testNonConsecutiveEnum(self):
    """Enum index (declaration order) and number (wire value) may differ."""
    name = "tensorflow.fold.NonConsecutiveEnumMessage"
    msg = test_pb2.NonConsecutiveEnumMessage(
        the_enum=test_pb2.NonConsecutiveEnumMessage.THREE)
    self.assertEqual(
        {"the_enum": {"name": "THREE", "index": 1, "number": 3}},
        proto_tools.serialized_message_to_tree(name, msg.SerializeToString()))
    msg.the_enum = test_pb2.NonConsecutiveEnumMessage.SEVEN
    self.assertEqual(
        {"the_enum": {"name": "SEVEN", "index": 0, "number": 7}},
        proto_tools.serialized_message_to_tree(name, msg.SerializeToString()))
if __name__ == "__main__":
  tf.test.main()
| pklfz/fold | tensorflow_fold/util/proto_test.py | Python | apache-2.0 | 5,810 |
#!/usr/bin/env python
from enum import Enum
class OTULabelStyleEnum(Enum):
    """How an OTU should be labeled when rendering or exporting a tree."""
    OTT_ID = 0
    CURRENT_LABEL = 1         # OTT_NAME when the OTU is mapped, else ORIGINAL_LABEL
    ORIGINAL_LABEL = 2
    OTT_NAME = 3
    OTT_UNIQNAME = 4
    CURRENT_LABEL_OTT_ID = 5  # formatted as "{CURRENT_LABEL}_{OTT_ID}"
| mtholder/peyotl | peyotl/phylo/entities.py | Python | bsd-2-clause | 303 |
from gwcconfig.GeoWebCacheServer import GeoWebCacheServer
try:
    from config import *
except ImportError:
    # config.py supplies GWC_REST_API_URL/USERNAME/PASSWORD and LAYER_NAME.
    # The original swallowed the error and let main() crash later with a
    # confusing NameError; surface the problem immediately instead.
    print("Failed to load settings")
    raise
def main():
    """Fetch LAYER_NAME from the GeoWebCache REST API, set its cache expiry
    to 604900 seconds (~1 week), print its grid subsets and the layer's
    message, then push the update back to the server.

    Requires the GWC_REST_API_* settings and LAYER_NAME from config.py.
    """
    server = GeoWebCacheServer(GWC_REST_API_URL, GWC_REST_API_USERNAME, GWC_REST_API_PASSWORD)
    test_layer = server.get_layer(LAYER_NAME)
    test_layer.fetch()
    test_layer.expireCache = 604900  # seconds
    for gsu in test_layer.gridSubsets:
        # Fix: the original used Python 2 print statements, a SyntaxError
        # under Python 3. %-formatting keeps the output identical on both.
        print("%s %s" % (gsu.gridSetName, gsu.extent_coords))
    print(test_layer.message())
    server.update(test_layer)
if __name__ == '__main__':
    main()
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Column, Row
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib import admin
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.urls import reverse
from django import forms
from django.db import models
from django.utils.translation import gettext as _
from .helpers.validation import validate_partners, validate_authorizer
from .models import Editor, UserProfile, Authorization
from .groups import get_restricted
class EditorUpdateForm(forms.ModelForm):
    """Profile form letting an editor edit the public 'contributions' text."""
    class Meta:
        model = Editor
        fields = ["contributions"]
    def __init__(self, *args, **kwargs):
        """
        This form expects to be instantiated with 'instance=editor' indicating
        the editor to be updated, and will fail otherwise.
        """
        super(EditorUpdateForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.add_input(
            Submit(
                "submit",
                # Translators: This is the label for a button that users click to update their public information.
                _("Update profile"),
                css_class="twl-btn",
            )
        )
        editor = self.instance
        self.helper.form_action = reverse("users:editor_update", args=[editor.id])
        # fmt: off
        # Translators: This labels a field where users can describe their activity on Wikipedia in a small biography.
        self.fields["contributions"].label = _("Describe your contributions " "to Wikipedia: topics edited, et cetera.")
        # fmt: on
        self.fields["contributions"].help_text = None
class AuthorizationUserChoiceForm(forms.ModelChoiceField):
    """User choice field labeling each option with the Wikipedia username
    when an Editor profile exists, falling back to the Django username."""
    def label_from_instance(self, obj):
        user = obj
        if hasattr(user, "editor"):
            return user.editor.wp_username
        else:
            return obj.username
class AuthorizationAdminForm(forms.ModelForm):
    """
    This override only exists to run custom validation.
    """
    class Meta:
        model = Authorization
        fields = "__all__"
    def clean_partners(self):
        # Delegates to helpers.validation; raises ValidationError on bad input.
        validate_partners(self.cleaned_data["partners"])
        return self.cleaned_data["partners"]
    def clean_authorizer(self):
        validate_authorizer(self.cleaned_data["authorizer"])
        return self.cleaned_data["authorizer"]
class AuthorizationInlineForm(forms.ModelForm):
    """Admin inline form for Authorization.

    Restricts 'authorizer' choices to superusers and coordinators, and uses
    a raw-id widget for 'user'. NOTE(review): no Meta is declared here;
    model/fields are presumably supplied by the admin inline using this
    form -- confirm.
    """
    authorizer = AuthorizationUserChoiceForm(
        User.objects.filter(
            models.Q(is_superuser=True) | models.Q(groups__name="coordinators")
        )
    )
    user = AuthorizationUserChoiceForm(
        queryset=User.objects.all(),
        widget=ForeignKeyRawIdWidget(
            Authorization._meta.get_field("user").remote_field, admin.site
        ),
    )
class SetLanguageForm(forms.Form):
    """Preference form for choosing the site display language."""
    language = forms.ChoiceField(choices=settings.LANGUAGES)
    def __init__(self, user, *args, **kwargs):
        # Default to the language stored on the user's profile.
        super(SetLanguageForm, self).__init__(*args, **kwargs)
        self.fields["language"].initial = user.userprofile.lang
        self.helper = FormHelper()
        self.helper.label_class = "sr-only"
class UserEmailForm(forms.Form):
    """Preference form toggling account-renewal notice emails for a user."""
    send_renewal_notices = forms.BooleanField(required=False)
    def __init__(self, user, *args, **kwargs):
        # Initial value comes from the user's stored profile preference.
        super(UserEmailForm, self).__init__(*args, **kwargs)
        self.fields[
            "send_renewal_notices"
        ].initial = user.userprofile.send_renewal_notices
        self.fields[
            "send_renewal_notices"
        # Translators: In the preferences section (Emails) of a user profile, this text labels the checkbox users can (un)click to change if they wish to receive account renewal notices or not.
        ].label = _("Send renewal notices")
class CoordinatorEmailForm(forms.Form):
    """Preference form toggling the three coordinator reminder emails
    (pending / under-discussion / approved applications)."""
    send_pending_application_reminders = forms.BooleanField(required=False)
    send_discussion_application_reminders = forms.BooleanField(required=False)
    send_approved_application_reminders = forms.BooleanField(required=False)
    def __init__(self, user, *args, **kwargs):
        super(CoordinatorEmailForm, self).__init__(*args, **kwargs)
        # We default to the values from the user's userprofile on
        # page (Profile) load.
        self.fields[
            "send_pending_application_reminders"
        ].initial = user.userprofile.pending_app_reminders
        self.fields[
            "send_pending_application_reminders"
            # Translators: In the preferences section (Emails) of a user profile, this text labels the checkbox coordinators can (un)click to change if they wish to receive pending application reminders or not.
        ].label = _("Send pending application reminders")
        self.fields[
            "send_discussion_application_reminders"
        ].initial = user.userprofile.discussion_app_reminders
        self.fields[
            "send_discussion_application_reminders"
            # Translators: In the preferences section (Emails) of a user profile, this text labels the checkbox coordinators can (un)click to change if they wish to receive application reminders that are under discussion or not.
        ].label = _("Send discussion application reminders")
        self.fields[
            "send_approved_application_reminders"
        ].initial = user.userprofile.approved_app_reminders
        self.fields[
            "send_approved_application_reminders"
            # Translators: In the preferences section (Emails) of a user profile, this text labels the checkbox coordinators can (un)click to change if they wish to receive approved application reminders or not.
        ].label = _("Send approved application reminders")
class RestrictDataForm(forms.Form):
    """Form letting a user request restricted processing of their data;
    initial state reflects membership in the 'restricted' group."""
    restricted = forms.BooleanField(required=False)
    def __init__(self, user, *args, **kwargs):
        super(RestrictDataForm, self).__init__(*args, **kwargs)
        # Translators: Labels the button users click to request a restriction on the processing of their data.
        self.fields["restricted"].label = _("Restrict my data")
        restricted = get_restricted()
        user_is_restricted = user in restricted.user_set.all()
        self.fields["restricted"].initial = user_is_restricted
        self.helper = FormHelper()
        self.helper.form_class = "form-inline"
        self.helper.field_template = "bootstrap4/layout/inline_field.html"
class TermsForm(forms.ModelForm):
    """Form recording whether the user agrees to the site terms of use."""
    class Meta:
        model = UserProfile
        fields = ["terms_of_use"]
    def __init__(self, user_profile, *args, **kwargs):
        # user_profile may be None (e.g. before a profile exists); the
        # initial value is only set when one is supplied.
        super(TermsForm, self).__init__(*args, **kwargs)
        # Translators: Users must click this button when registering to agree to the website terms of use.
        self.fields["terms_of_use"].label = _("I agree with the terms of use")
        if user_profile:
            self.fields["terms_of_use"].initial = user_profile.terms_of_use
        self.helper = FormHelper()
        self.helper.form_class = "form-inline"
        self.helper.field_template = "bootstrap4/layout/inline_field.html"
        # fmt: off
        # Translators: This text explains how to disagree to the site's terms of use and the restrictions that will come into effect when users disagree to the terms of use.
        self.fields["terms_of_use"].help_text = _("By unchecking this box and clicking “Update” you may explore the site, but you will not be able to apply for access to materials or evaluate applications unless you agree with the terms of use.")
        # fmt: on
        self.helper.layout = Layout(
            "terms_of_use",
            # Translators: this 'Submit' is referenced in the terms of use and should be translated the same way both places.
            Submit("submit", _("Submit"), css_class="btn twl-btn"),
        )
class EmailChangeForm(forms.Form):
    """Form letting users update their contact email or defer to their Wikipedia email."""

    email = forms.EmailField(required=False)
    use_wp_email = forms.BooleanField(required=False)

    def __init__(self, user, *args, **kwargs):
        super(EmailChangeForm, self).__init__(*args, **kwargs)
        email_field = self.fields["email"]
        wp_email_field = self.fields["use_wp_email"]
        # Translators: Labels the field where the user's email is displayed.
        email_field.label = _("Email")
        # fmt: off
        # Translators: Users click this button to set their website email address to the one linked to their Wikipedia account.
        wp_email_field.label = _("Use my Wikipedia email address "
            "(will be updated the next time you login)."
        )
        # fmt: on
        # Pre-fill from the user's current settings.
        email_field.initial = user.email
        wp_email_field.initial = user.userprofile.use_wp_email
        helper = FormHelper()
        helper.form_class = "mx-auto w-75 px-5"
        helper.label_class = "font-weight-bold w-25 d-inline"
        helper.layout = Layout(
            Row(
                Column("email", css_class="form-group w-100 d-inline"),
            ),
            Row(
                Column("use_wp_email", css_class="form-group w-100"),
            ),
            Submit(
                "submit",
                # Translators: This labels a button which users click to change their email.
                _("Update email"),
                css_class="twl-btn",
            ),
        )
        self.helper = helper
| WikipediaLibrary/TWLight | TWLight/users/forms.py | Python | mit | 9,325 |
import errno
import sys
import os
from ansible.utils.display import Display
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from .utils import get_ansible_task_log_path
class UnSingleton(Singleton):
    """Metaclass that cancels Ansible's ``Singleton`` behaviour.

    ``Display`` uses ``Singleton`` as its metaclass; a subclass created
    through ``UnSingleton`` instead gets ordinary construction semantics
    (a fresh instance on every call).
    """

    def __init__(cls, name, bases, dct):
        # Skip Singleton.__init__ and initialize as a plain type.
        type.__init__(cls, name, bases, dct)

    def __call__(cls, *args, **kwargs):
        # Skip Singleton.__call__ so each call builds a new instance.
        return type.__call__(cls, *args, **kwargs)
class AdHocDisplay(Display, metaclass=UnSingleton):
    """Ansible ``Display`` that tees output to the screen and a per-execution log file.

    Built through ``UnSingleton`` so each ad-hoc execution gets its own
    display (and its own log file) instead of sharing one global instance.
    """

    def __init__(self, execution_id, verbosity=0):
        super().__init__(verbosity=verbosity)
        if execution_id:
            log_path = get_ansible_task_log_path(execution_id)
        else:
            # No execution to attach to: discard log output.
            log_path = os.devnull
        self.log_file = open(log_path, mode='a')

    def close(self):
        self.log_file.close()

    def set_cowsay_info(self):
        # Skip the cowsay detection: it would frequently spawn subprocesses.
        return

    def _write_to_screen(self, msg, stderr):
        if not stderr:
            screen = sys.stdout
        else:
            screen = sys.stderr
        screen.write(msg)
        try:
            screen.flush()
        except IOError as e:
            # Ignore EPIPE in case fileobj has been prematurely closed, eg.
            # when piping to "head -n1"
            if e.errno != errno.EPIPE:
                raise

    def _write_to_log_file(self, msg):
        # Deliberately not flushed here; the log file does not need to be
        # that up to date.
        self.log_file.write(msg)

    def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
        """Write ``msg`` (newline-terminated) to the screen and/or the log file.

        Honors the base-class contract for ``screen_only``/``log_only``;
        previously both flags were accepted but ignored, so log-only
        messages leaked onto the screen and screen-only ones into the log.
        """
        if color:
            msg = stringc(msg, color)
        if not msg.endswith(u'\n'):
            msg2 = msg + u'\n'
        else:
            msg2 = msg
        if not log_only:
            self._write_to_screen(msg2, stderr)
        if not screen_only:
            self._write_to_log_file(msg2)
| skyoo/jumpserver | apps/ops/ansible/display.py | Python | gpl-2.0 | 1,809 |
import multiprocessing
import subprocess
import time
def create_cpu_load(sleep=30):
    """
    Create some artificial CPU load to heat stuff up.

    Spawns one hashing process per logical core, keeps them running for
    ``sleep`` seconds, then kills and reaps them.

    :param sleep: seconds of heat-up time before the load is torn down.
    """
    procs = []
    # Create cpu load: hashing an endless random stream pegs one core each.
    for _ in range(multiprocessing.cpu_count()):
        procs.append(subprocess.Popen(['sha256sum', '/dev/random']))
    # Give some heat-up time (the original called time.sleep without ever
    # importing time, which raised NameError on every invocation).
    time.sleep(sleep)
    # Kill and wait() each child so no zombie processes are left behind.
    for proc in procs:
        proc.kill()
        proc.wait()
| daveol/Fedora-Test-Laptop | tests/utils/cpu.py | Python | mit | 424 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator classes for BoostedTrees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import estimator_export
# TODO(nponomareva): Reveal pruning params here.
# Hyperparameters for tree growth, built by the public estimator classes and
# threaded through _bt_model_fn.
_TreeHParams = collections.namedtuple('TreeHParams', [
    'n_trees', 'max_depth', 'learning_rate', 'l1', 'l2', 'tree_complexity',
    'min_node_weight', 'center_bias'
])

# Sentinel defaults for constructor arguments whose multi-class /
# multi-dimension variants are not supported yet.
_HOLD_FOR_MULTI_CLASS_SUPPORT = object()
_HOLD_FOR_MULTI_DIM_SUPPORT = object()

# Placeholder key for the bucket-size grouping, replaced once the real max
# bucket count is known (see _group_features_by_num_buckets).
_DUMMY_NUM_BUCKETS = -1
# Node id stored in caches before any prediction has been cached.
_DUMMY_NODE_ID = -1
def _get_transformed_features(features, sorted_feature_columns):
  """Gets the transformed features from features/feature_columns pair.

  Args:
    features: a dictionary of name to Tensor.
    sorted_feature_columns: a list/set of tf.feature_column, sorted by name.

  Returns:
    result_features: a list of the transformed features, sorted by the name.

  Raises:
    ValueError: when unsupported features/columns are tried.
  """
  # pylint:disable=protected-access
  transformed_features = feature_column_lib._transform_features(
      features, sorted_feature_columns)
  result_features = []
  for column in sorted_feature_columns:
    if isinstance(column, feature_column_lib._BucketizedColumn):
      source_name = column.source_column.name
      # Bucketized output carries a trailing dim of 1; squeeze it so each
      # feature becomes a rank-1 tensor of bucket ids.
      squeezed_tensor = array_ops.squeeze(transformed_features[column], axis=1)
      if len(squeezed_tensor.shape) > 1:
        raise ValueError('For now, only supports features equivalent to rank 1 '
                         'but column `{}` got: {}'.format(
                             source_name, features[source_name].shape))
      result_features.append(squeezed_tensor)
    elif isinstance(column, feature_column_lib._IndicatorColumn):
      source_name = column.categorical_column.name
      tensor = math_ops.to_int32(transformed_features[column])
      if len(tensor.shape) > 2:
        raise ValueError('Rank of indicator column must be no more than 2, '
                         'but column `{}` got: {}'.format(
                             source_name, features[source_name].shape))
      # Each indicator dimension becomes its own 0/1-valued feature.
      unstacked = array_ops.unstack(tensor, axis=1)
      result_features.extend(unstacked)
    else:
      raise ValueError(
          'For now, only bucketized_column and indicator_column is supported '
          'but got: {}'.format(column))
  # pylint:enable=protected-access

  return result_features
def _local_variable(initial_value, name=None):
  """Wraps `initial_value` in a non-trainable local Variable for fast reads."""
  local_var = variable_scope.variable(
      initial_value=initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=False,
      name=name)
  # validate_shape=False leaves the variable's static shape unknown; when a
  # Tensor was passed in, propagate its shape onto the variable.
  if isinstance(initial_value, ops.Tensor):
    local_var.set_shape(initial_value.shape)
  return local_var
def _group_features_by_num_buckets(sorted_feature_columns):
  """Groups feature ids by the number of buckets.

  Derives the feature ids based on iterating through ordered feature columns
  and groups them by the number of buckets each feature require. Returns a
  sorted list of buckets and a list of lists of feature ids for each of those
  buckets.

  Args:
    sorted_feature_columns: a list/set of tf.feature_column sorted by name.

  Returns:
    bucket_size_list: a list of required bucket sizes.
    feature_ids_list: a list of lists of feature ids for each bucket size.

  Raises:
    ValueError: when unsupported features columns are provided.
  """
  bucket_size_to_feature_ids_dict = collections.OrderedDict()

  # TODO(nponomareva) for now we preserve the previous functionality and bucket
  # all numeric into the same num of buckets. Can be easily changed to using
  # each numeric's real buckets num, but we need to test that it does not cause
  # a performance hit.

  # We will replace this dummy key with the real max after we calculate it.
  bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS] = []

  max_buckets_for_bucketized = 2
  max_buckets_for_indicator = 2

  # Feature ids are assigned in column order, matching the order in which
  # _get_transformed_features emits the per-feature tensors.
  feature_idx = 0
  # pylint:disable=protected-access

  for column in sorted_feature_columns:
    if isinstance(column, feature_column_lib._IndicatorColumn):
      num_categorical_features = column.categorical_column._num_buckets
      if max_buckets_for_indicator not in bucket_size_to_feature_ids_dict:
        bucket_size_to_feature_ids_dict[max_buckets_for_indicator] = []

      for _ in range(num_categorical_features):
        # We use bucket size of 2 for categorical.
        bucket_size_to_feature_ids_dict[max_buckets_for_indicator].append(
            feature_idx)
        feature_idx += 1
    elif isinstance(column, feature_column_lib._BucketizedColumn):
      # N boundaries produce N+1 buckets; track the largest requirement.
      max_buckets_for_bucketized = max(max_buckets_for_bucketized,
                                       len(column.boundaries) + 1)
      bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS].append(feature_idx)
      feature_idx += 1
    elif not isinstance(column, feature_column_lib._IndicatorColumn):  # pylint:disable=protected-access
      raise ValueError(
          'For now, only bucketized_column and indicator column are supported '
          'but got: {}'.format(column))

  # pylint:enable=protected-access
  # Replace the dummy key with the real max num of buckets for all bucketized
  # columns.
  if max_buckets_for_bucketized not in bucket_size_to_feature_ids_dict:
    bucket_size_to_feature_ids_dict[max_buckets_for_bucketized] = []
  bucket_size_to_feature_ids_dict[max_buckets_for_bucketized].extend(
      bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS])
  del bucket_size_to_feature_ids_dict[_DUMMY_NUM_BUCKETS]

  feature_ids_list = list(bucket_size_to_feature_ids_dict.values())
  bucket_size_list = list(bucket_size_to_feature_ids_dict.keys())
  return bucket_size_list, feature_ids_list
def _calculate_num_features(sorted_feature_columns):
  """Returns the total feature count the given columns expand into."""
  # An indicator column contributes one feature per category bucket; every
  # other supported column contributes exactly one feature.
  return sum(
      column.categorical_column._num_buckets  # pylint:disable=protected-access
      if isinstance(column, feature_column_lib._IndicatorColumn)  # pylint:disable=protected-access
      else 1
      for column in sorted_feature_columns)
def _cache_transformed_features(features, sorted_feature_columns, batch_size):
  """Transform features and cache, then returns (cached_features, cache_op)."""
  num_features = _calculate_num_features(sorted_feature_columns)
  # One local int32 variable per feature holds the bucketized values.
  cached_features = [
      _local_variable(
          array_ops.zeros([batch_size], dtype=dtypes.int32),
          name='cached_feature_{}'.format(i))
      for i in range(num_features)
  ]
  are_features_cached = _local_variable(False, name='are_features_cached')

  def cache_features_and_return():
    """Caches transformed features.

    The intention is to hide get_transformed_features() from the graph by
    caching the result except the first step, since bucketize operation
    (inside get_transformed_features) is expensive.

    Returns:
      input_feature_list: a list of input features.
      cache_flip_op: op to add to graph to make sure cache update is included to
        the graph.
    """
    transformed_features = _get_transformed_features(features,
                                                     sorted_feature_columns)
    cached = [
        state_ops.assign(cached_features[i], transformed_features[i])
        for i in range(num_features)
    ]
    # TODO(youngheek): Try other combination of dependencies so that the
    # function returns a single result, not a tuple.
    # The flag flips to True only after every assign has run, so a partially
    # populated cache is never observed as "cached".
    with ops.control_dependencies(cached):
      cache_flip_op = are_features_cached.assign(True)
    return cached, cache_flip_op

  # First step takes the expensive branch and fills the cache; later steps
  # read the variables directly.
  input_feature_list, cache_flip_op = control_flow_ops.cond(
      are_features_cached,
      lambda: (cached_features, control_flow_ops.no_op()),
      cache_features_and_return)
  return input_feature_list, cache_flip_op
class _CacheTrainingStatesUsingHashTable(object):
  """Caching logits, etc. using MutableHashTable."""

  def __init__(self, example_ids, logits_dimension):
    """Creates a cache with the given configuration.

    It maintains a MutableDenseHashTable for all values.
    The API lookup() and insert() would have those specs,
      tree_ids: shape=[batch_size], dtype=int32
      node_ids: shape=[batch_size], dtype=int32
      logits: shape=[batch_size, logits_dimension], dtype=float32
    However in the MutableDenseHashTable, ids are bitcasted into float32 and
    all values are concatenated as a single tensor (of float32).

    Hence conversion happens internally before inserting to the HashTable and
    after lookup from it.

    Args:
      example_ids: a Rank 1 tensor to be used as a key of the cache.
      logits_dimension: a constant (int) for the dimension of logits.

    Raises:
      ValueError: if example_ids is other than int64 or string.
    """
    if dtypes.as_dtype(dtypes.int64).is_compatible_with(example_ids.dtype):
      # The empty key must never collide with a real example id.
      empty_key = -1 << 62
    elif dtypes.as_dtype(dtypes.string).is_compatible_with(example_ids.dtype):
      empty_key = ''
    else:
      raise ValueError('Unsupported example_id_feature dtype %s.' %
                       example_ids.dtype)
    # Cache holds latest <tree_id, node_id, logits> for each example.
    # tree_id and node_id are both int32 but logits is a float32.
    # To reduce the overhead, we store all of them together as float32 and
    # bitcast the ids to int32.
    self._table_ref = lookup_ops.mutable_dense_hash_table_v2(
        empty_key=empty_key, value_dtype=dtypes.float32, value_shape=[3])
    self._example_ids = ops.convert_to_tensor(example_ids)
    if self._example_ids.shape.ndims not in (None, 1):
      raise ValueError('example_id should have rank 1, but got %s' %
                       self._example_ids)
    self._logits_dimension = logits_dimension

  def lookup(self):
    """Returns cached_tree_ids, cached_node_ids, cached_logits."""
    # Split the packed [tree_id, node_id, logits...] value back into parts;
    # unseen examples fall back to tree 0 / dummy node / zero logits.
    cached_tree_ids, cached_node_ids, cached_logits = array_ops.split(
        lookup_ops.lookup_table_find_v2(
            self._table_ref,
            self._example_ids,
            default_value=[0.0, _DUMMY_NODE_ID, 0.0]),
        [1, 1, self._logits_dimension],
        axis=1)
    # Undo the float32 packing: ids were bitcast on insert.
    cached_tree_ids = array_ops.squeeze(
        array_ops.bitcast(cached_tree_ids, dtypes.int32))
    cached_node_ids = array_ops.squeeze(
        array_ops.bitcast(cached_node_ids, dtypes.int32))
    if self._example_ids.shape.ndims is not None:
      cached_logits.set_shape(
          [self._example_ids.shape[0], self._logits_dimension])
    return (cached_tree_ids, cached_node_ids, cached_logits)

  def insert(self, tree_ids, node_ids, logits):
    """Inserts values and returns the op."""
    # Pack ids (bitcast to float32) and logits into one [batch, 3] value.
    insert_op = lookup_ops.lookup_table_insert_v2(
        self._table_ref, self._example_ids,
        array_ops.concat(
            [
                array_ops.expand_dims(
                    array_ops.bitcast(tree_ids, dtypes.float32), 1),
                array_ops.expand_dims(
                    array_ops.bitcast(node_ids, dtypes.float32), 1),
                logits,
            ],
            axis=1,
            name='value_concat_for_cache_insert'))
    return insert_op
class _CacheTrainingStatesUsingVariables(object):
  """Caching logits, etc. using Variables."""

  def __init__(self, batch_size, logits_dimension):
    """Creates a cache with the given configuration.

    It maintains three variables, tree_ids, node_ids, logits, for caching.
      tree_ids: shape=[batch_size], dtype=int32
      node_ids: shape=[batch_size], dtype=int32
      logits: shape=[batch_size, logits_dimension], dtype=float32

    Note, this can be used only with in-memory data setting.

    Args:
      batch_size: `int`, the size of the cache.
      logits_dimension: a constant (int) for the dimension of logits.
    """
    self._logits_dimension = logits_dimension
    self._tree_ids = _local_variable(
        array_ops.zeros([batch_size], dtype=dtypes.int32),
        name='tree_ids_cache')
    # Node ids start at the dummy value so an entry is distinguishable from
    # a cached prediction at node 0.
    self._node_ids = _local_variable(
        _DUMMY_NODE_ID*array_ops.ones([batch_size], dtype=dtypes.int32),
        name='node_ids_cache')
    self._logits = _local_variable(
        array_ops.zeros([batch_size, logits_dimension], dtype=dtypes.float32),
        name='logits_cache')

  def lookup(self):
    """Returns cached_tree_ids, cached_node_ids, cached_logits."""
    return (self._tree_ids, self._node_ids, self._logits)

  def insert(self, tree_ids, node_ids, logits):
    """Inserts values and returns the op."""
    return control_flow_ops.group(
        [
            self._tree_ids.assign(tree_ids),
            self._node_ids.assign(node_ids),
            self._logits.assign(logits)
        ],
        name='cache_insert')
class _StopAtAttemptsHook(session_run_hook.SessionRunHook):
  """Session hook that requests stop once the tree/layer attempt limits hit."""

  def __init__(self, num_finalized_trees_tensor, num_attempted_layers_tensor,
               max_trees, max_depth):
    self._num_finalized_trees_tensor = num_finalized_trees_tensor
    self._num_attempted_layers_tensor = num_attempted_layers_tensor
    self._max_trees = max_trees
    self._max_depth = max_depth

  def before_run(self, run_context):
    # Piggyback the current counters on every training step.
    return session_run_hook.SessionRunArgs(
        [self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])

  def after_run(self, run_context, run_values):
    # The counters fetched with the training step were read before growing.
    # When we get close to a limit, re-read them with a separate session.run
    # so the stop decision sees the post-growth values.
    finalized, attempted = run_values.results
    attempts_limit = 2 * self._max_trees * self._max_depth
    if (finalized >= self._max_trees - 1 or
        attempted > attempts_limit - 1):
      finalized, attempted = run_context.session.run(
          [self._num_finalized_trees_tensor, self._num_attempted_layers_tensor])
    if (finalized >= self._max_trees or
        attempted > attempts_limit):
      run_context.request_stop()
def _bt_model_fn(
    features,
    labels,
    mode,
    head,
    feature_columns,
    tree_hparams,
    n_batches_per_layer,
    config,
    closed_form_grad_and_hess_fn=None,
    example_id_column_name=None,
    # TODO(youngheek): replace this later using other options.
    train_in_memory=False,
    name='boosted_trees'):
  """Gradient Boosted Trees model_fn.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `head_lib._Head` instance.
    feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
    tree_hparams: TODO. collections.namedtuple for hyper parameters.
    n_batches_per_layer: A `Tensor` of `int64`. Each layer is built after at
      least n_batches_per_layer accumulations.
    config: `RunConfig` object to configure the runtime settings.
    closed_form_grad_and_hess_fn: a function that accepts logits and labels
      and returns gradients and hessians. By default, they are created by
      tf.gradients() from the loss.
    example_id_column_name: Name of the feature for a unique ID per example.
      Currently experimental -- not exposed to public API.
    train_in_memory: `bool`, when true, it assumes the dataset is in memory,
      i.e., input_fn should return the entire dataset as a single batch, and
      also n_batches_per_layer should be set as 1.
    name: Name to use for the model.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: mode or params are invalid, or features has the wrong type.
  """
  is_single_machine = (config.num_worker_replicas <= 1)
  sorted_feature_columns = sorted(feature_columns, key=lambda tc: tc.name)
  center_bias = tree_hparams.center_bias
  if train_in_memory:
    assert n_batches_per_layer == 1, (
        'When train_in_memory is enabled, input_fn should return the entire '
        'dataset as a single batch, and n_batches_per_layer should be set as '
        '1.')
    if (not config.is_chief or config.num_worker_replicas > 1 or
        config.num_ps_replicas > 0):
      raise ValueError('train_in_memory is supported only for '
                       'non-distributed training.')
  worker_device = control_flow_ops.no_op().device
  # maximum number of splits possible in the whole tree =2^(D-1)-1
  # TODO(youngheek): perhaps storage could be optimized by storing stats with
  # the dimension max_splits_per_layer, instead of max_splits (for the entire
  # tree).
  max_splits = (1 << tree_hparams.max_depth) - 1
  train_op = []
  with ops.name_scope(name) as name:
    # Prepare.
    global_step = training_util.get_or_create_global_step()
    bucket_size_list, feature_ids_list = _group_features_by_num_buckets(
        sorted_feature_columns)
    # Extract input features and set up cache for training.
    training_state_cache = None
    if mode == model_fn.ModeKeys.TRAIN and train_in_memory:
      # cache transformed features as well for in-memory training.
      batch_size = array_ops.shape(labels)[0]
      input_feature_list, input_cache_op = (
          _cache_transformed_features(features, sorted_feature_columns,
                                      batch_size))
      train_op.append(input_cache_op)
      training_state_cache = _CacheTrainingStatesUsingVariables(
          batch_size, head.logits_dimension)
    else:
      input_feature_list = _get_transformed_features(features,
                                                     sorted_feature_columns)
      if mode == model_fn.ModeKeys.TRAIN and example_id_column_name:
        # Out-of-memory training can still cache per-example state keyed by
        # the example id.
        example_ids = features[example_id_column_name]
        training_state_cache = _CacheTrainingStatesUsingHashTable(
            example_ids, head.logits_dimension)

    # Create Ensemble resources.
    tree_ensemble = boosted_trees_ops.TreeEnsemble(name=name)
    # Variable that determines whether bias centering is needed.
    center_bias_var = variable_scope.variable(
        initial_value=center_bias, name='center_bias_needed', trainable=False)
    # Create logits.
    if mode != model_fn.ModeKeys.TRAIN:
      logits = boosted_trees_ops.predict(
          # For non-TRAIN mode, ensemble doesn't change after initialization,
          # so no local copy is needed; using tree_ensemble directly.
          tree_ensemble_handle=tree_ensemble.resource_handle,
          bucketized_features=input_feature_list,
          logits_dimension=head.logits_dimension)
    else:
      if is_single_machine:
        local_tree_ensemble = tree_ensemble
        ensemble_reload = control_flow_ops.no_op()
      else:
        # Have a local copy of ensemble for the distributed setting.
        with ops.device(worker_device):
          local_tree_ensemble = boosted_trees_ops.TreeEnsemble(
              name=name + '_local', is_local=True)
        # TODO(soroush): Do partial updates if this becomes a bottleneck.
        ensemble_reload = local_tree_ensemble.deserialize(
            *tree_ensemble.serialize())

      if training_state_cache:
        cached_tree_ids, cached_node_ids, cached_logits = (
            training_state_cache.lookup())
      else:
        # Always start from the beginning when no cache is set up.
        batch_size = array_ops.shape(labels)[0]
        cached_tree_ids, cached_node_ids, cached_logits = (
            array_ops.zeros([batch_size], dtype=dtypes.int32),
            _DUMMY_NODE_ID * array_ops.ones([batch_size], dtype=dtypes.int32),
            array_ops.zeros(
                [batch_size, head.logits_dimension], dtype=dtypes.float32))

      # All reads below must see the freshly-reloaded local ensemble.
      with ops.control_dependencies([ensemble_reload]):
        (stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
         last_layer_nodes_range) = local_tree_ensemble.get_states()
        summary.scalar('ensemble/num_trees', num_trees)
        summary.scalar('ensemble/num_finalized_trees', num_finalized_trees)
        summary.scalar('ensemble/num_attempted_layers', num_attempted_layers)

        partial_logits, tree_ids, node_ids = boosted_trees_ops.training_predict(
            tree_ensemble_handle=local_tree_ensemble.resource_handle,
            cached_tree_ids=cached_tree_ids,
            cached_node_ids=cached_node_ids,
            bucketized_features=input_feature_list,
            logits_dimension=head.logits_dimension)
      # Full logits = what was cached + contribution of trees grown since.
      logits = cached_logits + partial_logits

    # Create training graph.
    def _train_op_fn(loss):
      """Run one training iteration."""
      if training_state_cache:
        # Cache logits only after center_bias is complete, if it's in progress.
        train_op.append(
            control_flow_ops.cond(
                center_bias_var, control_flow_ops.no_op,
                lambda: training_state_cache.insert(tree_ids, node_ids, logits))
        )
      if closed_form_grad_and_hess_fn:
        gradients, hessians = closed_form_grad_and_hess_fn(logits, labels)
      else:
        gradients = gradients_impl.gradients(loss, logits, name='Gradients')[0]
        hessians = gradients_impl.gradients(
            gradients, logits, name='Hessians')[0]

      # One stats summary per feature, grouped by bucket size.
      stats_summaries_list = []
      for i, feature_ids in enumerate(feature_ids_list):
        num_buckets = bucket_size_list[i]
        summaries = [
            array_ops.squeeze(
                boosted_trees_ops.make_stats_summary(
                    node_ids=node_ids,
                    gradients=gradients,
                    hessians=hessians,
                    bucketized_features_list=[input_feature_list[f]],
                    max_splits=max_splits,
                    num_buckets=num_buckets),
                axis=0) for f in feature_ids
        ]
        stats_summaries_list.append(summaries)

      # ========= Helper methods for both in and not in memory. ==============
      def grow_tree_from_stats_summaries(stats_summaries_list,
                                         feature_ids_list):
        """Updates ensemble based on the best gains from stats summaries."""
        node_ids_per_feature = []
        gains_list = []
        thresholds_list = []
        left_node_contribs_list = []
        right_node_contribs_list = []
        all_feature_ids = []

        assert len(stats_summaries_list) == len(feature_ids_list)

        for i, feature_ids in enumerate(feature_ids_list):
          (numeric_node_ids_per_feature, numeric_gains_list,
           numeric_thresholds_list, numeric_left_node_contribs_list,
           numeric_right_node_contribs_list) = (
               boosted_trees_ops.calculate_best_gains_per_feature(
                   node_id_range=last_layer_nodes_range,
                   stats_summary_list=stats_summaries_list[i],
                   l1=tree_hparams.l1,
                   l2=tree_hparams.l2,
                   tree_complexity=tree_hparams.tree_complexity,
                   min_node_weight=tree_hparams.min_node_weight,
                   max_splits=max_splits))

          all_feature_ids += feature_ids
          node_ids_per_feature += numeric_node_ids_per_feature
          gains_list += numeric_gains_list
          thresholds_list += numeric_thresholds_list
          left_node_contribs_list += numeric_left_node_contribs_list
          right_node_contribs_list += numeric_right_node_contribs_list

        grow_op = boosted_trees_ops.update_ensemble(
            # Confirm if local_tree_ensemble or tree_ensemble should be used.
            tree_ensemble.resource_handle,
            feature_ids=all_feature_ids,
            node_ids=node_ids_per_feature,
            gains=gains_list,
            thresholds=thresholds_list,
            left_node_contribs=left_node_contribs_list,
            right_node_contribs=right_node_contribs_list,
            learning_rate=tree_hparams.learning_rate,
            max_depth=tree_hparams.max_depth,
            pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
        return grow_op

      def _center_bias_fn(mean_gradients, mean_hessians):
        """Updates the ensembles and cache (if needed) with logits prior."""
        continue_centering = boosted_trees_ops.center_bias(
            tree_ensemble.resource_handle,
            mean_gradients=mean_gradients,
            mean_hessians=mean_hessians,
            l1=tree_hparams.l1,
            l2=tree_hparams.l2
        )
        # The op reports whether more centering iterations are needed.
        return center_bias_var.assign(continue_centering)

      # ========= End of helper methods. ==============

      if train_in_memory and is_single_machine:
        # In-memory mode: every step sees the full dataset, so grow (or
        # center bias) immediately without accumulators.
        train_op.append(distribute_lib.increment_var(global_step))

        mean_gradients = array_ops.expand_dims(
            math_ops.reduce_mean(gradients, 0), 0)
        mean_heassians = array_ops.expand_dims(
            math_ops.reduce_mean(hessians, 0), 0)

        train_op.append(
            control_flow_ops.cond(
                center_bias_var,
                lambda: _center_bias_fn(mean_gradients, mean_heassians),
                functools.partial(grow_tree_from_stats_summaries,
                                  stats_summaries_list, feature_ids_list)))
      else:

        def center_bias_not_in_mem():
          """Accumulates the data and updates the logits bias, when ready."""
          bias_dependencies = []

          bias_accumulator = data_flow_ops.ConditionalAccumulator(
              dtype=dtypes.float32,
              # The stats consist of grads and hessians means only.
              # TODO(nponomareva): this will change for a multiclass
              shape=[2, 1],
              shared_name='bias_accumulator')

          grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
          grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)

          # stamp_token guards against applying stale gradients after the
          # ensemble has changed.
          apply_grad = bias_accumulator.apply_grad(grads_and_hess, stamp_token)
          bias_dependencies.append(apply_grad)

          def center_bias_from_accumulator():
            accumulated = array_ops.unstack(
                bias_accumulator.take_grad(1), axis=0)
            return _center_bias_fn(
                array_ops.expand_dims(accumulated[0], 0),
                array_ops.expand_dims(accumulated[1], 0))

          with ops.control_dependencies(bias_dependencies):
            if config.is_chief:
              center_bias_op = control_flow_ops.cond(
                  math_ops.greater_equal(bias_accumulator.num_accumulated(),
                                         n_batches_per_layer),
                  center_bias_from_accumulator,
                  control_flow_ops.no_op,
                  name='wait_until_n_batches_for_bias_accumulated')

              return center_bias_op

        def grow_not_in_mem():
          """Accumulates the data and grows a layer when ready."""
          accumulators = []
          dependencies = []
          for i, feature_ids in enumerate(feature_ids_list):
            stats_summaries = stats_summaries_list[i]
            accumulator = data_flow_ops.ConditionalAccumulator(
                dtype=dtypes.float32,
                # The stats consist of grads and hessians (the last dimension).
                shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
                shared_name='numeric_stats_summary_accumulator_' + str(i))
            accumulators.append(accumulator)

            apply_grad = accumulator.apply_grad(
                array_ops.stack(stats_summaries, axis=0), stamp_token)
            dependencies.append(apply_grad)

          def grow_tree_from_accumulated_summaries_fn():
            """Updates tree with the best layer from accumulated summaries."""
            # Take out the accumulated summaries from the accumulator and grow.
            stats_summaries_list = []

            stats_summaries_list = [
                array_ops.unstack(accumulator.take_grad(1), axis=0)
                for accumulator in accumulators
            ]

            grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
                                                     feature_ids_list)
            return grow_op

          with ops.control_dependencies(dependencies):
            if config.is_chief:
              # Grow only when every accumulator has seen enough batches.
              min_accumulated = math_ops.reduce_min(
                  array_ops.stack(
                      [acc.num_accumulated() for acc in accumulators]))

              grow_model = control_flow_ops.cond(
                  math_ops.greater_equal(min_accumulated, n_batches_per_layer),
                  grow_tree_from_accumulated_summaries_fn,
                  control_flow_ops.no_op,
                  name='wait_until_n_batches_accumulated')

              return grow_model

        update_model = control_flow_ops.cond(
            center_bias_var, center_bias_not_in_mem, grow_not_in_mem)
        train_op.append(update_model)

        # Only advance the global step after the model update has run.
        with ops.control_dependencies([update_model]):
          increment_global = distribute_lib.increment_var(global_step)
          train_op.append(increment_global)

      return control_flow_ops.group(train_op, name='train_op')

    estimator_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
    if mode == model_fn.ModeKeys.TRAIN:
      # Add an early stop hook.
      estimator_spec = estimator_spec._replace(
          training_hooks=estimator_spec.training_hooks +
          (_StopAtAttemptsHook(num_finalized_trees, num_attempted_layers,
                               tree_hparams.n_trees, tree_hparams.max_depth),))
    return estimator_spec
def _create_classification_head(n_classes,
weight_column=None,
label_vocabulary=None):
"""Creates a classification head. Refer to canned.head for details on args."""
# TODO(nponomareva): Support multi-class cases.
if n_classes == 2:
# pylint: disable=protected-access
return head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
# pylint: enable=protected-access
else:
raise ValueError('For now only binary classification is supported.'
'n_classes given as {}'.format(n_classes))
def _create_classification_head_and_closed_form(n_classes, weight_column,
                                                label_vocabulary):
  """Creates a head for classifier and the closed form gradients/hessians."""
  head = _create_classification_head(n_classes, weight_column, label_vocabulary)
  # Closed form is only valid for the plain unweighted binary case.
  if (n_classes == 2 and head.logits_dimension == 1 and weight_column is None
      and label_vocabulary is None):
    # Use the closed-form gradients/hessians for 2 class.
    def _grad_and_hess_for_logloss(logits, labels):
      """A closed form gradient and hessian for logistic loss."""
      # TODO(youngheek): add weights handling.
      # predictions = sigmoid(logits); normalizer = 1 / batch size.
      predictions = math_ops.reciprocal(math_ops.exp(-logits) + 1.0)
      normalizer = math_ops.reciprocal(
          math_ops.cast(array_ops.size(predictions), dtypes.float32))
      labels = math_ops.cast(labels, dtypes.float32)
      labels = head_lib._check_dense_labels_match_logits_and_reshape(  # pylint: disable=protected-access
          labels, logits, head.logits_dimension)
      gradients = (predictions - labels) * normalizer
      hessians = predictions * (1.0 - predictions) * normalizer
      return gradients, hessians

    closed_form = _grad_and_hess_for_logloss
  else:
    # Fall back to tf.gradients() in _bt_model_fn.
    closed_form = None
  return (head, closed_form)
def _create_regression_head(label_dimension, weight_column=None):
if label_dimension != 1:
raise ValueError('For now only 1 dimension regression is supported.'
'label_dimension given as {}'.format(label_dimension))
# pylint: disable=protected-access
return head_lib._regression_head(
label_dimension=label_dimension,
weight_column=weight_column,
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
# pylint: enable=protected-access
@estimator_export('estimator.BoostedTreesClassifier')
class BoostedTreesClassifier(estimator.Estimator):
  """A Classifier for Tensorflow Boosted Trees models.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that `input_fn`
  and all hooks are executed inside a graph context, so they have to be written
  to be compatible with graph mode. Note that `input_fn` code using `tf.data`
  generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               feature_columns,
               n_batches_per_layer,
               model_dir=None,
               n_classes=_HOLD_FOR_MULTI_CLASS_SUPPORT,
               weight_column=None,
               label_vocabulary=None,
               n_trees=100,
               max_depth=6,
               learning_rate=0.1,
               l1_regularization=0.,
               l2_regularization=0.,
               tree_complexity=0.,
               min_node_weight=0.,
               config=None,
               center_bias=False):
    """Initializes a `BoostedTreesClassifier` instance.

    Example:

    ```python
    bucketized_feature_1 = bucketized_column(
      numeric_column('feature_1'), BUCKET_BOUNDARIES_1)
    bucketized_feature_2 = bucketized_column(
      numeric_column('feature_2'), BUCKET_BOUNDARIES_2)

    classifier = estimator.BoostedTreesClassifier(
        feature_columns=[bucketized_feature_1, bucketized_feature_2],
        n_trees=100,
        ... <some other params>
    )

    def input_fn_train():
      ...
      return dataset

    classifier.train(input_fn=input_fn_train)

    def input_fn_eval():
      ...
      return dataset

    metrics = classifier.evaluate(input_fn=input_fn_eval)
    ```

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      n_batches_per_layer: the number of batches to collect statistics per
        layer.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        Multiclass support is not yet implemented.
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to downweight or boost examples during training. It
        will be multiplied by the loss of the example. If it is a string, it is
        used as a key to fetch weight tensor from the `features`. If it is a
        `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
        then weight_column.normalizer_fn is applied on it to get weight tensor.
      label_vocabulary: A list of strings represents possible label values. If
        given, labels must be string type and have any value in
        `label_vocabulary`. If it is not given, that means labels are
        already encoded as integer or float within [0, 1] for `n_classes=2` and
        encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
        Also there will be errors if vocabulary is not provided and labels are
        string.
      n_trees: number trees to be created.
      max_depth: maximum depth of the tree to grow.
      learning_rate: shrinkage parameter to be used when a tree added to the
        model.
      l1_regularization: regularization multiplier applied to the absolute
        weights of the tree leafs.
      l2_regularization: regularization multiplier applied to the square weights
        of the tree leafs.
      tree_complexity: regularization factor to penalize trees with more leaves.
      min_node_weight: min_node_weight: minimum hessian a node must have for a
        split to be considered. The value will be compared with
        sum(leaf_hessian)/(batch_size * n_batches_per_layer).
      config: `RunConfig` object to configure the runtime settings.
      center_bias: Whether bias centering needs to occur. Bias centering refers
        to the first node in the very first tree returning the prediction that
        is aligned with the original labels distribution. For example, for
        regression problems, the first node will return the mean of the labels.
        For binary classification problems, it will return a logit for a prior
        probability of label 1.

    Raises:
      ValueError: when wrong arguments are given or unsupported functionalities
        are requested.
    """
    # TODO(nponomareva): Support multi-class cases.
    # _HOLD_FOR_MULTI_CLASS_SUPPORT is a sentinel meaning "unspecified"; it is
    # resolved to the only supported case today (binary classification).
    if n_classes == _HOLD_FOR_MULTI_CLASS_SUPPORT:
      n_classes = 2
    head, closed_form = _create_classification_head_and_closed_form(
        n_classes, weight_column, label_vocabulary=label_vocabulary)
    # HParams for the model.
    tree_hparams = _TreeHParams(n_trees, max_depth, learning_rate,
                                l1_regularization, l2_regularization,
                                tree_complexity, min_node_weight, center_bias)

    def _model_fn(features, labels, mode, config):
      # Binds the head/hparams/closed-form gradients captured above into the
      # shared boosted-trees model_fn.
      return _bt_model_fn(  # pylint: disable=protected-access
          features,
          labels,
          mode,
          head,
          feature_columns,
          tree_hparams,
          n_batches_per_layer,
          config,
          closed_form_grad_and_hess_fn=closed_form)

    super(BoostedTreesClassifier, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
@estimator_export('estimator.BoostedTreesRegressor')
class BoostedTreesRegressor(estimator.Estimator):
  """A Regressor for Tensorflow Boosted Trees models.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that `input_fn`
  and all hooks are executed inside a graph context, so they have to be written
  to be compatible with graph mode. Note that `input_fn` code using `tf.data`
  generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               feature_columns,
               n_batches_per_layer,
               model_dir=None,
               label_dimension=_HOLD_FOR_MULTI_DIM_SUPPORT,
               weight_column=None,
               n_trees=100,
               max_depth=6,
               learning_rate=0.1,
               l1_regularization=0.,
               l2_regularization=0.,
               tree_complexity=0.,
               min_node_weight=0.,
               config=None,
               center_bias=False):
    """Initializes a `BoostedTreesRegressor` instance.

    Example:

    ```python
    bucketized_feature_1 = bucketized_column(
      numeric_column('feature_1'), BUCKET_BOUNDARIES_1)
    bucketized_feature_2 = bucketized_column(
      numeric_column('feature_2'), BUCKET_BOUNDARIES_2)

    regressor = estimator.BoostedTreesRegressor(
        feature_columns=[bucketized_feature_1, bucketized_feature_2],
        n_trees=100,
        ... <some other params>
    )

    def input_fn_train():
      ...
      return dataset

    regressor.train(input_fn=input_fn_train)

    def input_fn_eval():
      ...
      return dataset

    metrics = regressor.evaluate(input_fn=input_fn_eval)
    ```

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      n_batches_per_layer: the number of batches to collect statistics per
        layer.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      label_dimension: Number of regression targets per example.
        Multi-dimensional support is not yet implemented.
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to downweight or boost examples during training. It
        will be multiplied by the loss of the example. If it is a string, it is
        used as a key to fetch weight tensor from the `features`. If it is a
        `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
        then weight_column.normalizer_fn is applied on it to get weight tensor.
      n_trees: number trees to be created.
      max_depth: maximum depth of the tree to grow.
      learning_rate: shrinkage parameter to be used when a tree added to the
        model.
      l1_regularization: regularization multiplier applied to the absolute
        weights of the tree leafs.
      l2_regularization: regularization multiplier applied to the square weights
        of the tree leafs.
      tree_complexity: regularization factor to penalize trees with more leaves.
      min_node_weight: min_node_weight: minimum hessian a node must have for a
        split to be considered. The value will be compared with
        sum(leaf_hessian)/(batch_size * n_batches_per_layer).
      config: `RunConfig` object to configure the runtime settings.
      center_bias: Whether bias centering needs to occur. Bias centering refers
        to the first node in the very first tree returning the prediction that
        is aligned with the original labels distribution. For example, for
        regression problems, the first node will return the mean of the labels.
        For binary classification problems, it will return a logit for a prior
        probability of label 1.

    Raises:
      ValueError: when wrong arguments are given or unsupported functionalities
        are requested.
    """
    # TODO(nponomareva): Extend it to multi-dimension cases.
    # _HOLD_FOR_MULTI_DIM_SUPPORT is a sentinel meaning "unspecified"; it is
    # resolved to the only supported case today (1-D regression).
    if label_dimension == _HOLD_FOR_MULTI_DIM_SUPPORT:
      label_dimension = 1
    head = _create_regression_head(label_dimension, weight_column)
    # HParams for the model.
    tree_hparams = _TreeHParams(n_trees, max_depth, learning_rate,
                                l1_regularization, l2_regularization,
                                tree_complexity, min_node_weight, center_bias)

    def _model_fn(features, labels, mode, config):
      # Binds the head/hparams captured above into the shared boosted-trees
      # model_fn (no closed-form gradients for regression).
      return _bt_model_fn(  # pylint: disable=protected-access
          features, labels, mode, head, feature_columns, tree_hparams,
          n_batches_per_layer, config)

    super(BoostedTreesRegressor, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
| caisq/tensorflow | tensorflow/python/estimator/canned/boosted_trees.py | Python | apache-2.0 | 44,604 |
__docformat__ = "numpy"

# NOTE(review): this file looks like an IDE-inspection test fixture -- the
# `<warning ...>` markup on the last line marks where the inspection is
# expected to fire and is not valid Python. Do not "fix" the flagged call.

def unique(ar, return_index=False, return_inverse=False, return_counts=False):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array. There are two optional
    outputs in addition to the unique elements: the indices of the input array
    that give the unique values, and the indices of the unique array that
    reconstruct the input array.

    Parameters
    ----------
    ar : array_like
        Input array. This will be flattened if it is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` that result in the unique
        array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array that can be used
        to reconstruct `ar`.
    return_counts : bool, optional
        .. versionadded:: 1.9.0
        If True, also return the number of times each unique value comes up
        in `ar`.

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray
        The indices of the first occurrences of the unique values in the
        (flattened) original array. Only provided if `return_index` is True.
    unique_inverse : ndarray
        The indices to reconstruct the (flattened) original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : ndarray
        .. versionadded:: 1.9.0
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.
    """
    ar = np.asanyarray(ar).flatten()
    # `a` is a scalar, so unpacking the two-tuple result is expected to be
    # flagged by the "too many values to unpack" inspection below.
    a=1
    u, indices = <warning descr="Too many values to unpack">unique(a, return_index=True)</warning>
"""Set testing/stable dates for all old updates.
Revision ID: 3dbed75df3fe
Revises: 13cfca635b99
Create Date: 2015-09-03 14:06:11.762119
"""
# revision identifiers, used by Alembic.
revision = '3dbed75df3fe'
down_revision = '13cfca635b99'
import transaction
from alembic import op
from bodhi.models import Update, DBSession, Base
import logging
# Logger shared by the (disabled) data migration below.
log = logging.getLogger('alembic.migration')

# Prefixes of the automated bodhi comments that mark push events; the disabled
# upgrade() body matches comment text against these to recover timestamps.
testing = u'This update has been pushed to testing'
stable = u'This update has been pushed to stable'
def upgrade():
    """Back-fill date_testing/date_stable on all historical updates.

    Intentionally disabled: the real back-fill is left commented out below and
    is meant to be run by hand (per the warning), presumably because scanning
    every update ever is too slow to run inline with a schema migration --
    TODO confirm.
    """
    log.warn("Skipping.  Do this by hand by uncommenting and running in tmux.")
    #log.info("Getting session for data upgrade.")
    #engine = op.get_bind().engine
    #DBSession.configure(bind=engine)
    #Base.metadata.bind = engine
    #with transaction.manager:
    #    log.info("Querying for all updates ever.")
    #    total = DBSession.query(Update).count()
    #    log.info("  %i" % total)
    #    log.info("OK")
    #    for i, update in enumerate(DBSession.query(Update).yield_per(1000).enable_eagerloads(False)):
    #        if i % 100 == 0:
    #            log.info("  Considering update (%i/%i) %r" % (
    #                i, total, update.title))
    #        for comment in update.comments:
    #            if comment.user.name == u'bodhi':
    #                if comment.text.startswith(testing):
    #                    update.date_testing = comment.timestamp
    #                elif comment.text.startswith(stable):
    #                    update.date_stable = comment.timestamp
    #    log.info("Done.  Committing..")
def downgrade():
    """No-op: this data-only back-fill has nothing meaningful to undo."""
    pass
| remicollet/bodhi | alembic/versions/3dbed75df3fe_set_testing_stable_dates_for_all_old_.py | Python | gpl-2.0 | 1,608 |
"""This pipeline is intended to make the classification of MRSI modality
features."""
from __future__ import division
import os
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.ensemble import RandomForestClassifier
from protoclass.data_management import GTModality
# ---- Experiment configuration (paths are hard-coded for the lab cluster) ----
# Define the path where the patients are stored
path_patients = '/data/prostate/experiments'
# Define the path where the features have been extracted
path_features = '/data/prostate/extraction/mp-mri-prostate'
# Define a list of the path where the feature are kept
mrsi_features = ['mrsi-spectra']
ext_features = ['_spectra_mrsi.npy']
# Define the path of the balanced data
path_balanced = '/data/prostate/balanced/mp-mri-prostate/exp-3'
# One sub-folder per class-balancing strategy (instance hardness threshold,
# NearMiss 1-3, SMOTE and its borderline variants).
sub_folder = ['iht', 'nm1', 'nm2', 'nm3', 'smote',
              'smote-b1', 'smote-b2']
ext_balanced = '_mrsi.npz'
# Define the path of the ground for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
# Patient IDs are the sub-directories of path_patients, sorted so that the
# LOPO cross-validation folds are reproducible across runs.
id_patient_list = [name for name in os.listdir(path_patients)
                   if os.path.isdir(os.path.join(path_patients, name))]
id_patient_list = sorted(id_patient_list)
for id_patient in id_patient_list:
    # Append for the GT data - Note that we need a list of gt path
    path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
                                  for gt in path_gt])
# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
data = []        # per-patient imbalanced feature matrices
data_bal = []    # per-patient list of balanced matrices, one per strategy
label = []       # per-patient labels within the prostate ROI
label_bal = []   # per-patient list of balanced labels, one per strategy
for idx_pat in range(len(id_patient_list)):
    print 'Read patient {}'.format(id_patient_list[idx_pat])
    # For each patient we need to load the different feature
    patient_data = []
    for idx_feat in range(len(mrsi_features)):
        # Create the path to the patient file
        filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
                            ext_features[idx_feat])
        path_data = os.path.join(path_features, mrsi_features[idx_feat],
                                 filename_feature)
        single_feature_data = np.load(path_data)
        # Check if this is only one dimension data
        # 1-D features become a single column so concatenation along axis=1 works.
        if len(single_feature_data.shape) == 1:
            single_feature_data = np.atleast_2d(single_feature_data).T
        patient_data.append(single_feature_data)
    # Concatenate the data in a single array
    patient_data = np.concatenate(patient_data, axis=1)
    print 'Imbalanced feature loaded ...'
    # Load the dataset from each balancing method
    data_bal_meth = []
    label_bal_meth = []
    for idx_imb in range(len(sub_folder)):
        path_bal = os.path.join(path_balanced, sub_folder[idx_imb])
        pat_chg = (id_patient_list[idx_pat].lower().replace(' ', '_') +
                   '_mrsi.npz')
        filename = os.path.join(path_bal, pat_chg)
        npz_file = np.load(filename)
        data_bal_meth.append(npz_file['data_resampled'])
        label_bal_meth.append(npz_file['label_resampled'])
    data_bal.append(data_bal_meth)
    label_bal.append(label_bal_meth)
    print 'Balanced data loaded ...'
    # Create the corresponding ground-truth
    gt_mod = GTModality()
    gt_mod.read_data_from_path(label_gt,
                               path_patients_list_gt[idx_pat])
    print 'Read the GT data for the current patient ...'
    # Concatenate the training data
    data.append(patient_data)
    # Extract the corresponding ground-truth for the testing data
    # Get the index corresponding to the ground-truth
    roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
    # Get the label of the gt only for the prostate ROI
    gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
    label.append(gt_cap[roi_prostate])
    print 'Data and label extracted for the current patient ...'
# For every balancing strategy, run a leave-one-patient-out (LOPO)
# cross-validation: train on the *balanced* data of all other patients, test
# on the held-out patient's original (imbalanced) data.
results_bal = []
for idx_imb in range(len(sub_folder)):
    result_cv = []
    # Go for LOPO cross-validation
    for idx_lopo_cv in range(len(id_patient_list)):
        # Display some information about the LOPO-CV
        print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)
        # Get the testing data
        testing_data = data[idx_lopo_cv]
        # Labels are 0/255 masks; binarize them to 0/1 for the classifier.
        testing_label = label_binarize(label[idx_lopo_cv], [0, 255])
        print 'Create the testing set ...'
        # Create the training data and label
        # We need to take the balanced data
        training_data = [arr[idx_imb] for idx_arr, arr in enumerate(data_bal)
                         if idx_arr != idx_lopo_cv]
        training_label = [arr[idx_imb] for idx_arr, arr in enumerate(label_bal)
                         if idx_arr != idx_lopo_cv]
        # Concatenate the data
        training_data = np.vstack(training_data)
        training_label = label_binarize(np.hstack(training_label).astype(int),
                                        [0, 255])
        print 'Create the training set ...'
        # Perform the classification for the current cv and the
        # given configuration
        crf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
        pred_prob = crf.fit(training_data,
                            np.ravel(training_label)).predict_proba(
                                testing_data)
        # Store the class ordering alongside the probabilities so the columns
        # of pred_prob can be interpreted later.
        result_cv.append([pred_prob, crf.classes_])
    results_bal.append(result_cv)
# Save the information
# Persist the per-strategy, per-fold predictions for later evaluation.
path_store = '/data/prostate/results/mp-mri-prostate/exp-3/balancing/mrsi'
if not os.path.exists(path_store):
    os.makedirs(path_store)
joblib.dump(results_bal, os.path.join(path_store,
                                      'results.pkl'))
| I2Cvb/mp-mri-prostate | pipeline/feature-classification/exp-3/balancing/pipeline_classifier_mrsi_spectra.py | Python | mit | 5,823 |
# -*- coding: utf-8 -*-
from numpy import logical_not as not_, logical_or as or_
from numpy.core.defchararray import startswith
from openfisca_france.model.base import * # noqa analysis:ignore
class coloc(Variable):
    """Whether the individual lives in a flat-share (colocation)."""
    column = BoolCol
    entity_class = Individus
    label = u"Vie en colocation"


class logement_chambre(Variable):
    """Whether the dwelling is legally considered a single room (chambre)."""
    column = BoolCol
    entity_class = Individus
    label = u"Le logement est considéré comme une chambre"


class loyer(Variable):
    """Monthly rent, or mortgage payment for a first-time buyer (household level)."""
    column = FloatCol()
    entity_class = Menages
    set_input = set_input_divide_by_period
    label = u"Loyer ou mensualité d'emprunt pour un primo-accédant"


class loyer_individu(EntityToPersonColumn):
    """Projects the household-level `loyer` onto each individual.

    NOTE(review): the label reads "Zone apl de la personne" while the projected
    variable is `loyer` -- it looks copy-pasted from a zone-apl variable;
    confirm the intended label.
    """
    entity_class = Individus
    label = u"Zone apl de la personne"
    variable = loyer


class depcom(Variable):
    """INSEE municipality code (depcom) of the household's residence."""
    column = FixedStrCol(max_length = 5)
    entity_class = Menages
    label = u"Code INSEE (depcom) du lieu de résidence"


class loyer_famille(PersonToEntityColumn):
    """Lifts the family head's `loyer_individu` back up to the family level.

    NOTE(review): same label caveat as `loyer_individu` -- confirm.
    """
    entity_class = Familles
    label = u"Zone apl de la famille"
    role = CHEF
    variable = loyer_individu


class charges_locatives(Variable):
    """Rental service charges (household level)."""
    column = FloatCol()
    entity_class = Menages
    set_input = set_input_divide_by_period
    label = u'Charges locatives'


class proprietaire_proche_famille(Variable):
    """Whether the landlord is a relative of the reference person or partner."""
    column = BoolCol
    entity_class = Familles
    label = u"Le propriétaire du logement a un lien de parenté avec la personne de référence ou son conjoint"


class statut_occupation_logement(Variable):
    """Occupancy status of the dwelling (owner, tenant, lodged for free, ...)."""
    column = EnumCol(
        enum = Enum([
            u"Non renseigné",
            u"Accédant à la propriété",
            u"Propriétaire (non accédant) du logement",
            u"Locataire d'un logement HLM",
            u"Locataire ou sous-locataire d'un logement loué vide non-HLM",
            u"Locataire ou sous-locataire d'un logement loué meublé ou d'une chambre d'hôtel",
            u"Logé gratuitement par des parents, des amis ou l'employeur",
            u"Locataire d'un foyer (résidence universitaire, maison de retraite, foyer de jeune travailleur, résidence sociale...)",
            u"Sans domicile stable"])
        )
    entity_class = Menages
    label = u"Statut d'occupation du logement"
    set_input = set_input_dispatch_by_period
class residence_dom(Variable):
    column = BoolCol
    entity_class = Familles

    def function(self, simulation, period):
        """True when the family resides in any overseas departement (DOM)."""
        guadeloupe = simulation.calculate('residence_guadeloupe', period)
        martinique = simulation.calculate('residence_martinique', period)
        guyane = simulation.calculate('residence_guyane', period)
        reunion = simulation.calculate('residence_reunion', period)
        mayotte = simulation.calculate('residence_mayotte', period)
        # Element-wise logical OR over the five departement indicators.
        dom = or_(guadeloupe, or_(martinique, or_(guyane, or_(reunion, mayotte))))
        return period, dom
class residence_guadeloupe(Variable):
    column = BoolCol
    entity_class = Familles

    def function(self, simulation, period):
        """True when the family head's municipality code starts with '971'."""
        depcom_roles = self.cast_from_entity_to_roles(
            simulation.compute('depcom', period))
        depcom_chef = self.filter_role(depcom_roles, role = CHEF)
        return period, startswith(depcom_chef, '971')


class residence_martinique(Variable):
    column = BoolCol
    entity_class = Familles

    def function(self, simulation, period):
        """True when the family head's municipality code starts with '972'."""
        depcom_roles = self.cast_from_entity_to_roles(
            simulation.compute('depcom', period))
        depcom_chef = self.filter_role(depcom_roles, role = CHEF)
        return period, startswith(depcom_chef, '972')


class residence_guyane(Variable):
    column = BoolCol
    entity_class = Familles

    def function(self, simulation, period):
        """True when the family head's municipality code starts with '973'."""
        depcom_roles = self.cast_from_entity_to_roles(
            simulation.compute('depcom', period))
        depcom_chef = self.filter_role(depcom_roles, role = CHEF)
        return period, startswith(depcom_chef, '973')


class residence_reunion(Variable):
    column = BoolCol
    entity_class = Familles

    def function(self, simulation, period):
        """True when the family head's municipality code starts with '974'."""
        depcom_roles = self.cast_from_entity_to_roles(
            simulation.compute('depcom', period))
        depcom_chef = self.filter_role(depcom_roles, role = CHEF)
        return period, startswith(depcom_chef, '974')


class residence_mayotte(Variable):
    column = BoolCol
    entity_class = Familles

    def function(self, simulation, period):
        """True when the family head's municipality code starts with '976'."""
        depcom_roles = self.cast_from_entity_to_roles(
            simulation.compute('depcom', period))
        depcom_chef = self.filter_role(depcom_roles, role = CHEF)
        return period, startswith(depcom_chef, '976')
| benjello/openfisca-france | openfisca_france/model/caracteristiques_socio_demographiques/logement.py | Python | agpl-3.0 | 4,749 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Natural Language Toolkit: Toolbox data file parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Greg Aumann <greg_aumann@sil.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""module for reading Toolbox data files
"""
from nltk.etree.ElementTree import Element, SubElement, TreeBuilder
from nltk.corpus.reader import toolbox
import re
from datetime import date
class ToolboxData(toolbox.ToolboxData):
    """Toolbox data file reader with chunk- and grammar-based record parsing."""

    def __init__(self):
        # BUG FIX: this previously called super(toolbox.ToolboxData, self),
        # which starts the MRO search *after* the parent class and therefore
        # skipped toolbox.ToolboxData.__init__ entirely.
        super(ToolboxData, self).__init__()

    def _tree2etree(self, parent, no_blanks):
        """Convert an NLTK chunk Tree into an ElementTree element.

        Leaves are (text, tag) pairs and become child elements; subtrees
        recurse. Blank leaf values are dropped when `no_blanks` is true.
        """
        from nltk.parse import Tree

        root = Element(parent.node)
        for child in parent:
            if isinstance(child, Tree):
                root.append(self._tree2etree(child, no_blanks))
            else:
                text, tag = child
                if no_blanks == False or text:
                    e = SubElement(root, tag)
                    e.text = text
        return root

    def chunk_parse(self, grammar, no_blanks=True, incomplete='record', **kwargs):
        """
        Returns an element tree structure corresponding to a toolbox data file
        parsed according to the chunk grammar.

        @type grammar: string
        @param grammar: Contains the chunking rules used to parse the
        database.  See L{chunk.RegExp} for documentation.
        @type no_blanks: boolean
        @param no_blanks: blank fields that are not important to the structure are deleted
        @type kwargs: keyword arguments dictionary
        @param incomplete: name of element used if parse doesn't result in one toplevel element
        @rtype: string
        @param kwargs: Keyword arguments passed to L{toolbox.StandardFormat.fields()}
        @rtype: ElementTree._ElementInterface
        @return: Contents of toolbox data parsed according to the rules in grammar
        """
        from nltk import chunk
        from nltk.parse import Tree

        cp = chunk.RegexpParser(grammar)
        db = self.parse(**kwargs)
        tb_etree = Element('toolbox_data')
        header = db.find('header')
        tb_etree.append(header)
        for record in db.findall('record'):
            parsed = cp.parse([(elem.text, elem.tag) for elem in record])
            top = parsed[0]
            if not isinstance(top, Tree) or len(parsed) != 1:
                # didn't get a full parse; wrap everything in the fallback tag
                parsed.node = incomplete
                top = parsed
            tb_etree.append(self._tree2etree(top, no_blanks))
        return tb_etree

    def _make_parse_table(self, grammar):
        """
        Return parsing state information used by tree_parser.

        For each nonterminal, maps every marker that can occur inside it to
        the (terminal or nonterminal) symbol to transition to.
        """
        first = dict()
        gram = dict()
        for sym, value in grammar.items():
            first[sym] = value[0]
            gram[sym] = value[0] + value[1]
        parse_table = dict()
        for state in gram.keys():
            parse_table[state] = dict()
            for to_sym in gram[state]:
                if to_sym in grammar:
                    # is a nonterminal
                    # assume all firsts are terminals
                    for i in first[to_sym]:
                        parse_table[state][i] = to_sym
                else:
                    parse_table[state][to_sym] = to_sym
        return (parse_table, first)

    def grammar_parse(self, startsym, grammar, no_blanks=True, **kwargs):
        """
        Returns an element tree structure corresponding to a toolbox data file
        parsed according to the grammar.

        @type startsym: string
        @param startsym: Start symbol used for the grammar
        @type grammar: dictionary of tuple of tuples
        @param grammar: Contains the set of rewrite rules used to parse the
        database.  See the description below.
        @type no_blanks: boolean
        @param no_blanks: blank fields that are not important to the structure are deleted
        @type kwargs: keyword arguments dictionary
        @param kwargs: Keyword arguments passed to L{toolbox.StandardFormat.fields()}
        @rtype: ElementTree._ElementInterface
        @return: Contents of toolbox data parsed according to rules in grammar

        The rewrite rules in the grammar look similar to those usually used in
        computer languages. The difference is that the ordering constraints
        that are usually present are relaxed in this parser. The reason is that
        toolbox databases seldom have consistent ordering of fields. Hence the
        right side of each rule consists of a tuple with two parts. The
        fields in the first part mark the start of nonterminal.
        Each of them can occur only once and all those must
        occur before any of the fields in the second part of that nonterminal.
        Otherwise they are interpreted as marking the start
        of another one of the same nonterminal. If there is more than one
        in the first part of the tuple they do not need to all appear in a parse.
        The fields in the second part of the tuple can occur in any order.

        Sample grammar::

            grammar = {
                'toolbox':  (('_sh',), ('_DateStampHasFourDigitYear', 'entry')),
                'entry':    (('lx',), ('hm', 'sense', 'dt')),
                'sense':    (('sn', 'ps'), ('pn', 'gv', 'dv',
                                            'gn', 'gp', 'dn', 'rn',
                                            'ge', 'de', 're',
                                            'example', 'lexfunc')),
                'example':  (('rf', 'xv',), ('xn', 'xe')),
                'lexfunc':  (('lf',), ('lexvalue',)),
                'lexvalue': (('lv',), ('ln', 'le')),
            }
        """
        parse_table, first = self._make_parse_table(grammar)
        builder = TreeBuilder()
        pstack = list()
        state = startsym
        first_elems = list()
        pstack.append((state, first_elems))
        builder.start(state, {})
        field_iter = self.fields(**kwargs)
        loop = True
        try:
            mkr, value = field_iter.next()
        except StopIteration:
            loop = False
        while loop:
            (state, first_elems) = pstack[-1]
            if mkr in parse_table[state]:
                next_state = parse_table[state][mkr]
                if next_state == mkr:
                    if mkr in first[state]:
                        # may be start of a new nonterminal
                        if mkr not in first_elems:
                            # not a new nonterminal
                            first_elems.append(mkr)
                            add = True
                        else:
                            # a new nonterminal, second or subsequent instance
                            add = False
                            if len(pstack) > 1:
                                builder.end(state)
                                pstack.pop()
                            else:
                                # BUG FIX (portability): use the call form of
                                # raise rather than the removed tuple syntax.
                                raise ValueError(
                                    'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr))
                    else:
                        # start of terminal marker
                        add = True
                    if add:
                        if not no_blanks or value:
                            builder.start(mkr, dict())
                            builder.data(value)
                            builder.end(mkr)
                        try:
                            mkr, value = field_iter.next()
                        except StopIteration:
                            loop = False
                else:
                    # a non terminal, first instance
                    first_elems = list()
                    builder.start(next_state, dict())
                    pstack.append((next_state, first_elems))
            else:
                if len(pstack) > 1:
                    builder.end(state)
                    pstack.pop()
                else:
                    raise ValueError(
                        'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr))
        # Close every nonterminal still open at end of input.
        for state, first_elems in reversed(pstack):
            builder.end(state)
        return builder.close()
def indent(elem, level=0):
    """
    Recursive function to indent an ElementTree._ElementInterface
    used for pretty printing. Code from
    U{http://www.effbot.org/zone/element-lib.htm}. To use run indent
    on elem and then output in the normal way.

    @param elem: element to be indented. will be modified.
    @type elem: ElementTree._ElementInterface
    @param level: level of indentation for this element
    @type level: nonnegative integer
    @rtype:   ElementTree._ElementInterface
    @return:  Contents of elem indented to reflect its structure
    """
    pad = "\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        child = None
        for child in elem:
            indent(child, level + 1)
        # Outdent the *last* child's tail so the closing tag lines up.
        if not child.tail or not child.tail.strip():
            child.tail = pad
    elif level and (not elem.tail or not elem.tail.strip()):
        elem.tail = pad
def to_sfm_string(tree, encoding=None, errors='strict', unicode_fields=None):
    """Return a string with a standard format representation of the toolbox
    data in tree (tree can be a toolbox database or a single record). Should work for trees
    parsed by grammar_parse too.

    @param tree: flat representation of toolbox data (whole database or single record)
    @type tree: ElementTree._ElementInterface
    @param encoding: Name of an encoding to use.
    @type encoding: string
    @param errors: Error handling scheme for codec. Same as the C{encode}
        inbuilt string method.
    @type errors: string
    @param unicode_fields: accepted for API compatibility; not used yet.
    @type unicode_fields: string
    @rtype:   string
    @return:  string using standard format markup
    """
    pieces = list()
    _to_sfm_string(tree, pieces, encoding=encoding, errors=errors,
                   unicode_fields=unicode_fields)
    result = ''.join(pieces)
    if encoding is None:
        return result
    return result.encode(encoding, errors)

# Matches any non-whitespace character: decides whether a field value is
# substantive enough to warrant a separating space after the marker.
_is_value = re.compile(r"\S")

def _to_sfm_string(node, l, **kwargs):
    """Recursively append the SFM rendering of `node` to the list `l`."""
    if len(node):
        for child in node:
            _to_sfm_string(child, l, **kwargs)
        return
    text = node.text
    if text is None:
        l.append('\\%s\n' % node.tag)
    elif re.search(_is_value, text):
        l.append('\\%s %s\n' % (node.tag, text))
    else:
        # Whitespace-only value: keep it verbatim, no separating space.
        l.append('\\%s%s\n' % (node.tag, text))
    return
# Lazily-built mapping from month abbreviation to month number (1-12).
_months = None
_month_abbr = (
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')

def _init_months():
    """Return a dict mapping month abbreviation ('Jan'...) to 1-12."""
    months = dict()
    for i, m in enumerate(_month_abbr):
        months[m] = i + 1
    return months

def to_date(s, four_digit_year=True):
    """return a date object corresponding to the Toolbox date in s.

    Toolbox dates are day/month-abbreviation/year, e.g. '07/Mar/2005'.

    @param s: Toolbox date
    @type s: string
    @param four_digit_year: Do Toolbox dates use four digits for the year?
        Defaults to True.
    @type four_digit_year: boolean
    @return: date
    @rtype: datetime.date
    """
    global _months
    if _months is None:
        _months = _init_months()
    fields = s.split('/')
    if len(fields) != 3:
        raise ValueError('Invalid Toolbox date "%s"' % s)
    day = int(fields[0])
    try:
        month = _months[fields[1]]
    except KeyError:
        raise ValueError('Invalid Toolbox date "%s"' % s)
    year = int(fields[2])
    return date(year, month, day)

def from_date(d, four_digit_year=True):
    """return a Toolbox date string corresponding to the date in d.

    @param d: date
    @type d: datetime.date
    @param four_digit_year: Do Toolbox dates use four digits for the year?
        Defaults to True.  (Currently unused; output is always 4-digit.)
    @type four_digit_year: boolean
    @return: Toolbox date
    @rtype: string
    """
    # BUG FIX: previously read attributes off the `date` *class* instead of
    # the parameter `d` (a TypeError at runtime), and swapped the %04d/%02d
    # widths.  Emit day/monthabbr/year to mirror what to_date parses.
    return '%02d/%s/%04d' % (d.day, _month_abbr[d.month - 1], d.year)
def demo_flat():
    """Parse a sample Toolbox database and write it to stdout as XML."""
    from nltk.etree.ElementTree import ElementTree
    import sys
    # NOTE(review): relies on a module-level `toolbox` name (imported
    # elsewhere in this file) and on 'iu_mien_samp.db' being present in
    # the working directory.
    tree = ElementTree(toolbox.xml('iu_mien_samp.db', key='lx', encoding='utf8'))
    tree.write(sys.stdout)
if __name__ == '__main__':
    demo_flat()
| hectormartinez/rougexstem | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/toolbox/data.py | Python | apache-2.0 | 12,543 |
from django.views.generic import DetailView
from .items import Item
class ItemDetailView(DetailView):
    """Render the detail page for a single Item.

    The object is constructed directly from the ``pk`` URL keyword
    argument rather than being fetched through a queryset.
    """
    context_object_name = 'item'
    template_name = 'item-detail.html'

    def get_object(self):
        """Build and return the Item identified by the ``pk`` URL kwarg."""
        item_pk = self.kwargs['pk']
        return Item(item_pk)
| wowref/wowref.com | wowref/wotlk/views.py | Python | mit | 243 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:11235")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:11235")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | Jacklord1/franko | contrib/bitrpc/bitrpc.py | Python | mit | 7,837 |
from django.conf import settings
# Each setting falls back to a default when the corresponding PAGEDOWN_*
# name is absent from the project's Django settings.

# Whether a live preview pane is enabled for the widget.
SHOW_PREVIEW = getattr(settings, 'PAGEDOWN_SHOW_PREVIEW', True)
# Template used to render the pagedown widget.
WIDGET_TEMPLATE = getattr(settings, 'PAGEDOWN_WIDGET_TEMPLATE', 'pagedown/widgets/default.html')
# Stylesheet path(s) served with the widget (tuple of static-file paths).
WIDGET_CSS = getattr(settings, 'PAGEDOWN_WIDGET_CSS', ('pagedown/demo/browser/demo.css', ))
# Extra extensions — presumably passed through to the Markdown renderer;
# confirm against the widget implementation.
EXTENSIONS = getattr(settings, 'PAGEDOWN_EXTENSIONS', [])
from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
from .fields import (
AddField, AlterField, FieldOperation, RemoveField, RenameField,
)
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
class ModelOperation(Operation):
    """Common base for migration operations that act on a whole model."""
    def __init__(self, name):
        self.name = name
    @cached_property
    def name_lower(self):
        # Cached: consulted repeatedly during migration optimization.
        return self.name.lower()
    def references_model(self, name, app_label=None):
        return name.lower() == self.name_lower
    def reduce(self, operation, in_between, app_label=None):
        # An operation that never mentions this model can be optimized
        # across; anything that references it blocks reduction by default.
        return (
            super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
            not operation.references_model(self.name, app_label)
        )
class CreateModel(ModelOperation):
    """
    Create a model's table.

    Also acts as an optimizer sink: subsequent field/rename/delete
    operations on the same model can be folded into this one (see
    ``reduce``).
    """
    serialization_expand_args = ['fields', 'options', 'managers']
    def __init__(self, name, fields, options=None, bases=None, managers=None):
        self.fields = fields
        self.options = options or {}
        self.bases = bases or (models.Model,)
        self.managers = managers or []
        super(CreateModel, self).__init__(name)
        # Sanity-check that there are no duplicated field names, bases, or
        # manager names
        _check_for_duplicates('fields', (name for name, _ in self.fields))
        _check_for_duplicates('bases', (
            base._meta.label_lower if hasattr(base, '_meta') else
            base.lower() if isinstance(base, six.string_types) else base
            for base in self.bases
        ))
        _check_for_duplicates('managers', (name for name, _ in self.managers))
    def deconstruct(self):
        # Return (class name, args, kwargs) for serializing this operation
        # back into a migration file; defaults are omitted from kwargs.
        kwargs = {
            'name': self.name,
            'fields': self.fields,
        }
        if self.options:
            kwargs['options'] = self.options
        if self.bases and self.bases != (models.Model,):
            kwargs['bases'] = self.bases
        if self.managers and self.managers != [('objects', models.Manager())]:
            kwargs['managers'] = self.managers
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        # Register the new model in the in-memory project state.
        state.add_model(ModelState(
            app_label,
            self.name,
            list(self.fields),
            dict(self.options),
            tuple(self.bases),
            list(self.managers),
        ))
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)
    def describe(self):
        return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
    def references_model(self, name, app_label=None):
        # True if this operation creates, inherits from, or points a
        # relation at the named model.
        name_lower = name.lower()
        if name_lower == self.name_lower:
            return True
        # Check we didn't inherit from the model
        models_to_check = [
            base for base in self.bases
            if base is not models.Model and isinstance(base, (models.base.ModelBase, six.string_types))
        ]
        # Check we have no FKs/M2Ms with it
        for fname, field in self.fields:
            if field.remote_field:
                models_to_check.append(field.remote_field.model)
        # Now go over all the models and check against them
        for model in models_to_check:
            model_app_label, model_name = self.model_to_key(model)
            if model_name.lower() == name_lower:
                if app_label is None or not model_app_label or model_app_label == app_label:
                    return True
        return False
    def model_to_key(self, model):
        """
        Take either a model class or an "app_label.ModelName" string
        and return (app_label, object_name).
        """
        if isinstance(model, six.string_types):
            return model.split(".", 1)
        else:
            return model._meta.app_label, model._meta.object_name
    def reduce(self, operation, in_between, app_label=None):
        # Fold later operations on the same model into this CreateModel
        # so the optimizer emits fewer operations overall.
        if (isinstance(operation, DeleteModel) and
                self.name_lower == operation.name_lower and
                not self.options.get("proxy", False)):
            # Create-then-delete cancels out entirely (except for proxies).
            return []
        elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower:
            # Create-then-rename becomes a create under the new name.
            return [
                CreateModel(
                    operation.new_name,
                    fields=self.fields,
                    options=self.options,
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower:
            if isinstance(operation, AddField):
                # Don't allow optimizations of FKs through models they reference
                if hasattr(operation.field, "remote_field") and operation.field.remote_field:
                    for between in in_between:
                        # Check that it doesn't point to the model
                        app_label, object_name = self.model_to_key(operation.field.remote_field.model)
                        if between.references_model(object_name, app_label):
                            # False = "cannot optimize across this operation".
                            return False
                        # Check that it's not through the model
                        if getattr(operation.field.remote_field, "through", None):
                            app_label, object_name = self.model_to_key(operation.field.remote_field.through)
                            if between.references_model(object_name, app_label):
                                return False
                return [
                    CreateModel(
                        self.name,
                        fields=self.fields + [(operation.name, operation.field)],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, AlterField):
                # Swap in the altered field definition in place.
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, operation.field if n == operation.name else v)
                            for n, v in self.fields
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RemoveField):
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, v)
                            for n, v in self.fields
                            if n.lower() != operation.name_lower
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RenameField):
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (operation.new_name if n == operation.old_name else n, v)
                            for n, v in self.fields
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
        return super(CreateModel, self).reduce(operation, in_between, app_label=app_label)
class DeleteModel(ModelOperation):
    """
    Drops a model's table.
    """
    def deconstruct(self):
        # Serialize back to migration-file form; only the name is needed.
        kwargs = {
            'name': self.name,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        state.remove_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reversing a delete re-creates the table from the target state.
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)
    def describe(self):
        return "Delete model %s" % (self.name, )
class RenameModel(ModelOperation):
    """
    Renames a model.

    Both the project state and the database schema are updated, including
    every inbound FK/M2M and any auto-created M2M through tables whose
    names derive from this model's name.
    """
    def __init__(self, old_name, new_name):
        self.old_name = old_name
        self.new_name = new_name
        super(RenameModel, self).__init__(old_name)
    @cached_property
    def old_name_lower(self):
        return self.old_name.lower()
    @cached_property
    def new_name_lower(self):
        return self.new_name.lower()
    def deconstruct(self):
        kwargs = {
            'old_name': self.old_name,
            'new_name': self.new_name,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        # In cases where state doesn't have rendered apps, prevent subsequent
        # reload_model() calls from rendering models for performance
        # reasons. This method should be refactored to avoid relying on
        # state.apps (#27310).
        reset_apps = 'apps' not in state.__dict__
        apps = state.apps
        model = apps.get_model(app_label, self.old_name)
        model._meta.apps = apps
        # Get all of the related objects we need to repoint
        all_related_objects = (
            f for f in model._meta.get_fields(include_hidden=True)
            if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)
        )
        if reset_apps:
            del state.__dict__['apps']
        # Rename the model
        state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]
        state.models[app_label, self.new_name_lower].name = self.new_name
        state.remove_model(app_label, self.old_name_lower)
        # Repoint the FKs and M2Ms pointing to us
        for related_object in all_related_objects:
            if related_object.model is not model:
                # The model being renamed does not participate in this relation
                # directly. Rather, a superclass does.
                continue
            # Use the new related key for self referential related objects.
            if related_object.related_model == model:
                related_key = (app_label, self.new_name_lower)
            else:
                related_key = (
                    related_object.related_model._meta.app_label,
                    related_object.related_model._meta.model_name,
                )
            new_fields = []
            for name, field in state.models[related_key].fields:
                if name == related_object.field.name:
                    # Clone so shared field instances aren't mutated in place.
                    field = field.clone()
                    field.remote_field.model = "%s.%s" % (app_label, self.new_name)
                new_fields.append((name, field))
            state.models[related_key].fields = new_fields
            state.reload_model(*related_key)
        # Repoint M2Ms with through pointing to us
        related_models = {
            f.remote_field.model for f in model._meta.fields
            if getattr(f.remote_field, 'model', None)
        }
        model_name = '%s.%s' % (app_label, self.old_name)
        for related_model in related_models:
            if related_model == model:
                related_key = (app_label, self.new_name_lower)
            else:
                related_key = (related_model._meta.app_label, related_model._meta.model_name)
            new_fields = []
            changed = False
            for name, field in state.models[related_key].fields:
                if field.is_relation and field.many_to_many and field.remote_field.through == model_name:
                    field = field.clone()
                    field.remote_field.through = '%s.%s' % (app_label, self.new_name)
                    changed = True
                new_fields.append((name, field))
            if changed:
                state.models[related_key].fields = new_fields
                state.reload_model(*related_key)
        state.reload_model(app_label, self.new_name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.new_name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.old_name)
            # Move the main table
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Alter the fields pointing to us
            for related_object in old_model._meta.related_objects:
                if related_object.related_model == old_model:
                    model = new_model
                    related_key = (app_label, self.new_name_lower)
                else:
                    model = related_object.related_model
                    related_key = (
                        related_object.related_model._meta.app_label,
                        related_object.related_model._meta.model_name,
                    )
                to_field = to_state.apps.get_model(
                    *related_key
                )._meta.get_field(related_object.field.name)
                schema_editor.alter_field(
                    model,
                    related_object.field,
                    to_field,
                )
            # Rename M2M fields whose name is based on this model's name.
            fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
            for (old_field, new_field) in fields:
                # Skip self-referential fields as these are renamed above.
                if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
                    continue
                # Rename the M2M table that's based on this model's name.
                old_m2m_model = old_field.remote_field.through
                new_m2m_model = new_field.remote_field.through
                schema_editor.alter_db_table(
                    new_m2m_model,
                    old_m2m_model._meta.db_table,
                    new_m2m_model._meta.db_table,
                )
                # Rename the column in the M2M table that's based on this
                # model's name.
                schema_editor.alter_field(
                    new_m2m_model,
                    old_m2m_model._meta.get_field(old_model._meta.model_name),
                    new_m2m_model._meta.get_field(new_model._meta.model_name),
                )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Swap old/new names, run the forwards rename, then swap back so
        # this operation object stays reusable.
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
        self.database_forwards(app_label, schema_editor, from_state, to_state)
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
    def references_model(self, name, app_label=None):
        # Both the source and the target name count as references.
        return (
            name.lower() == self.old_name_lower or
            name.lower() == self.new_name_lower
        )
    def describe(self):
        return "Rename model %s to %s" % (self.old_name, self.new_name)
    def reduce(self, operation, in_between, app_label=None):
        if (isinstance(operation, RenameModel) and
                self.new_name_lower == operation.old_name_lower):
            # Two chained renames collapse into one A->C rename.
            return [
                RenameModel(
                    self.old_name,
                    operation.new_name,
                ),
            ]
        # Skip `ModelOperation.reduce` as we want to run `references_model`
        # against self.new_name.
        return (
            super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
            not operation.references_model(self.new_name, app_label)
        )
class AlterModelTable(ModelOperation):
    """
    Renames a model's table (Meta.db_table), including any auto-created
    M2M through tables whose names derive from it.
    """
    def __init__(self, name, table):
        self.table = table
        super(AlterModelTable, self).__init__(name)
    def deconstruct(self):
        kwargs = {
            'name': self.name,
            'table': self.table,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        state.models[app_label, self.name_lower].options["db_table"] = self.table
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Rename M2M fields whose name is based on this model's db_table
            for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
                if new_field.remote_field.through._meta.auto_created:
                    schema_editor.alter_db_table(
                        new_field.remote_field.through,
                        old_field.remote_field.through._meta.db_table,
                        new_field.remote_field.through._meta.db_table,
                    )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: the rename is driven by the two states, so the same
        # code works in both directions.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def describe(self):
        return "Rename table for %s to %s" % (
            self.name,
            self.table if self.table is not None else "(default)"
        )
    def reduce(self, operation, in_between, app_label=None):
        # A later table rename or a model delete supersedes this operation.
        if isinstance(operation, (AlterModelTable, DeleteModel)) and self.name_lower == operation.name_lower:
            return [operation]
        return super(AlterModelTable, self).reduce(operation, in_between, app_label=app_label)
class ModelOptionOperation(ModelOperation):
    """Base for operations that alter a single Meta option of one model."""
    def reduce(self, operation, in_between, app_label=None):
        # A later operation of the same class (or a DeleteModel) on the
        # same model makes this one redundant: the later operation wins.
        if isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower:
            return [operation]
        return super(ModelOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class FieldRelatedOptionOperation(ModelOptionOperation):
    """Base for Meta-option operations that may reference specific fields."""
    def reduce(self, operation, in_between, app_label=None):
        # A field operation on the same model may be moved before this one
        # as long as this option does not mention the field being changed.
        if (isinstance(operation, FieldOperation) and
                self.name_lower == operation.model_name_lower and
                not self.references_field(operation.model_name, operation.name)):
            return [operation, self]
        return super(FieldRelatedOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class AlterUniqueTogether(FieldRelatedOptionOperation):
    """
    Changes the value of unique_together to the target one.
    Input value of unique_together must be a set of tuples.
    """
    option_name = "unique_together"
    def __init__(self, name, unique_together):
        # Normalize a single tuple into a tuple-of-tuples, then store as a
        # set of tuples for order-insensitive comparison.
        unique_together = normalize_together(unique_together)
        self.unique_together = set(tuple(cons) for cons in unique_together)
        super(AlterUniqueTogether, self).__init__(name)
    def deconstruct(self):
        kwargs = {
            'name': self.name,
            'unique_together': self.unique_together,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.unique_together
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_unique_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: driven entirely by the two states.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_field(self, model_name, name, app_label=None):
        # NOTE: an empty constraint set matches any field name on this model.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.unique_together or
                any((name in together) for together in self.unique_together)
            )
        )
    def describe(self):
        # `or ''` guards len() against the option being None.
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(FieldRelatedOptionOperation):
    """
    Changes the value of index_together to the target one.
    Input value of index_together must be a set of tuples.

    Structurally parallel to AlterUniqueTogether.
    """
    option_name = "index_together"
    def __init__(self, name, index_together):
        # Normalize a single tuple into a tuple-of-tuples, then store as a
        # set of tuples for order-insensitive comparison.
        index_together = normalize_together(index_together)
        self.index_together = set(tuple(cons) for cons in index_together)
        super(AlterIndexTogether, self).__init__(name)
    def deconstruct(self):
        kwargs = {
            'name': self.name,
            'index_together': self.index_together,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.index_together
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_index_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: driven entirely by the two states.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_field(self, model_name, name, app_label=None):
        # NOTE: an empty index set matches any field name on this model.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.index_together or
                any((name in together) for together in self.index_together)
            )
        )
    def describe(self):
        # `or ''` guards len() against the option being None.
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(FieldRelatedOptionOperation):
    """
    Represents a change with the order_with_respect_to option.

    Adds or removes the implicit `_order` column as the option is set or
    cleared.
    """
    def __init__(self, name, order_with_respect_to):
        self.order_with_respect_to = order_with_respect_to
        super(AlterOrderWithRespectTo, self).__init__(name)
    def deconstruct(self):
        kwargs = {
            'name': self.name,
            'order_with_respect_to': self.order_with_respect_to,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options['order_with_respect_to'] = self.order_with_respect_to
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        to_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, to_model):
            from_model = from_state.apps.get_model(app_label, self.name)
            # Remove a field if we need to
            if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
                schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
            # Add a field if we need to (altering the column is untouched as
            # it's likely a rename)
            elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
                field = to_model._meta.get_field("_order")
                if not field.has_default():
                    # Existing rows need some value for the new NOT NULL column.
                    field.default = 0
                schema_editor.add_field(
                    from_model,
                    field,
                )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: driven entirely by the two states.
        self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_field(self, model_name, name, app_label=None):
        # NOTE: a cleared option (None) matches any field name on this model.
        return (
            self.references_model(model_name, app_label) and
            (
                self.order_with_respect_to is None or
                name == self.order_with_respect_to
            )
        )
    def describe(self):
        return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(ModelOptionOperation):
    """
    Sets new model options that don't directly affect the database schema
    (like verbose_name, permissions, ordering). Python code in migrations
    may still need them.
    """
    # Model options we want to compare and preserve in an AlterModelOptions op
    ALTER_OPTION_KEYS = [
        "base_manager_name",
        "default_manager_name",
        "get_latest_by",
        "managed",
        "ordering",
        "permissions",
        "default_permissions",
        "select_on_save",
        "verbose_name",
        "verbose_name_plural",
    ]
    def __init__(self, name, options):
        self.options = options
        super(AlterModelOptions, self).__init__(name)
    def deconstruct(self):
        kwargs = {
            'name': self.name,
            'options': self.options,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options = dict(model_state.options)
        model_state.options.update(self.options)
        # Any ALTER_OPTION_KEYS entry absent from self.options is *removed*
        # from the state: this operation fully replaces those options.
        for key in self.ALTER_OPTION_KEYS:
            if key not in self.options and key in model_state.options:
                del model_state.options[key]
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Pure state change: nothing to do at the database level.
        pass
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass
    def describe(self):
        return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(ModelOptionOperation):
    """
    Alters the model's managers (a state-only change; no schema impact).
    """
    serialization_expand_args = ['managers']
    def __init__(self, name, managers):
        self.managers = managers
        super(AlterModelManagers, self).__init__(name)
    def deconstruct(self):
        # Unlike most operations here, managers are passed positionally.
        return (
            self.__class__.__name__,
            [self.name, self.managers],
            {}
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.managers = list(self.managers)
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Pure state change: nothing to do at the database level.
        pass
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass
    def describe(self):
        return "Change managers on %s" % (self.name, )
class IndexOperation(Operation):
    """Common base for AddIndex/RemoveIndex; both edit the `indexes` option."""
    option_name = 'indexes'
    @cached_property
    def model_name_lower(self):
        return self.model_name.lower()
class AddIndex(IndexOperation):
    """
    Add an index on a model.
    """
    def __init__(self, model_name, index):
        self.model_name = model_name
        # A name is mandatory so the index can later be located for removal.
        if not index.name:
            raise ValueError(
                "Indexes passed to AddIndex operations require a name "
                "argument. %r doesn't have one." % index
            )
        self.index = index
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        model_state.options[self.option_name].append(self.index)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.add_index(model, self.index)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.remove_index(model, self.index)
    def deconstruct(self):
        kwargs = {
            'model_name': self.model_name,
            'index': self.index,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs,
        )
    def describe(self):
        return 'Create index %s on field(s) %s of model %s' % (
            self.index.name,
            ', '.join(self.index.fields),
            self.model_name,
        )
class RemoveIndex(IndexOperation):
    """Drop a named index from a model."""

    def __init__(self, model_name, name):
        self.model_name = model_name
        self.name = name

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        kept = [index for index in model_state.options[self.option_name]
                if index.name != self.name]
        model_state.options[self.option_name] = kept

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.model_name)
        if not self.allow_migrate_model(schema_editor.connection.alias, model):
            return
        # The index definition lives in the *from* state (it is being removed).
        from_model_state = from_state.models[app_label, self.model_name_lower]
        schema_editor.remove_index(model, from_model_state.get_index_by_name(self.name))

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        model = to_state.apps.get_model(app_label, self.model_name)
        if not self.allow_migrate_model(schema_editor.connection.alias, model):
            return
        to_model_state = to_state.models[app_label, self.model_name_lower]
        schema_editor.add_index(model, to_model_state.get_index_by_name(self.name))

    def deconstruct(self):
        return (
            self.__class__.__name__,
            [],
            {'model_name': self.model_name, 'name': self.name},
        )

    def describe(self):
        return 'Remove index %s from %s' % (self.name, self.model_name)
| guettli/django | django/db/migrations/operations/models.py | Python | bsd-3-clause | 33,007 |
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.module_utils.network_common import load_provider
from imp import find_module, load_module
# Reuse the CLI's global display object when Ansible was launched from the
# command line; otherwise (e.g. API/tests) fall back to a standalone one.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class ActionModule(ActionBase):
    """Action handler for platform-agnostic ``net_*`` modules.

    Resolves the target network OS, opens (or reuses) a persistent network
    connection, then dispatches to the matching platform-specific module
    (e.g. ``net_command`` + ios -> ``ios_command``).
    """

    def run(self, tmp=None, task_vars=None):
        # The device session is opened from the controller below, so the
        # play itself must use connection=local.
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                'got %s' % self._play_context.connection
            )
        # Work on a copy so provider-derived settings don't leak into the
        # shared play context.
        play_context = copy.deepcopy(self._play_context)
        play_context.network_os = self._get_network_os(task_vars)
        # we should be able to stream line this a bit by creating a common
        # provider argument spec in module_utils/network_common.py or another
        # option is that there isn't a need to push provider into the module
        # since the connection is started in the action handler.
        # Dynamically import ansible.module_utils.<network_os> to obtain the
        # provider argument spec for that platform.
        f, p, d = find_module('ansible')
        f2, p2, d2 = find_module('module_utils', [p])
        f3, p3, d3 = find_module(play_context.network_os, [p2])
        module = load_module('ansible.module_utils.' + play_context.network_os, f3, p3, d3)
        self.provider = load_provider(module.get_provider_argspec(), self._task.args)
        # junos talks NETCONF (default port 830); all other platforms use the
        # CLI over SSH (default port 22). Provider values take precedence.
        if play_context.network_os == 'junos':
            play_context.connection = 'netconf'
            play_context.port = int(self.provider['port'] or self._play_context.port or 830)
        else:
            play_context.connection = 'network_cli'
            play_context.port = int(self.provider['port'] or self._play_context.port or 22)
        play_context.remote_addr = self.provider['host'] or self._play_context.remote_addr
        play_context.remote_user = self.provider['username'] or self._play_context.connection_user
        play_context.password = self.provider['password'] or self._play_context.password
        play_context.private_key_file = self.provider['ssh_keyfile'] or self._play_context.private_key_file
        play_context.timeout = int(self.provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
        if 'authorize' in self.provider.keys():
            play_context.become = self.provider['authorize'] or False
            play_context.become_pass = self.provider['auth_pass']
        # Expose the persistent-connection socket to the module via the
        # magic `ansible_socket` variable.
        socket_path = self._start_connection(play_context)
        task_vars['ansible_socket'] = socket_path
        if 'fail_on_missing_module' not in self._task.args:
            self._task.args['fail_on_missing_module'] = False
        result = super(ActionModule, self).run(tmp, task_vars)
        module = self._get_implementation_module(play_context.network_os, self._task.action)
        if not module:
            # Missing implementation is only fatal when the task asked for it.
            if self._task.args['fail_on_missing_module']:
                result['failed'] = True
            else:
                result['failed'] = False
            result['msg'] = ('Could not find implementation module %s for %s' %
                             (self._task.action, play_context.network_os))
        else:
            new_module_args = self._task.args.copy()
            # perhaps delete the provider argument here as well since the
            # module code doesn't need the information, the connection is
            # already started
            if 'network_os' in new_module_args:
                del new_module_args['network_os']
            del new_module_args['fail_on_missing_module']
            display.vvvv('Running implementation module %s' % module)
            result.update(self._execute_module(module_name=module,
                module_args=new_module_args, task_vars=task_vars,
                wrap_async=self._task.async))
            # Cache the resolved OS so later tasks skip re-discovery.
            display.vvvv('Caching network OS %s in facts' % play_context.network_os)
            result['ansible_facts'] = {'network_os': play_context.network_os}
        return result

    def _start_connection(self, play_context):
        # Start (or attach to) the persistent connection process, which keeps
        # the device session alive across tasks; returns the UNIX socket path.
        display.vvv('using connection plugin %s' % play_context.connection, play_context.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent',
            play_context, sys.stdin)
        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, play_context.remote_addr)
        if not socket_path:
            return {'failed': True,
                'msg': 'unable to open shell. Please see: ' +
                'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
        # make sure we are in the right cli context which should be
        # enable mode and not config module
        rc, out, err = connection.exec_command('prompt()')
        if str(out).strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            connection.exec_command('exit')
        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None
        return socket_path

    def _get_network_os(self, task_vars):
        # Resolution order: explicit task argument > inventory/play context >
        # previously cached fact > (future) connection-based discovery.
        if ('network_os' in self._task.args and self._task.args['network_os']):
            display.vvvv('Getting network OS from task argument')
            network_os = self._task.args['network_os']
        elif (self._play_context.network_os):
            display.vvvv('Getting network OS from inventory')
            network_os = self._play_context.network_os
        elif ('network_os' in task_vars['ansible_facts'] and
                task_vars['ansible_facts']['network_os']):
            display.vvvv('Getting network OS from fact')
            network_os = task_vars['ansible_facts']['network_os']
        else:
            # this will be replaced by the call to get_capabilities() on the
            # connection
            display.vvvv('Getting network OS from net discovery')
            network_os = None
        return network_os

    def _get_implementation_module(self, network_os, platform_agnostic_module):
        # e.g. net_command + ios -> ios_command; None if no such module ships.
        implementation_module = network_os + '_' + platform_agnostic_module.partition('_')[2]
        if implementation_module not in self._shared_loader_obj.module_loader:
            implementation_module = None
        return implementation_module
| Yannig/ansible | lib/ansible/plugins/action/net_base.py | Python | gpl-3.0 | 7,262 |
import numpy as np
import pytest
from mathpy.numerical.roots import newtonraph, bisection, secant
class TestRoots:
    """Root finders checked against the known roots of small test functions."""

    @staticmethod
    def _test_func1(x):
        # f(x) = x ** 2 - 10, positive root at sqrt(10)
        return x ** 2 - 10

    @staticmethod
    def _test_func2(x):
        # f(x) = x ** 3 - 2x - 5, real root near 2.0946
        return x ** 3 - 2 * x - 5

    @staticmethod
    def _test_func3(x):
        # identity function: root exactly at 0
        return x

    def test_newton_raph(self):
        sqrt_ten = 3.16227766017
        cubic_root = 2.094551481495814
        np.testing.assert_almost_equal(newtonraph(self._test_func1, 3)[0], sqrt_ten)
        np.testing.assert_almost_equal(newtonraph(self._test_func2, 2)[0], cubic_root)
        np.testing.assert_equal(newtonraph(self._test_func3, 0.0), 0.0)
        with pytest.raises(TypeError):
            # a non-callable "function" must be rejected
            newtonraph([1, 2, 3], 2)

    def test_bisection(self):
        sqrt_ten = 3.16227766017
        cubic_root = 2.094551481495814
        np.testing.assert_almost_equal(bisection(self._test_func1, 3, 4)[0], sqrt_ten)
        np.testing.assert_almost_equal(bisection(self._test_func2, 2, 3)[0], cubic_root)
        with pytest.raises(TypeError):
            bisection([1, 2, 3], 1, 2)
        with pytest.raises(ValueError):
            # [1, 2] does not bracket the root
            bisection(self._test_func1, 1, 2)
        with pytest.raises(ValueError):
            # [10, 11] does not bracket the root
            bisection(self._test_func1, 10, 11)

    def test_secant(self):
        sqrt_ten = 3.16227766017
        cubic_root = 2.094551481495814
        np.testing.assert_almost_equal(secant(self._test_func1, 0, 2)[0], sqrt_ten)
        np.testing.assert_almost_equal(secant(self._test_func2, 2, 3)[0], cubic_root)
        with pytest.raises(TypeError):
            secant([1, 2, 3], 1, 2)
| aschleg/mathpy | mathpy/numerical/tests/test_roots.py | Python | mit | 1,494 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import os
import imp
from environment import *
from mas import *
from parser_agent import *
class Project(Mas):
    """A multi-agent system project loaded from a .maspy description file.

    The file lists the agents (with an optional ``#N`` instance-count suffix)
    and, optionally, a custom environment class.  Agent sources (``.asl``) and
    the environment module (``.py``) are resolved relative to the directory of
    the .maspy file.
    """

    def __init__(self, maspy):
        # Context manager guarantees the file is closed even on parse errors.
        with open(maspy, 'r') as project_maspy:
            self.__load(os.path.dirname(project_maspy.name), project_maspy.read())

    def __load(self, maspy_path, project_content):
        # [TOOL] https://regex101.com/#python
        # Strip /* ... */ block comments.
        # BUGFIX: re.S used to be passed as re.sub's 4th positional argument,
        # i.e. as `count`, so the DOTALL flag never applied and multi-line
        # comments were not removed; it must go through the `flags` keyword.
        # The quantifier is non-greedy so code between two comment blocks
        # is not swallowed.
        project_content = re.sub(r'/\*.*?\*/', '', project_content, flags=re.S)
        # Strip // line comments.
        project_content = re.sub(r'//.*', '', project_content)
        # Locate the agent list: agents = [name1, name2#3, ...]
        self.agents = []
        regex_agents = r'\s*agents\s*=\s*\[(.*)\]'
        agents_name = re.findall(regex_agents, project_content, re.S)
        if agents_name:
            # Drop whitespace, then split into individual agent names.
            agents_name = re.sub(' ', '', agents_name[0])
            agents_name = re.split(',', agents_name)
            for agent_name in agents_name:
                position_hashtag = agent_name.find('#')
                if position_hashtag >= 0:
                    # "name#N" means N numbered instances of the same agent,
                    # all parsed from the same .asl source.
                    number_instances = int(agent_name[position_hashtag + 1:])
                    agent_name = agent_name[:position_hashtag]
                    agent_maspy = '%s/%s.asl' % (maspy_path, agent_name)
                    # TODO: improve the instance-naming scheme.
                    for i in range(1, number_instances + 1):
                        instance_name = '%s%s' % (agent_name, i)
                        parser = ParserAgent(instance_name, agent_maspy)
                        self.agents.append(parser.agent)
                else:
                    agent_maspy = '%s/%s.asl' % (maspy_path, agent_name)
                    parser = ParserAgent(agent_name, agent_maspy)
                    self.agents.append(parser.agent)
        # Keep the agents in a deterministic order (sort() comes from Mas).
        self.sort(self.agents)
        # Locate the environment; default to the base Environment when the
        # project does not declare a custom one.
        self.environment = Environment()
        regex_environment = r'\s*environment\s*=\s*(\w*)'
        environment_content = re.findall(regex_environment, project_content)
        if environment_content:
            environment_content = environment_content[0]
            # Load the custom environment class of the same name from
            # <maspy_path>/<name>.py.
            environment_maspy = '%s/%s.py' % (maspy_path, environment_content)
            module = imp.load_source(environment_content, environment_maspy)
            EnvironmentClass = getattr(module, environment_content)
            self.environment = EnvironmentClass()
| andrellsantos/agentspeak-py | agentspeak-py/project.py | Python | gpl-3.0 | 2,773 |
from django.contrib import admin
from forum.models import Forum, Thread, Post, Subscription
class ForumAdmin(admin.ModelAdmin):
    """Admin options for Forum.

    The slug is pre-populated from the title while typing.
    NOTE(review): '_parents_repr' is provided by the Forum model; it
    presumably renders the parent chain -- confirm on the model.
    """
    list_display = ('title', '_parents_repr')
    list_filter = ('groups',)
    ordering = ['ordering', 'parent', 'title']
    prepopulated_fields = {"slug": ("title",)}
class SubscriptionAdmin(admin.ModelAdmin):
    """Admin options for Subscription: list who subscribed to which thread."""
    list_display = ['author','thread']
class ThreadAdmin(admin.ModelAdmin):
    """Admin options for Thread, filterable by forum."""
    list_display = ('title', 'forum', 'latest_post_time')
    list_filter = ('forum',)
class PostAdmin(admin.ModelAdmin):
    """Admin options for Post, searchable by author, body and thread."""
    list_display = ('_author', 'time', '_forum', 'thread')
    list_filter = ('thread__forum',)
    search_fields = ('author__email', 'body', 'thread__title', 'thread__forum__title')

    def _forum(self, obj):
        # Surface the forum through the post's thread in the change list.
        thread = obj.thread
        return thread.forum

    def _author(self, obj):
        # Display the author's e-mail address instead of the raw user object.
        author = obj.author
        return author.email
# Wire the forum models into the Django admin site.
admin.site.register(Forum, ForumAdmin)
admin.site.register(Thread, ThreadAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Subscription, SubscriptionAdmin)
| farstarinc/django-forum | forum/admin.py | Python | bsd-3-clause | 1,022 |
# Copyright (c) 2008-2015, Michael J. Silbersack
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import commands
import glob
import os
import shutil
import sys
directory = sys.argv[1]
directory2 = sys.argv[2]
files = glob.glob("%s/*.processed" % (directory))
files.sort()
total = 0
changed = 0
unchanged = 0
for file in files:
total += 1
filepart = file.split(directory)[1]
dirpart = file.split("/")[0]
packetpart = filepart.split(".")[0]
packetpart2 = packetpart + "%em1.pcap.txt"
rv = commands.getstatusoutput("diff -u %s/%s %s/%s" % (directory, filepart, directory2, filepart))
if rv[0] != 0:
changed += 1
print "Differences found in", packetpart
print rv[1]
else:
unchanged += 1
print "%s total files, %s changed, %s unchanged" % (total, changed, unchanged)
| gvnn3/tcpdiff | compare.py | Python | bsd-2-clause | 2,044 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
* _classic_read: is a classic sql fields
* _type : field type
* _auto_join: for one2many and many2one fields, tells whether select
queries will join the relational table instead of replacing the
field condition by an equivalent-one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import functools
import logging
import pytz
import re
import xmlrpclib
from operator import itemgetter
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_round, float_repr
from openerp.tools import html_sanitize
import simplejson
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
    """ Base of all fields, a database column

        An instance of this object is a *description* of a database column. It will
        not hold any data, but only provide the methods to manipulate data of an
        ORM record or even prepare/update the database to hold such a field of data.
    """
    _classic_read = True
    _classic_write = True
    _auto_join = False
    _prefetch = True
    _properties = False
    _type = 'unknown'
    _obj = None
    _multi = False
    _symbol_c = '%s'
    _symbol_f = _symbol_set
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None
    _deprecated = False

    copy = True  # whether value is copied by BaseModel.copy()
    string = None
    help = ""
    required = False
    readonly = False
    _domain = []
    _context = {}
    states = None
    priority = 0
    change_default = False
    size = None
    ondelete = None
    translate = False
    select = False
    manual = False
    write = False
    read = False
    selectable = True
    group_operator = False
    groups = False  # CSV list of ext IDs of groups
    deprecated = False  # Optional deprecation warning

    def __init__(self, string='unknown', required=False, readonly=False, domain=None, context=None, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
        """
        The 'manual' keyword argument specifies if the field is a custom one.
        It corresponds to the 'state' column in ir_model_fields.
        """
        args0 = {
            'string': string,
            'required': required,
            'readonly': readonly,
            '_domain': domain,
            '_context': context,
            'states': states,
            'priority': priority,
            'change_default': change_default,
            'size': size,
            'ondelete': ondelete.lower() if ondelete else None,
            'translate': translate,
            'select': select,
            'manual': manual,
        }
        # Only set truthy values as instance attributes; falsy ones fall back
        # to the class-level defaults, keeping instances small.
        for key, val in args0.iteritems():
            if val:
                setattr(self, key, val)
        self._args = args
        for key, val in args.iteritems():
            setattr(self, key, val)
        # Prefetch only classic-write, non-deprecated columns.
        # NOTE(review): the historical comment also mentioned self.groups,
        # which is not checked here -- confirm this is intended.
        if not self._classic_write or self.deprecated:
            self._prefetch = False

    def new(self, **args):
        """ return a column like `self` with the given parameters """
        # memory optimization: reuse self whenever possible; you can reduce the
        # average memory usage per registry by 10 megabytes!
        return self if self.same_parameters(args) else type(self)(**args)

    def same_parameters(self, args):
        """ Tell whether rebuilding this column with `args` would give an
        equivalent column (used by new() to reuse self). """
        dummy = object()
        return all(
            # either both are falsy, or they are equal
            (not val1 and not val) or (val1 == val)
            for key, val in args.iteritems()
            for val1 in [getattr(self, key, getattr(self, '_' + key, dummy))]
        )

    def to_field(self):
        """ convert column `self` to a new-style field """
        from openerp.fields import Field
        return Field.by_type[self._type](**self.to_field_args())

    def to_field_args(self):
        """ return a dictionary with all the arguments to pass to the field """
        base_items = [
            ('column', self), # field interfaces self
            ('copy', self.copy),
        ]
        # Falsy attributes are omitted so the field falls back to its defaults.
        truthy_items = filter(itemgetter(1), [
            ('index', self.select),
            ('manual', self.manual),
            ('string', self.string),
            ('help', self.help),
            ('readonly', self.readonly),
            ('required', self.required),
            ('states', self.states),
            ('groups', self.groups),
            ('change_default', self.change_default),
            ('deprecated', self.deprecated),
            ('size', self.size),
            ('ondelete', self.ondelete),
            ('translate', self.translate),
            ('domain', self._domain),
            ('context', self._context),
        ])
        return dict(base_items + truthy_items + self._args.items())

    def restart(self):
        # Hook for columns holding database-dependent state; no-op by default.
        pass

    def set(self, cr, obj, id, name, value, user=None, context=None):
        """ Write `value` into column `name` of record `id` directly in SQL. """
        cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))

    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        # Non-classic-read columns must implement their own get().
        raise Exception(_('undefined get method !'))

    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
        """ Default search: delegate to the model with an ilike condition. """
        ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
        res = obj.read(cr, uid, ids, [name], context=context)
        return [x[name] for x in res]

    def as_display_name(self, cr, uid, obj, value, context=None):
        """Converts a field value to a suitable string representation for a record,
           e.g. when this field is used as ``rec_name``.

           :param obj: the ``BaseModel`` instance this column belongs to
           :param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
                         for this column
        """
        # delegated to class method, so a column type A can delegate
        # to a column type B.
        # BUGFIX: propagate the caller's context (previously dropped by passing
        # context=None), so e.g. reference columns resolve display names with
        # the proper language/timezone context.
        return self._as_display_name(self, cr, uid, obj, value, context=context)

    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # This needs to be a class method, in case a column type A as to delegate
        # to a column type B.
        return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
    """Boolean column; values are coerced through bool() on write."""
    _type = 'boolean'
    _symbol_c = '%s'
    _symbol_f = bool
    _symbol_set = (_symbol_c, _symbol_f)

    def __init__(self, string='unknown', required=False, **args):
        super(boolean, self).__init__(string=string, required=required, **args)
        if required:
            # required has no effect on booleans: NULL reads back as False.
            _logger.debug(
                "required=True is deprecated: making a boolean field"
                " `required` has no effect, as NULL values are "
                "automatically turned into False. args: %r",args)
class integer(_column):
    """Integer column; None/False is written and read back as 0."""
    _type = 'integer'
    _symbol_c = '%s'
    _symbol_f = lambda x: int(x or 0)
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self,x: x or 0

    def __init__(self, string='unknown', required=False, **args):
        super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
    """Column holding a "model,id" pointer to a record of an arbitrary model
    chosen among `selection`."""
    _type = 'reference'
    _classic_read = False # post-process to handle missing target

    def __init__(self, string, selection, size=None, **args):
        if callable(selection):
            # Wrap callable selections so they receive (cr, uid, context).
            from openerp import api
            selection = api.expected(api.cr_uid_context, selection)
        _column.__init__(self, string=string, size=size, selection=selection, **args)

    def to_field_args(self):
        args = super(reference, self).to_field_args()
        args['selection'] = self.selection
        return args

    def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
        result = {}
        # copy initial values fetched previously.
        for value in values:
            result[value['id']] = value[name]
            if value[name]:
                model, res_id = value[name].split(',')
                # Dangling references (target record deleted) read as False.
                if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
                    result[value['id']] = False
        return result

    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        if value:
            # reference fields have a 'model,id'-like value, that we need to convert
            # to a real name
            model_name, res_id = value.split(',')
            if model_name in obj.pool and res_id:
                model = obj.pool[model_name]
                names = model.name_get(cr, uid, [int(res_id)], context=context)
                return names[0][1] if names else False
        return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
    """Truncate `symb` to `self.size` characters and return it UTF-8 encoded.

    Bound as a per-instance setter by char.__init__, so `self` is the column.
    """
    #TODO:
    # * we need to remove the "symb==False" from the next line BUT
    #   for now too many things rely on this broken behavior
    # * the symb==None test should be common to all data types
    if symb is None or symb == False:
        return None

    # we need to convert the string to a unicode object to be able
    # to evaluate its length (and possibly truncate it) reliably
    u_symb = tools.ustr(symb)
    return u_symb[:self.size].encode('utf8')
class char(_column):
    """Single-line string column, optionally limited to `size` characters."""
    _type = 'char'

    def __init__(self, string="unknown", size=None, **args):
        _column.__init__(self, string=string, size=size or None, **args)
        # self._symbol_set_char defined to keep the backward compatibility
        self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
        self._symbol_set = (self._symbol_c, self._symbol_f)
class text(_column):
    """Multi-line, unlimited-length text column."""
    _type = 'text'
class html(text):
    """Text column whose content is run through html_sanitize on write,
    unless the column was created with sanitize=False."""
    _type = 'html'
    _symbol_c = '%s'

    def _symbol_set_html(self, value):
        if value is None or value is False:
            return None
        if not self._sanitize:
            return value
        return html_sanitize(value)

    def __init__(self, string='unknown', sanitize=True, **args):
        super(html, self).__init__(string=string, **args)
        self._sanitize = sanitize
        # symbol_set redefinition because of sanitize specific behavior
        self._symbol_f = self._symbol_set_html
        self._symbol_set = (self._symbol_c, self._symbol_f)

    def to_field_args(self):
        args = super(html, self).to_field_args()
        args['sanitize'] = self._sanitize
        return args
import __builtin__
class float(_column):
    """Floating-point column, optionally constrained to (precision, scale)
    digits (possibly computed per-database via digits_compute)."""
    _type = 'float'
    _symbol_c = '%s'
    _symbol_f = lambda x: __builtin__.float(x or 0.0)
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self,x: x or 0.0

    def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
        _column.__init__(self, string=string, required=required, **args)
        self.digits = digits
        # synopsis: digits_compute(cr) -> (precision, scale)
        self.digits_compute = digits_compute

    def new(self, **args):
        # float columns are database-dependent, so always recreate them
        return type(self)(**args)

    def to_field_args(self):
        args = super(float, self).to_field_args()
        args['digits'] = self.digits_compute or self.digits
        return args

    def digits_change(self, cr):
        # Resolve (precision, scale) for this database, then install a setter
        # that rounds and formats stored values to that scale.
        if self.digits_compute:
            self.digits = self.digits_compute(cr)
        if self.digits:
            precision, scale = self.digits
            self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
                                                                       precision_digits=scale),
                                                           precision_digits=scale))
class date(_column):
    """Date column; server values use tools.DEFAULT_SERVER_DATE_FORMAT."""
    _type = 'date'

    MONTHS = [
        ('01', 'January'),
        ('02', 'February'),
        ('03', 'March'),
        ('04', 'April'),
        ('05', 'May'),
        ('06', 'June'),
        ('07', 'July'),
        ('08', 'August'),
        ('09', 'September'),
        ('10', 'October'),
        ('11', 'November'),
        ('12', 'December')
    ]

    @staticmethod
    def today(*args):
        """ Returns the current date in a format fit for being a
        default value to a ``date`` field.

        This method should be provided as is to the _defaults dict, it
        should not be called.
        """
        return DT.date.today().strftime(
            tools.DEFAULT_SERVER_DATE_FORMAT)

    @staticmethod
    def context_today(model, cr, uid, context=None, timestamp=None):
        """Returns the current date as seen in the client's timezone
           in a format fit for date fields.

           This method may be passed as value to initialize _defaults.

           :param Model model: model (osv) for which the date value is being
                               computed - automatically passed when used in
                                _defaults.
           :param datetime timestamp: optional datetime value to use instead of
                                      the current date and time (must be a
                                      datetime, regular dates can't be converted
                                      between timezones.)
           :param dict context: the 'tz' key in the context should give the
                                name of the User/Client timezone (otherwise
                                UTC is used)
           :rtype: str
        """
        today = timestamp or DT.datetime.now()
        context_today = None
        # Timezone comes from the context if given, else from the user record.
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            user = model.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
            tz_name = user.tz
        if tz_name:
            try:
                utc = pytz.timezone('UTC')
                context_tz = pytz.timezone(tz_name)
                utc_today = utc.localize(today, is_dst=False) # UTC = no DST
                context_today = utc_today.astimezone(context_tz)
            except Exception:
                # Fall back to the UTC value rather than failing the request.
                _logger.debug("failed to compute context/client-specific today date, "
                              "using the UTC value for `today`",
                              exc_info=True)
        return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)

    @staticmethod
    def date_to_datetime(model, cr, uid, userdate, context=None):
        """ Convert date values expressed in user's timezone to
        server-side UTC timestamp, assuming a default arbitrary
        time of 12:00 AM - because a time is needed.

        :param str userdate: date string in in user time zone
        :return: UTC datetime string for server-side use
        """
        user_date = DT.datetime.strptime(userdate, tools.DEFAULT_SERVER_DATE_FORMAT)
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
        if tz_name:
            utc = pytz.timezone('UTC')
            context_tz = pytz.timezone(tz_name)
            # Anchor at noon local time, then convert to UTC.
            user_datetime = user_date + DT.timedelta(hours=12.0)
            local_timestamp = context_tz.localize(user_datetime, is_dst=False)
            user_datetime = local_timestamp.astimezone(utc)
            return user_datetime.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
        return user_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
class datetime(_column):
    """Datetime column; server values are naive timestamps expressed in UTC,
    using tools.DEFAULT_SERVER_DATETIME_FORMAT."""
    _type = 'datetime'

    MONTHS = [
        ('01', 'January'),
        ('02', 'February'),
        ('03', 'March'),
        ('04', 'April'),
        ('05', 'May'),
        ('06', 'June'),
        ('07', 'July'),
        ('08', 'August'),
        ('09', 'September'),
        ('10', 'October'),
        ('11', 'November'),
        ('12', 'December')
    ]

    @staticmethod
    def now(*args):
        """ Returns the current datetime in a format fit for being a
        default value to a ``datetime`` field.

        This method should be provided as is to the _defaults dict, it
        should not be called.
        """
        return DT.datetime.now().strftime(
            tools.DEFAULT_SERVER_DATETIME_FORMAT)

    @staticmethod
    def context_timestamp(cr, uid, timestamp, context=None):
        """Returns the given timestamp converted to the client's timezone.

           This method is *not* meant for use as a _defaults initializer,
           because datetime fields are automatically converted upon
           display on client side. For _defaults you :meth:`fields.datetime.now`
           should be used instead.

           :param datetime timestamp: naive datetime value (expressed in UTC)
                                      to be converted to the client timezone
           :param dict context: the 'tz' key in the context should give the
                                name of the User/Client timezone (otherwise
                                UTC is used)
           :rtype: datetime
           :return: timestamp converted to timezone-aware datetime in context
                    timezone
        """
        assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
        # Timezone comes from the context if given, else from the user record.
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
            user = registry['res.users'].browse(cr, SUPERUSER_ID, uid)
            tz_name = user.tz
        utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
        if tz_name:
            try:
                context_tz = pytz.timezone(tz_name)
                return utc_timestamp.astimezone(context_tz)
            except Exception:
                # Fall back to the UTC value rather than failing the request.
                _logger.debug("failed to compute context/client-specific timestamp, "
                              "using the UTC value",
                              exc_info=True)
        return utc_timestamp
class binary(_column):
    """Binary column, transferred and stored as a base64-encoded string.
    Never prefetched: contents can be large."""
    _type = 'binary'
    _symbol_c = '%s'

    # Binary values may be byte strings (python 2.6 byte array), but
    # the legacy OpenERP convention is to transfer and store binaries
    # as base64-encoded strings. The base64 string may be provided as a
    # unicode in some circumstances, hence the str() cast in symbol_f.
    # This str coercion will only work for pure ASCII unicode strings,
    # on purpose - non base64 data must be passed as a 8bit byte strings.
    _symbol_f = lambda symb: symb and Binary(str(symb)) or None
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self, x: x and str(x)

    _classic_read = False
    _prefetch = False

    def __init__(self, string='unknown', filters=None, **args):
        _column.__init__(self, string=string, **args)
        # `filters` is forwarded to the client file selector (e.g. '*.png').
        # NOTE(review): exact client-side semantics not visible here; confirm.
        self.filters = filters

    def get(self, cr, obj, ids, name, user=None, context=None, values=None):
        """Return {id: value}; with bin_size (or bin_size_<name>) set in the
        context, return the human-readable size instead of the content."""
        if not context:
            context = {}
        if not values:
            values = []
        res = {}
        for i in ids:
            val = None
            for v in values:
                if v['id'] == i:
                    val = v[name]
                    break

            # If client is requesting only the size of the field, we return it instead
            # of the content. Presumably a separate request will be done to read the actual
            # content if it's needed at some point.
            # TODO: after 6.0 we should consider returning a dict with size and content instead of
            # having an implicit convention for the value
            if val and context.get('bin_size_%s' % name, context.get('bin_size')):
                res[i] = tools.human_size(long(val))
            else:
                res[i] = val
        return res
class selection(_column):
    """Column restricted to a list of (value, label) pairs, or a callable
    returning such a list."""
    _type = 'selection'

    def __init__(self, selection, string='unknown', **args):
        if callable(selection):
            # Wrap callable selections so they receive (cr, uid, context).
            from openerp import api
            selection = api.expected(api.cr_uid_context, selection)
        _column.__init__(self, string=string, **args)
        self.selection = selection

    def to_field_args(self):
        args = super(selection, self).to_field_args()
        args['selection'] = self.selection
        return args

    @classmethod
    def reify(cls, cr, uid, model, field, context=None):
        """ Munges the field's ``selection`` attribute as necessary to get
        something useable out of it: calls it if it's a function, applies
        translations to labels if it's not.

        A callable ``selection`` is considered translated on its own.

        :param orm.Model model:
        :param _column field:
        """
        if callable(field.selection):
            return field.selection(model, cr, uid, context)
        if not (context and 'lang' in context):
            return field.selection
        # field_to_dict isn't given a field name, only a field object, we
        # need to get the name back in order to perform the translation lookup
        field_name = next(
            name for name, column in model._columns.iteritems()
            if column == field)
        translation_filter = "%s,%s" % (model._name, field_name)
        translate = functools.partial(
            model.pool['ir.translation']._get_source,
            cr, uid, translation_filter, 'selection', context['lang'])
        return [
            (value, translate(label))
            for value, label in field.selection
        ]
# ---------------------------------------------------------
# Relationals fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
class many2one(_column):
    """Column holding a reference (foreign key) to a record of another model.

    :param str obj: name of the destination model
    :param bool auto_join: whether the ORM may generate SQL joins through
        this field when evaluating domains
    """
    _classic_read = False       # read values are post-processed (name_get), not raw ids
    _classic_write = True
    _type = 'many2one'
    _symbol_c = '%s'
    _symbol_f = lambda x: x or None     # falsy values are stored as SQL NULL
    _symbol_set = (_symbol_c, _symbol_f)
    ondelete = 'set null'
    def __init__(self, obj, string='unknown', auto_join=False, **args):
        _column.__init__(self, string=string, **args)
        self._obj = obj
        self._auto_join = auto_join
    def to_field_args(self):
        """Extend the new-API field kwargs with m2o-specific arguments."""
        args = super(many2one, self).to_field_args()
        args['comodel_name'] = self._obj
        args['auto_join'] = self._auto_join
        return args
    def set(self, cr, obj_src, id, field, values, user=None, context=None):
        """Write this m2o column on record ``id`` of ``obj_src``.

        ``values`` may be a plain id (or falsy to clear the column), or a
        list of o2m-style command tuples: (0, 0, vals) create, (1, ID, vals)
        update, (2, ID) delete, (3, ID)/(5,) clear the reference,
        (4, ID) link.
        """
        if not context:
            context = {}
        obj = obj_src.pool[self._obj]
        self._table = obj._table
        if isinstance(values, list):
            for act in values:
                if act[0] == 0:
                    # create the target record, then point the source at it.
                    # Fixed: create()/write() take the uid right after cr;
                    # the previous code omitted ``user`` and shifted the args.
                    id_new = obj.create(cr, user, act[2], context=context)
                    cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
                elif act[0] == 1:
                    obj.write(cr, user, [act[1]], act[2], context=context)
                elif act[0] == 2:
                    cr.execute('delete from '+self._table+' where id=%s', (act[1],))
                elif act[0] == 3 or act[0] == 5:
                    cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
                elif act[0] == 4:
                    cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
        else:
            if values:
                cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
            else:
                cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
        """Search the comodel by name (``like``), within the field's domain."""
        return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # read values are (id, display_name) tuples; fall back to a string cast
        return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
    """One-to-many column: the set of records of model ``obj`` whose m2o
    column ``fields_id`` points back to the current record."""
    _classic_read = False
    _classic_write = False
    _prefetch = False
    _type = 'one2many'
    # one2many columns are not copied by default
    copy = False
    def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
        _column.__init__(self, string=string, **args)
        self._obj = obj
        self._fields_id = fields_id
        self._limit = limit
        self._auto_join = auto_join
        #one2many can't be used as condition for defaults
        assert(self.change_default != True)
    def to_field_args(self):
        """Extend the new-API field kwargs with o2m-specific arguments."""
        args = super(one2many, self).to_field_args()
        args['comodel_name'] = self._obj
        args['inverse_name'] = self._fields_id
        args['auto_join'] = self._auto_join
        args['limit'] = self._limit
        return args
    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        """Return ``{id: [comodel ids]}`` by searching the comodel for
        records whose inverse m2o is in ``ids``, respecting the field's
        domain and ``limit``."""
        if self._context:
            context = dict(context or {})
            context.update(self._context)
        # retrieve the records in the comodel
        comodel = obj.pool[self._obj].browse(cr, user, [], context)
        inverse = self._fields_id
        domain = self._domain(obj) if callable(self._domain) else self._domain
        domain = domain + [(inverse, 'in', ids)]
        records = comodel.search(domain, limit=self._limit)
        result = {id: [] for id in ids}
        # read the inverse of records without prefetching other fields on them
        for record in records.with_context(prefetch_fields=False):
            # record[inverse] may be a record or an integer
            result[int(record[inverse])].append(record.id)
        return result
    def set(self, cr, obj, id, field, values, user=None, context=None):
        """Apply the list of o2m command tuples in ``values`` to record
        ``id`` (see the command legend below this class) and return the
        ``_store_get_values`` triggers collected from created records."""
        result = []
        context = dict(context or {})
        context.update(self._context)
        context['recompute'] = False # recomputation is done by outer create/write
        if not values:
            return
        obj = obj.pool[self._obj]
        _table = obj._table
        for act in values:
            if act[0] == 0:
                # create a new comodel record pointing back at ``id``
                act[2][self._fields_id] = id
                id_new = obj.create(cr, user, act[2], context=context)
                result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
            elif act[0] == 1:
                obj.write(cr, user, [act[1]], act[2], context=context)
            elif act[0] == 2:
                obj.unlink(cr, user, [act[1]], context=context)
            elif act[0] == 3:
                reverse_rel = obj._all_columns.get(self._fields_id)
                assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
                # if the model has on delete cascade, just delete the row
                if reverse_rel.column.ondelete == "cascade":
                    obj.unlink(cr, user, [act[1]], context=context)
                else:
                    cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
            elif act[0] == 4:
                # table of the field (parent_model in case of inherit)
                field_model = self._fields_id in obj.pool[self._obj]._columns and self._obj or obj.pool[self._obj]._all_columns[self._fields_id].parent_model
                field_table = obj.pool[field_model]._table
                cr.execute("select 1 from {0} where id=%s and {1}=%s".format(field_table, self._fields_id), (act[1], id))
                if not cr.fetchone():
                    # Must use write() to recompute parent_store structure if needed and check access rules
                    obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
            elif act[0] == 5:
                reverse_rel = obj._all_columns.get(self._fields_id)
                assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
                # if the o2m has a static domain we must respect it when unlinking
                domain = self._domain(obj) if callable(self._domain) else self._domain
                extra_domain = domain or []
                ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
                # If the model has cascade deletion, we delete the rows because it is the intended behavior,
                # otherwise we only nullify the reverse foreign key column.
                if reverse_rel.column.ondelete == "cascade":
                    obj.unlink(cr, user, ids_to_unlink, context=context)
                else:
                    obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
            elif act[0] == 6:
                # Must use write() to recompute parent_store structure if needed
                obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
                ids2 = act[2] or [0]
                # detach every record not in the new id list
                cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
                ids3 = map(lambda x:x[0], cr.fetchall())
                obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
        return result
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
        """Search by delegating a name_search to the comodel, within the
        field's domain."""
        domain = self._domain(obj) if callable(self._domain) else self._domain
        return obj.pool[self._obj].name_search(cr, uid, value, domain, operator, context=context,limit=limit)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
class many2many(_column):
    """Encapsulates the logic of a many-to-many bidirectional relationship, handling the
    low-level details of the intermediary relationship table transparently.
    A many-to-many relationship is always symmetrical, and can be declared and accessed
    from either endpoint model.
    If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
    or id2 (destination foreign key column name) are not specified, the system will
    provide default values. This will by default only allow one single symmetrical
    many-to-many relationship between the source and destination model.
    For multiple many-to-many relationship between the same models and for
    relationships where source and destination models are the same, ``rel``, ``id1``
    and ``id2`` should be specified explicitly.
    :param str obj: destination model
    :param str rel: optional name of the intermediary relationship table. If not specified,
                a canonical name will be derived based on the alphabetically-ordered
                model names of the source and destination (in the form: ``amodel_bmodel_rel``).
                Automatic naming is not possible when the source and destination are
                the same, for obvious ambiguity reasons.
    :param str id1: optional name for the column holding the foreign key to the current
                model in the relationship table. If not specified, a canonical name
                will be derived based on the model name (in the form: `src_model_id`).
    :param str id2: optional name for the column holding the foreign key to the destination
                model in the relationship table. If not specified, a canonical name
                will be derived based on the model name (in the form: `dest_model_id`)
    :param str string: field label
    """
    _classic_read = False
    _classic_write = False
    _prefetch = False
    _type = 'many2many'
    def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
        """
        """
        _column.__init__(self, string=string, **args)
        self._obj = obj
        if rel and '.' in rel:
            raise Exception(_('The second argument of the many2many field %s must be a SQL table !'\
                'You used %s, which is not a valid SQL table name.')% (string,rel))
        self._rel = rel
        self._id1 = id1
        self._id2 = id2
        self._limit = limit
    def to_field_args(self):
        """Extend the new-API field kwargs with m2m-specific arguments."""
        args = super(many2many, self).to_field_args()
        args['comodel_name'] = self._obj
        args['relation'] = self._rel
        args['column1'] = self._id1
        args['column2'] = self._id2
        args['limit'] = self._limit
        return args
    def _sql_names(self, source_model):
        """Return the SQL names defining the structure of the m2m relationship table
            :return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
                     local_col is the name of the column holding the current model's FK, and
                     dest_col is the name of the column holding the destination model's FK, and
        """
        tbl, col1, col2 = self._rel, self._id1, self._id2
        if not all((tbl, col1, col2)):
            # the default table name is based on the stable alphabetical order of tables
            dest_model = source_model.pool[self._obj]
            tables = tuple(sorted([source_model._table, dest_model._table]))
            if not tbl:
                assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
                                               'is not possible when source and destination models are '\
                                               'the same'
                tbl = '%s_%s_rel' % tables
            if not col1:
                col1 = '%s_id' % source_model._table
            if not col2:
                col2 = '%s_id' % dest_model._table
        return tbl, col1, col2
    def _get_query_and_where_params(self, cr, model, ids, values, where_params):
        """ Extracted from ``get`` to facilitate fine-tuning of the generated
            query. """
        query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
                   FROM %(rel)s, %(from_c)s \
                  WHERE %(rel)s.%(id1)s IN %%s \
                    AND %(rel)s.%(id2)s = %(tbl)s.id \
                 %(where_c)s  \
                 %(order_by)s \
                 %(limit)s \
                 OFFSET %(offset)d' \
                % values
        return query, where_params
    def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
        """Return ``{id: [related ids]}`` by querying the relationship
        table, restricted by the field's static domain and the ir.rules of
        the comodel."""
        if not context:
            context = {}
        if not values:
            values = {}
        res = {}
        if not ids:
            return res
        for id in ids:
            res[id] = []
        if offset:
            _logger.warning(
                "Specifying offset at a many2many.get() is deprecated and may"
                " produce unpredictable results.")
        obj = model.pool[self._obj]
        rel, id1, id2 = self._sql_names(model)
        # static domains are lists, and are evaluated both here and on client-side, while string
        # domains supposed by dynamic and evaluated on client-side only (thus ignored here)
        # FIXME: make this distinction explicit in API!
        domain = isinstance(self._domain, list) and self._domain or []
        wquery = obj._where_calc(cr, user, domain, context=context)
        obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
        from_c, where_c, where_params = wquery.get_sql()
        if where_c:
            where_c = ' AND ' + where_c
        order_by = ' ORDER BY "%s".%s' %(obj._table, obj._order.split(',')[0])
        limit_str = ''
        if self._limit is not None:
            limit_str = ' LIMIT %d' % self._limit
        query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
               'from_c': from_c,
               'tbl': obj._table,
               'id1': id1,
               'id2': id2,
               'where_c': where_c,
               'limit': limit_str,
               'order_by': order_by,
               'offset': offset,
                }, where_params)
        cr.execute(query, [tuple(ids),] + where_params)
        for r in cr.fetchall():
            res[r[1]].append(r[0])
        return res
    def set(self, cr, model, id, name, values, user=None, context=None):
        """Apply the m2m command tuples in ``values`` (see the legend above
        this class) to record ``id``, updating the relationship table
        directly with SQL."""
        if not context:
            context = {}
        if not values:
            return
        rel, id1, id2 = self._sql_names(model)
        obj = model.pool[self._obj]
        for act in values:
            if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
                continue
            if act[0] == 0:
                idnew = obj.create(cr, user, act[2], context=context)
                cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
            elif act[0] == 1:
                obj.write(cr, user, [act[1]], act[2], context=context)
            elif act[0] == 2:
                obj.unlink(cr, user, [act[1]], context=context)
            elif act[0] == 3:
                cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
            elif act[0] == 4:
                # following queries are in the same transaction - so should be relatively safe
                cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
                if not cr.fetchone():
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
            elif act[0] == 5:
                cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
            elif act[0] == 6:
                # replace the whole id set, but only for rows the user may see
                # according to ir.rule (hence the domain_get filtering below)
                d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
                if d1:
                    d1 = ' and ' + ' and '.join(d1)
                else:
                    d1 = ''
                cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
                for act_nbr in act[2]:
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
    #
    # TODO: use a name_search
    #
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
        """Search the comodel by name, within the field's domain."""
        return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
    """Return a human-readable size string for ``value``.

    ``value`` may already be a byte count (int/long), or any sized object
    (typically a base64 string) whose length is used as the size; falsy
    values count as zero bytes.
    """
    if isinstance(value, (int, long)):
        nbytes = value
    elif value:
        # this is supposed to be a string
        nbytes = len(value)
    else:
        nbytes = 0
    return tools.human_size(nbytes)
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
# Low control bytes that are invalid in XML 1.0 (tab/newline/CR excluded);
# used by sanitize_binary_value() below to detect values unsafe for XML-RPC.
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
    """Make sure a binary field value can safely be marshalled over XML-RPC.

    Binary fields should hold 7-bit ASCII base64-encoded data, but extra
    sanity checks are applied so other values still pass via XML-RPC.
    """
    # these builtin types are meant to pass untouched
    if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
        return value
    # Coerce to unicode so the value can be transparently encoded as UTF-8
    # by xmlrpclib. This works for _any_ byte value, thanks to the fallback
    # to latin-1 passthrough encoding when decoding to unicode. See:
    # - http://bugs.python.org/issue10066
    # - http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
    value = tools.ustr(value)
    # Due to Python bug #10066 the result could still contain XML-invalid
    # low bytes ([\x00-\x08\x0b-\x0c\x0e-\x1f]) that would crash the
    # decoding side. As a last resort, base64-encode such values after
    # restoring the pure bytes with the latin-1 passthrough encoding -- not
    # very smart or useful, but it avoids crashing the request.
    if invalid_xml_low_bytes.search(value):
        value = base64.b64encode(value.encode('latin-1'))
    return value
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
    """
    A field whose value is computed by a function (rather
    than being read from the database).
    :param fnct: the callable that will compute the field value.
    :param arg: arbitrary value to be passed to ``fnct`` when computing the value.
    :param fnct_inv: the callable that will allow writing values in that field
                     (if not provided, the field is read-only).
    :param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
                         writing a value.
    :param str type: type of the field simulated by the function field
    :param fnct_search: the callable that allows searching on the field
                        (if not provided, search will not return any result).
    :param store: store computed value in database
                  (see :ref:`The *store* parameter <field-function-store>`).
    :type store: True or dict specifying triggers for field computation
    :param multi: name of batch for batch computation of function fields.
                  All fields with the same batch name will be computed by
                  a single function call. This changes the signature of the
                  ``fnct`` callable.
    .. _field-function-fnct: The ``fnct`` parameter
    .. rubric:: The ``fnct`` parameter
    The callable implementing the function field must have the following signature:
    .. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)
        Implements the function field.
        :param orm model: model to which the field belongs (should be ``self`` for
                          a model method)
        :param field_name(s): name of the field to compute, or if ``multi`` is provided,
                              list of field names to compute.
        :type field_name(s): str | [str]
        :param arg: arbitrary value passed when declaring the function field
        :rtype: dict
        :return: mapping of ``ids`` to computed values, or if multi is provided,
                 to a map of field_names to computed values
    The values in the returned dictionary must be of the type specified by the type
    argument in the field declaration.
    Here is an example with a simple function ``char`` function field::
        # declarations
        def compute(self, cr, uid, ids, field_name, arg, context):
            result = {}
            # ...
            return result
        _columns['my_char'] = fields.function(compute, type='char', size=50)
        # when called with ``ids=[1,2,3]``, ``compute`` could return:
        {
            1: 'foo',
            2: 'bar',
            3: False # null values should be returned explicitly too
        }
    If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
    of the field names that should be computed. Each value in the returned
    dictionary must then be a dictionary mapping field names to values.
    Here is an example where two function fields (``name`` and ``age``)
    are both computed by a single function field::
        # declarations
        def compute_person_data(self, cr, uid, ids, field_names, arg, context):
            result = {}
            # ...
            return result
        _columns['name'] = fields.function(compute_person_data, type='char',\
                                           size=50, multi='person_data')
        _columns['age'] = fields.function(compute_person_data, type='integer',\
                                          multi='person_data')
        # when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
        {
            1: {'name': 'Bob', 'age': 23},
            2: {'name': 'Sally', 'age': 19},
            3: {'name': 'unknown', 'age': False}
        }
    .. _field-function-fnct-inv:
    .. rubric:: The ``fnct_inv`` parameter
    This callable implements the write operation for the function field
    and must have the following signature:
    .. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)
        Callable that implements the ``write`` operation for the function field.
        :param orm model: model to which the field belongs (should be ``self`` for
                          a model method)
        :param int id: the identifier of the object to write on
        :param str field_name: name of the field to set
        :param fnct_inv_arg: arbitrary value passed when declaring the function field
        :return: True
    When writing values for a function field, the ``multi`` parameter is ignored.
    .. _field-function-fnct-search:
    .. rubric:: The ``fnct_search`` parameter
    This callable implements the search operation for the function field
    and must have the following signature:
    .. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)
        Callable that implements the ``search`` operation for the function field by expanding
        a search criterion based on the function field into a new domain based only on
        columns that are stored in the database.
        :param orm model: model to which the field belongs (should be ``self`` for
                          a model method)
        :param orm model_again: same value as ``model`` (seriously! this is for backwards
                                compatibility)
        :param str field_name: name of the field to search on
        :param list criterion: domain component specifying the search criterion on the field.
        :rtype: list
        :return: domain to use instead of ``criterion`` when performing the search.
                 This new domain must be based only on columns stored in the database, as it
                 will be used directly without any translation.
        The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
        The most generic way to implement ``fnct_search`` is to directly search for the records that
        match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
        ``[('id','in',[1,3,5])]``.
    .. _field-function-store:
    .. rubric:: The ``store`` parameter
    The ``store`` parameter allows caching the result of the field computation in the
    database, and defining the triggers that will invalidate that cache and force a
    recomputation of the function field.
    When not provided, the field is computed every time its value is read.
    The value of ``store`` may be either ``True`` (to recompute the field value whenever
    any field in the same record is modified), or a dictionary specifying a more
    flexible set of recomputation triggers.
    A trigger specification is a dictionary that maps the names of the models that
    will trigger the computation, to a tuple describing the trigger rule, in the
    following form::
        store = {
            'trigger_model': (mapping_function,
                              ['trigger_field1', 'trigger_field2'],
                              priority),
        }
    A trigger rule is defined by a 3-item tuple where:
        * The ``mapping_function`` is defined as follows:
            .. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)
                Callable that maps record ids of a trigger model to ids of the
                corresponding records in the source model (whose field values
                need to be recomputed).
                :param orm model: trigger_model
                :param list trigger_ids: ids of the records of trigger_model that were
                                         modified
                :rtype: list
                :return: list of ids of the source model whose function field values
                         need to be recomputed
        * The second item is a list of the fields who should act as triggers for
          the computation. If an empty list is given, all fields will act as triggers.
        * The last item is the priority, used to order the triggers when processing them
          after any write operation on a model that has function field triggers. The
          default priority is 10.
    In fact, setting store = True is the same as using the following trigger dict::
        store = {
              'model_itself': (lambda self, cr, uid, ids, context: ids,
                               [],
                               10)
        }
    """
    _classic_read = False
    _classic_write = False
    _prefetch = False
    _type = 'function'
    _properties = True
    # function fields are not copied by default
    copy = False
    #
    # multi: compute several fields in one call
    #
    def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
        # see the class docstring for the meaning of every parameter
        _column.__init__(self, **args)
        self._obj = obj
        self._fnct = fnct
        self._fnct_inv = fnct_inv
        self._arg = arg
        self._multi = multi
        if 'relation' in args:
            self._obj = args['relation']
        self.digits = args.get('digits', (16,2))
        self.digits_compute = args.get('digits_compute', None)
        if callable(args.get('selection')):
            from openerp import api
            self.selection = api.expected(api.cr_uid_context, args['selection'])
        self._fnct_inv_arg = fnct_inv_arg
        if not fnct_inv:
            # without an inverse function the field cannot be written
            self.readonly = 1
        self._type = type
        self._fnct_search = fnct_search
        self.store = store
        if not fnct_search and not store:
            self.selectable = False
        if store:
            if self._type != 'many2one':
                # m2o fields need to return tuples with name_get, not just foreign keys
                self._classic_read = True
            self._classic_write = True
            if type=='binary':
                self._symbol_get=lambda x:x and str(x)
            else:
                self._prefetch = True
        if type == 'char':
            self._symbol_c = char._symbol_c
            self._symbol_f = lambda x: _symbol_set_char(self, x)
            self._symbol_set = (self._symbol_c, self._symbol_f)
        else:
            # borrow the SQL adapters of the simulated column type, if any
            type_class = globals().get(type)
            if type_class is not None:
                self._symbol_c = type_class._symbol_c
                self._symbol_f = type_class._symbol_f
                self._symbol_set = type_class._symbol_set
    def new(self, **args):
        """Return a column equivalent to self (``args`` is ignored here)."""
        # HACK: function fields are tricky to recreate, simply return a copy
        import copy
        return copy.copy(self)
    def to_field_args(self):
        """Extend the new-API field kwargs with the arguments relevant to
        the simulated column type."""
        args = super(function, self).to_field_args()
        args['store'] = bool(self.store)
        if self._type in ('float',):
            args['digits'] = self.digits_compute or self.digits
        elif self._type in ('selection', 'reference'):
            args['selection'] = self.selection
        elif self._type in ('many2one', 'one2many', 'many2many'):
            args['comodel_name'] = self._obj
        return args
    def digits_change(self, cr):
        """Refresh ``digits`` from ``digits_compute`` (for simulated float
        columns) and update the SQL symbol adapter to round accordingly."""
        if self._type == 'float':
            if self.digits_compute:
                self.digits = self.digits_compute(cr)
            if self.digits:
                precision, scale = self.digits
                self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
                                                                           precision_digits=scale),
                                                               precision_digits=scale))
    def search(self, cr, uid, obj, name, args, context=None):
        """Delegate domain expansion to ``fnct_search``; no search results
        are possible without it."""
        if not self._fnct_search:
            #CHECKME: should raise an exception
            return []
        return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
    def postprocess(self, cr, uid, obj, field, value=None, context=None):
        """Post-process a single computed ``value`` (see ``_postprocess_batch``)."""
        return self._postprocess_batch(cr, uid, obj, field, {0: value}, context=context)[0]
    def _postprocess_batch(self, cr, uid, obj, field, values, context=None):
        """Post-process computed binary values: replace them by their size
        when the context asks for ``bin_size``, otherwise sanitize them for
        XML-RPC (unless ``bin_raw`` is set). Other types pass through."""
        if not values:
            return values
        if context is None:
            context = {}
        field_type = obj._columns[field]._type
        new_values = dict(values)
        if field_type == 'binary':
            if context.get('bin_size'):
                # client requests only the size of binary fields
                for rid, value in values.iteritems():
                    if value:
                        new_values[rid] = get_nice_size(value)
            elif not context.get('bin_raw'):
                for rid, value in values.iteritems():
                    if value:
                        new_values[rid] = sanitize_binary_value(value)
        return new_values
    def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
        """Compute the field for ``ids`` via ``fnct`` (or reuse the values
        already present in ``values``) and post-process the result.
        ``name`` is a field name, or a list of names when ``multi`` is set.
        """
        multi = self._multi
        # if we already have a value, don't recompute it.
        # This happen if case of stored many2one fields
        if values and not multi and name in values[0]:
            result = dict((v['id'], v[name]) for v in values)
        elif values and multi and all(n in values[0] for n in name):
            result = dict((v['id'], dict((n, v[n]) for n in name)) for v in values)
        else:
            result = self._fnct(obj, cr, uid, ids, name, self._arg, context)
        if multi:
            # pivot {id: {field: value}} into {field: {id: value}} for batch
            # post-processing, then write the processed values back
            swap = {}
            for rid, values in result.iteritems():
                for f, v in values.iteritems():
                    if f not in name:
                        continue
                    swap.setdefault(f, {})[rid] = v
            for field, values in swap.iteritems():
                new_values = self._postprocess_batch(cr, uid, obj, field, values, context)
                for rid, value in new_values.iteritems():
                    result[rid][field] = value
        else:
            result = self._postprocess_batch(cr, uid, obj, name, result, context)
        return result
    def set(self, cr, obj, id, name, value, user=None, context=None):
        """Write ``value`` through ``fnct_inv``; a no-op without an inverse."""
        if not context:
            context = {}
        if self._fnct_inv:
            self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # Function fields are supposed to emulate a basic field type,
        # so they can delegate to the basic type for record name rendering
        return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
    """Field that points to some data inside another field of the current record.
    Example::
       _columns = {
           'foo_id': fields.many2one('my.foo', 'Foo'),
           'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
        }
    """
    def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
        """Rewrite a domain on this field into a dotted-path domain."""
        # assume self._arg = ('foo', 'bar', 'baz')
        # domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
        field = '.'.join(self._arg)
        return map(lambda x: (field, x[1], x[2]), domain)
    def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
        """Write ``values`` on the last field of the related path, for each
        record in ``ids``."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        for instance in obj.browse(cr, uid, ids, context=context):
            # traverse all fields except the last one
            for field in self.arg[:-1]:
                # [:1] keeps at most one record when traversing x2many steps
                instance = instance[field][:1]
            if instance:
                # write on the last field of the target record
                instance.write({self.arg[-1]: values})
    def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
        """Read the value at the end of the related path for each record,
        converting the result to the representation expected for the
        simulated field type (m2o tuple, x2many id list, ...)."""
        res = {}
        for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
            value = record
            # traverse all fields except the last one
            for field in self.arg[:-1]:
                # [:1] keeps at most one record when traversing x2many steps
                value = value[field][:1]
            # read the last field on the target record
            res[record.id] = value[self.arg[-1]]
        if self._type == 'many2one':
            # res[id] is a recordset; convert it to (id, name) or False.
            # Perform name_get as root, as seeing the name of a related object depends on
            # access right of source document, not target, so user may not have access.
            value_ids = list(set(value.id for value in res.itervalues() if value))
            value_name = dict(obj.pool[self._obj].name_get(cr, SUPERUSER_ID, value_ids, context=context))
            res = dict((id, bool(value) and (value.id, value_name[value.id])) for id, value in res.iteritems())
        elif self._type in ('one2many', 'many2many'):
            # res[id] is a recordset; convert it to a list of ids
            res = dict((id, value.ids) for id, value in res.iteritems())
        return res
    def __init__(self, *arg, **args):
        # arg is the sequence of field names forming the related path
        self.arg = arg
        self._relations = []
        super(related, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=self._fnct_search, **args)
        if self.store is True:
            # TODO: improve here to change self.store = {...} according to related objects
            pass
class sparse(function):
    # Function field whose value lives inside a sibling 'serialized'
    # column (a JSON-backed dict) instead of a dedicated SQL column.

    def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
        """
            + For a many2many field, a list of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
                 (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
                 (4, ID)                link to existing record with id = ID (adds a relationship)
                 (5)                    unlink all (like using (3,ID) for all linked records)
                 (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)

                 Example:
                    [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]

            + For a one2many field, a list of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)

                 Example:
                    [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
        """
        if self._type == 'many2many':
            # only the "replace" command is supported on sparse m2m
            assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
            return value[0][2]

        elif self._type == 'one2many':
            if not read_value:
                read_value = []
            relation_obj = obj.pool[self.relation]
            for vals in value:
                assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
                if vals[0] == 0:
                    # create the target record, keep its id in the stored list
                    read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
                elif vals[0] == 1:
                    relation_obj.write(cr, uid, vals[1], vals[2], context=context)
                elif vals[0] == 2:
                    # delete the target record and drop it from the stored list
                    relation_obj.unlink(cr, uid, vals[1], context=context)
                    read_value.remove(vals[1])
            return read_value
        return value

    def _fnct_write(self,obj,cr, uid, ids, field_name, value, args, context=None):
        if not type(ids) == list:
            ids = [ids]
        records = obj.browse(cr, uid, ids, context=context)
        for record in records:
            # grab serialized value as object - already deserialized
            serialized = getattr(record, self.serialization_field)
            if value is None:
                # simply delete the key to unset it.
                serialized.pop(field_name, None)
            else:
                serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
            obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
        return True

    def _fnct_read(self, obj, cr, uid, ids, field_names, args, context=None):
        results = {}
        records = obj.browse(cr, uid, ids, context=context)
        for record in records:
            # grab serialized value as object - already deserialized
            serialized = getattr(record, self.serialization_field)
            results[record.id] = {}
            for field_name in field_names:
                field_type = obj._columns[field_name]._type
                value = serialized.get(field_name, False)
                if field_type in ('one2many','many2many'):
                    value = value or []
                    if value:
                        # filter out deleted records as superuser
                        relation_obj = obj.pool[obj._columns[field_name].relation]
                        value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
                if type(value) in (int,long) and field_type == 'many2one':
                    relation_obj = obj.pool[obj._columns[field_name].relation]
                    # check for deleted record as superuser
                    if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
                        value = False
                results[record.id][field_name] = value
        return results

    def __init__(self, serialization_field, **kwargs):
        # name of the sibling 'serialized' column holding this field's value
        self.serialization_field = serialization_field
        super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
    # A function field that stores nothing and returns nothing:
    # searching matches no record, writing is discarded, reading is empty.

    def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
        # never matches anything
        return []

    def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
        # silently discard the value
        return False

    def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
        # no value for any record
        return {}

    def __init__(self, *arg, **args):
        self.arg = arg
        self._relations = []
        super(dummy, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=self._fnct_search, **args)
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
    """ A field able to store an arbitrary python data structure.

        Note: only plain components allowed.
    """

    def _symbol_set_struct(val):
        # NOTE(review): deliberately takes no ``self`` -- presumably only
        # invoked through the ``_symbol_set`` tuple below, where it is a
        # plain function, never as a bound method.  Confirm against callers.
        return simplejson.dumps(val)

    def _symbol_get_struct(self, val):
        # empty DB values deserialize to an empty dict
        return simplejson.loads(val or '{}')

    _prefetch = False
    _type = 'serialized'

    # (SQL placeholder, python->SQL converter) pair used by the ORM
    _symbol_c = '%s'
    _symbol_f = _symbol_set_struct
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = _symbol_get_struct
# TODO: review completly this class for speed improvement
class property(function):
    # Company-dependent field: values are stored in 'ir.property'
    # records rather than in a column of the model's table.

    def to_field_args(self):
        args = super(property, self).to_field_args()
        args['company_dependent'] = True
        return args

    def _fnct_search(self, tobj, cr, uid, obj, name, domain, context=None):
        # delegate each domain leaf to ir.property's dedicated search
        ir_property = obj.pool['ir.property']
        result = []
        for field, operator, value in domain:
            result += ir_property.search_multi(cr, uid, name, tobj._name, operator, value, context=context)
        return result

    def _fnct_write(self, obj, cr, uid, id, prop_name, value, obj_dest, context=None):
        ir_property = obj.pool['ir.property']
        ir_property.set_multi(cr, uid, prop_name, obj._name, {id: value}, context=context)
        return True

    def _fnct_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
        ir_property = obj.pool['ir.property']

        res = {id: {} for id in ids}
        for prop_name in prop_names:
            column = obj._all_columns[prop_name].column
            values = ir_property.get_multi(cr, uid, prop_name, obj._name, ids, context=context)
            if column._type == 'many2one':
                # name_get the non-null values as SUPERUSER_ID
                # (sum() over recordsets concatenates, starting from the
                # empty recordset given as its second argument)
                vals = sum(set(filter(None, values.itervalues())),
                           obj.pool[column._obj].browse(cr, uid, [], context=context))
                vals_name = dict(vals.sudo().name_get()) if vals else {}
                for id, value in values.iteritems():
                    ng = False
                    if value and value.id in vals_name:
                        ng = value.id, vals_name[value.id]
                    res[id][prop_name] = ng
            else:
                for id, value in values.iteritems():
                    res[id][prop_name] = value

        return res

    def __init__(self, **args):
        if 'view_load' in args:
            _logger.warning("view_load attribute is deprecated on ir.fields. Args: %r", args)
        # copy before mutating: the caller's dict must not be modified
        args = dict(args)
        args['obj'] = args.pop('relation', '') or args.get('obj', '')
        super(property, self).__init__(
            fnct=self._fnct_read,
            fnct_inv=self._fnct_write,
            fnct_search=self._fnct_search,
            multi='properties',
            **args
        )
class column_info(object):
    """ Struct containing details about an osv column, either one local to
        its model, or one inherited via _inherits.

        .. attribute:: name
            name of the column
        .. attribute:: column
            column instance, subclass of :class:`_column`
        .. attribute:: parent_model
            if the column is inherited, name of the model that contains it,
            ``None`` for local columns.
        .. attribute:: parent_column
            the name of the column containing the m2o relationship to the
            parent model that contains this column, ``None`` for local columns.
        .. attribute:: original_parent
            if the column is inherited, name of the original parent model that
            contains it i.e in case of multilevel inheritance, ``None`` for
            local columns.
    """

    def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
        self.name = name
        self.column = column
        self.parent_model = parent_model
        self.parent_column = parent_column
        self.original_parent = original_parent

    def __str__(self):
        # e.g. "column_info(partner_id, many2one, None, None, None)"
        details = (self.name, self.column, self.parent_model,
                   self.parent_column, self.original_parent)
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join(str(part) for part in details))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| nicobustillos/odoo | openerp/osv/fields.py | Python | agpl-3.0 | 70,819 |
from .document import Document, Track
# Key-binding templates for the scoring UI: template name -> ordered list of
# behaviour descriptors, each with a display name, a keyboard key, and an
# optional mutual-exclusion group.
templates = {

    'Development': [
        {'name': 'Qwer', 'key': 'q', 'group': 'one'},
        {'name': 'Wert', 'key': 'w', 'group': 'two'},
        {'name': 'Erty', 'key': 'e', 'group': 'three'},
        {'name': 'Asdf', 'key': 'a', 'group': 'one'},
        {'name': 'Sdfg', 'key': 's', 'group': 'two'},
        {'name': 'Dfgh', 'key': 'd', 'group': 'three'},
    ],

    'Re-Organized Flies': [
        # Edited version of "General Flies": similar behaviours are grouped
        # together, "walk away" (from Chen et al.) has been removed, and
        # "chase" has been redefined to include being pursued quickly or
        # slowly (may later split into 'fast chase'/'slow chase').
        # Behaviours done TO the focal individual have been added.
        {'name': 'approach', 'key': 'u', 'group': None},
        {'name': 'wing threat', 'key': 'o', 'group': None},
        {'name': 'defensive wing threat', 'key': 'r', 'group': None},
        {'name': 'wings erect', 'key': 'p', 'group': None},
        {'name': 'low-level fencing', 'key': 'i', 'group': None},
        {'name': 'high-level fencing', 'key': 'h', 'group': None},
        {'name': 'boxing', 'key': 'b', 'group': None},
        {'name': 'tussling', 'key': 'n', 'group': None},
        {'name': 'lunging', 'key': 'k', 'group': None},
        {'name': 'lunged at', 'key': 'f', 'group': None},
        {'name': 'holding', 'key': 'l', 'group': None},
        {'name': 'held', 'key': 'd', 'group': None},
        {'name': 'chasing', 'key': 'j', 'group': None},
        {'name': 'being chased', 'key': 'e', 'group': None},
        {'name': 'turn away', 'key': 'g', 'group': None},
        {'name': 'retreat', 'key': 'q', 'group': None},
        {'name': 'fly away', 'key': 'w', 'group': None},
        {'name': 'arrive (in camera view)', 'key': 's', 'group': None},
        {'name': 'leave (camera view)', 'key': 'a', 'group': None},
    ],

    'General Flies': [
        {'name': 'approach', 'key': 'u', 'group': None},
        {'name': 'low-level fencing', 'key': 'i', 'group': None},
        {'name': 'wing threat', 'key': 'o', 'group': None},
        {'name': 'high-level fencing', 'key': 'h', 'group': None},
        {'name': 'chasing', 'key': 'j', 'group': None},
        {'name': 'lunging', 'key': 'k', 'group': None},
        {'name': 'holding', 'key': 'l', 'group': None},
        {'name': 'boxing', 'key': 'b', 'group': None},
        {'name': 'tussling', 'key': 'n', 'group': None},
        {'name': 'walk away', 'key': 'm', 'group': None},
        {'name': 'wings erect', 'key': 'p', 'group': None},
        {'name': 'defensive wing threat', 'key': 'r', 'group': None},
        {'name': 'run away / being chased', 'key': 'e', 'group': None},
        {'name': 'fly away', 'key': 'w', 'group': None},
        {'name': 'turn away', 'key': 'g', 'group': None},
        # {'name': 'non-aggressive present', 'key': 'f', 'group': None},  # hasn't proven useful ~Tanya
        # {'name': 'non-aggressive absent', 'key': 'd', 'group': None},   # hasn't proven useful ~Tanya
        {'name': 'non-aggressive arrive', 'key': 's', 'group': None},
        {'name': 'non-aggressive leave', 'key': 'a', 'group': None},
    ],

}
} | mikeboers/ScoreBee | scorebee/templates.py | Python | bsd-3-clause | 3,567 |
# This script is used to create data file (expected.txt)
# which is used to compare the output from TensorFlowSharp optimizer tests.
import tensorflow as tf
# Training data: 17 (x, y) pairs for a simple linear regression
# pred = W*x + b, fitted with per-sample momentum SGD.  The printed
# per-step losses/parameters become the expected.txt fixture compared
# against by the TensorFlowSharp optimizer tests.
train_x =[
    3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
    7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1
]
train_y = [
    1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
    2.827,3.465,1.65,2.904,2.42,2.94,1.3
]
n_samples = len(train_x)

learning_rate = 0.01

# TF1 graph mode: samples are fed one at a time through placeholders
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# both parameters start at 0.1 so the C# side can reproduce the run exactly
W = tf.Variable(tf.constant(0.1), dtype=tf.float32)
b = tf.Variable(tf.constant(0.1), dtype=tf.float32)

pred = tf.add(tf.multiply(X,W), b)
# halved squared error: sum((pred - y)^2) / (2 * n_samples)
cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples))
# momentum coefficient 0.9; op is named so the C# test can look it up
optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(cost, name = "MomentumOptimizer")

init = tf.global_variables_initializer()

with tf.Session() as session:
    session.run(init)
    # 2 epochs of stochastic (per-sample) updates; log loss and parameters
    for e in range(2):
        for i in range(n_samples):
            _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]})
            print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}")
            #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i]))
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/volume -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_volume
short_description: Create, modify, and idempotently manage openshift volumes.
description:
- Modify openshift volumes programmatically.
options:
state:
description:
- State controls the action that will be taken with resource
- 'present' will create or update and object to the desired state
- 'absent' will ensure volumes are removed
- 'list' will read the volumes
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
vol_name:
description:
- Name of the volume that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The name of the namespace where the object lives
required: false
default: default
aliases: []
kind:
description:
- The kind of object that can be managed.
default: dc
choices:
- dc
- rc
- pods
aliases: []
mount_type:
description:
- The type of volume to be used
required: false
default: None
choices:
- emptydir
- hostpath
- secret
- pvc
- configmap
aliases: []
mount_path:
description:
- The path to where the mount will be attached
required: false
default: None
aliases: []
secret_name:
description:
- The name of the secret. Used when mount_type is secret.
required: false
default: None
aliases: []
claim_size:
description:
- The size in GB of the pv claim. e.g. 100G
required: false
default: None
aliases: []
claim_name:
description:
- The name of the pv claim
required: false
default: None
aliases: []
configmap_name:
description:
- The name of the configmap
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: attach storage volumes to deploymentconfig
oc_volume:
namespace: logging
kind: dc
name: name_of_the_dc
mount_type: pvc
claim_name: loggingclaim
claim_size: 100G
vol_name: logging-storage
run_once: true
'''
# -*- -*- -*- End included fragment: doc/volume -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    """Raised by Yedit when a yaml document cannot be loaded or edited."""
# pylint: disable=too-many-public-methods
class Yedit(object):  # pragma: no cover
    ''' Class to modify yaml files

    Keys use a dotted-path notation (separator configurable), with
    ``[n]`` list indexing, e.g. ``a.b[0].c``.  Works with either
    ruamel.yaml (format-preserving round-trip) or PyYAML; every
    ruamel-specific call is wrapped in try/except AttributeError so the
    PyYAML fallback is taken automatically.
    '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
    # all supported separators; used to exclude the inactive ones from
    # the character class of the key regexes
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator

        Returns a list of (array_index, dict_key) pairs; exactly one of
        the two is non-empty for each path component.
        '''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False

        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key

        An empty key clears the whole document (dict or list).
        Returns True on removal, None/falsy when the path is invalid.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # walk to the parent of the entry to remove
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b
            return c

        Intermediate dicts are created on demand; list indexes must
        already exist.  Returns the modified subtree, or None/raises on
        an invalid path.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                # auto-vivify the intermediate dict
                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c

        Returns None when the path does not resolve.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        # write to a sibling temp file then rename, so readers never see
        # a half-written file
        tmp_filename = filename + '.yedit'

        with open(tmp_filename, 'w') as yfd:
            yfd.write(contents)

        os.rename(tmp_filename, filename)

    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported.
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' return yaml file

        Populates self.yaml_dict from self.content (dict or str) or
        from self.filename.
        '''
        contents = self.read()
        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

                # Try to use RoundTripLoader if supported.
                # NOTE(review): with ruamel.yaml the second argument lands in
                # safe_load's ``version`` parameter, not a Loader -- confirm
                # this call does what the comment claims.
                try:
                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # a dict value "exists" when every one of its pairs
                # matches the corresponding entry pair
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            # auto-create the list at path before appending
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict

        For a dict entry, merge ``value`` in; for a list entry, replace
        at ``index`` or at the position of ``curr_value``, appending the
        value when it is not present yet.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        # (the copy keeps self.yaml_dict untouched if add_entry fails)
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value'''
        if invalue is None:
            return None

        curr_value = invalue
        if val_type == 'yaml':
            # NOTE(review): yaml.load without an explicit safe loader --
            # only safe if the module-supplied value is trusted; flagging,
            # not changing.
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)

        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

        return inc_value

    @staticmethod
    def process_edits(edits, yamlfile):
        '''run through a list of edits and process them one-by-one'''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            if edit.get('action') == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))

                rval = yamlfile.update(edit['key'],
                                       value,
                                       edit.get('index'),
                                       curr_value)

            elif edit.get('action') == 'append':
                rval = yamlfile.append(edit['key'], value)

            else:
                rval = yamlfile.put(edit['key'], value)

            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})

        return {'changed': len(results) > 0, 'results': results}

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params):
        '''perform the idempotent crud operations

        Entry point used by the Ansible module: dispatches on
        params['state'] in ('list', 'absent', 'present') and returns a
        module-result dict.
        '''
        yamlfile = Yedit(filename=params['src'],
                         backup=params['backup'],
                         separator=params['separator'])

        state = params['state']

        if params['src']:
            rval = yamlfile.load()

            if yamlfile.yaml_dict is None and state != 'present':
                return {'failed': True,
                        'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                               'file exists, that it is has correct permissions, and is valid yaml.'}

        if state == 'list':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['key']:
                rval = yamlfile.get(params['key'])

            return {'changed': False, 'result': rval, 'state': state}

        elif state == 'absent':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content

            if params['update']:
                rval = yamlfile.pop(params['key'], params['value'])
            else:
                rval = yamlfile.delete(params['key'])

            if rval[0] and params['src']:
                yamlfile.write()

            return {'changed': rval[0], 'result': rval[1], 'state': state}

        elif state == 'present':
            # check if content is different than what is in the file
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])

                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   params['value'] is None:
                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

                yamlfile.yaml_dict = content

            # If we were passed a key, value then
            # we enapsulate it in a list and process it
            # Key, Value passed to the module : Converted to Edits list #
            edits = []
            _edit = {}
            if params['value'] is not None:
                _edit['value'] = params['value']
                _edit['value_type'] = params['value_type']
                _edit['key'] = params['key']

                if params['update']:
                    _edit['action'] = 'update'
                    _edit['curr_value'] = params['curr_value']
                    _edit['curr_value_format'] = params['curr_value_format']
                    _edit['index'] = params['index']

                elif params['append']:
                    _edit['action'] = 'append'

                edits.append(_edit)

            elif params['edits'] is not None:
                edits = params['edits']

            if edits:
                results = Yedit.process_edits(edits, yamlfile)

                # if there were changes and a src provided to us we need to write
                if results['changed'] and params['src']:
                    yamlfile.write()

                return {'changed': results['changed'], 'result': results['results'], 'state': state}

            # no edits to make
            if params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': state}

            # We were passed content but no src, key or value, or edits. Return contents in memory
            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        # (sic: "Unkown" is the historical message text; callers may match it)
        return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    """Raised for errors encountered while driving the ``oc`` command line."""
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    """Return the full path to the ``oc`` binary, or plain ``'oc'`` if not found.

    oc can live in /usr/local/bin in some cases, but that directory may not be
    on $PATH under ansible/sudo, so a couple of extra locations are searched.
    See https://github.com/openshift/openshift-ansible/issues/3410
    """
    search_paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
    binary = 'oc'
    try:
        # Python 3: shutil.which honors an explicit search path.
        found = shutil.which(binary, path=os.pathsep.join(search_paths))
    except AttributeError:
        # Python 2 has no shutil.which; fall back to a naive directory scan.
        for directory in search_paths:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                binary = candidate
                break
    else:
        if found is not None:
            binary = found
    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

            namespace: namespace passed to oc as ``-n`` (unless it is a
                "none"/"empty" sentinel or all_namespaces is set)
            kubeconfig: kubeconfig to use; a private temporary copy is made
                so concurrent module runs cannot interfere with each other
            verbose: when True, echo each command and its output to stdout
            all_namespaces: pass ``--all-namespaces`` instead of ``-n``
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content

            Fetches the live object, applies each (key, value) from `content`
            via Yedit, and calls `oc replace` only if something changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()

        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource; a selector takes precedence over a name'''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # Template supplied inline; feed it to oc on stdin.
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are returned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

           Returns a dict with at least 'returncode', 'cmd' and 'results';
           'stderr'/'stdout' are included on failure.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # BUGFIX: the sentinel list previously contained only the misspelled
        # 'emtpy', so a namespace literally named "empty" was still passed to
        # oc.  The misspelling is kept for backward compatibility.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}

        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    # BUGFIX: always record the decode failure.  The old code
                    # matched the Python 2 error text ("No JSON object could
                    # be decoded") only, so on Python 3 bad JSON was silently
                    # swallowed and 'err' was never set.
                    rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})

        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            # Hoisted out of the inner loop: an empty line can never match.
            if not line:
                continue
            for term in version_search:
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''

        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                version = version[1:]  # Remove the 'v' prefix
                versions_dict[tech + '_numeric'] = version.split('+')[0]
                # "3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import rpm
        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.  '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # BUGFIX: this message is debug output and was
                            # previously printed unconditionally.
                            if debug:
                                print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    # BUGFIX: guard the diagnostic prints on debug, matching
                    # every other branch of this function.
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for name in sorted(self.config_options):
            entry = self.config_options[name]
            # Skip options that are excluded or have no usable value
            # (0 and False still count as values).
            if not entry['include']:
                continue
            if entry['value'] is None and not isinstance(entry['value'], int):
                continue
            if name == ascommalist:
                value = ','.join('{}={}'.format(k, v) for k, v in sorted(entry['value'].items()))
            else:
                value = entry['value']
            params.append('--{}={}'.format(name.replace('_', '-'), value))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
    ''' Class to model an openshift DeploymentConfig'''
    default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: default_dc
  namespace: default
spec:
  replicas: 0
  selector:
    default_dc: default_dc
  strategy:
    resources: {}
    rollingParams:
      intervalSeconds: 1
      maxSurge: 0
      maxUnavailable: 25%
      timeoutSeconds: 600
      updatePercent: -25
      updatePeriodSeconds: 1
    type: Rolling
  template:
    metadata:
    spec:
      containers:
      - env:
        - name: default
          value: default
        image: default
        imagePullPolicy: IfNotPresent
        name: default_dc
        ports:
        - containerPort: 8000
          hostPort: 8000
          protocol: TCP
          name: default_port
        resources: {}
        terminationMessagePath: /dev/termination-log
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        type: compute
      restartPolicy: Always
      securityContext: {}
      serviceAccount: default
      serviceAccountName: default
      terminationGracePeriodSeconds: 30
  triggers:
  - type: ConfigChange
'''

    # Yedit paths into the deploymentconfig document.
    replicas_path = "spec.replicas"
    env_path = "spec.template.spec.containers[0].env"
    volumes_path = "spec.template.spec.volumes"
    container_path = "spec.template.spec.containers"
    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"

    def __init__(self, content=None):
        ''' Constructor for deploymentconfig '''
        if not content:
            content = DeploymentConfig.default_deployment_config

        super(DeploymentConfig, self).__init__(content=content)

    def add_env_value(self, key, value):
        ''' add key, value pair to env array '''
        rval = False
        env = self.get_env_vars()
        if env:
            env.append({'name': key, 'value': value})
            rval = True
        else:
            # No env array yet; create it with this single entry.
            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
            rval = result[0]

        return rval

    def exists_env_value(self, key, value):
        ''' return whether a key, value pair exists '''
        results = self.get_env_vars()
        if not results:
            return False

        for result in results:
            if result['name'] == key and result['value'] == value:
                return True

        return False

    def exists_env_key(self, key):
        ''' return whether a key, value pair exists '''
        results = self.get_env_vars()
        if not results:
            return False

        for result in results:
            if result['name'] == key:
                return True

        return False

    def get_env_var(self, key):
        '''return a environment variable '''
        results = self.get(DeploymentConfig.env_path) or []
        if not results:
            return None

        for env_var in results:
            if env_var['name'] == key:
                return env_var

        return None

    def get_env_vars(self):
        '''return the environment variables '''
        return self.get(DeploymentConfig.env_path) or []

    def delete_env_var(self, keys):
        '''delete a list of keys

           Returns True when at least one entry was removed.
        '''
        if not isinstance(keys, list):
            keys = [keys]

        env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            # BUGFIX: reset the index for every key.  Previously a stale
            # index from an earlier key could delete the wrong entry, and a
            # match at index 0 was never deleted because `if idx:` is False
            # for 0.
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break

            if idx is not None:
                modified = True
                del env_vars_array[idx]

        if modified:
            return True
        return False

    def update_env_var(self, key, value):
        '''place an env in the env var list'''

        env_vars_array = self.get_env_vars()
        idx = None
        for env_idx, env_var in enumerate(env_vars_array):
            if env_var['name'] == key:
                idx = env_idx
                break

        # BUGFIX: `if idx:` treated a match at index 0 as "not found" and
        # appended a duplicate entry instead of updating in place.
        if idx is not None:
            env_vars_array[idx]['value'] = value
        else:
            self.add_env_value(key, value)

        return True

    def exists_volume_mount(self, volume_mount):
        ''' return whether a volume mount exists '''
        exist_volume_mounts = self.get_volume_mounts()

        if not exist_volume_mounts:
            return False

        volume_mount_found = False
        for exist_volume_mount in exist_volume_mounts:
            if exist_volume_mount['name'] == volume_mount['name']:
                volume_mount_found = True
                break

        return volume_mount_found

    def exists_volume(self, volume):
        ''' return whether a volume exists '''
        exist_volumes = self.get_volumes()

        volume_found = False
        for exist_volume in exist_volumes:
            if exist_volume['name'] == volume['name']:
                volume_found = True
                break

        return volume_found

    def find_volume_by_name(self, volume, mounts=False):
        ''' return the volume (or volume mount when mounts=True) matching by name '''
        volumes = []
        if mounts:
            volumes = self.get_volume_mounts()
        else:
            volumes = self.get_volumes()

        for exist_volume in volumes:
            if exist_volume['name'] == volume['name']:
                return exist_volume

        return None

    def get_replicas(self):
        ''' return replicas setting '''
        return self.get(DeploymentConfig.replicas_path)

    def get_volume_mounts(self):
        '''return volume mount information '''
        return self.get_volumes(mounts=True)

    def get_volumes(self, mounts=False):
        '''return volume mount information '''
        if mounts:
            return self.get(DeploymentConfig.volume_mounts_path) or []

        return self.get(DeploymentConfig.volumes_path) or []

    def delete_volume_by_name(self, volume):
        '''delete a volume '''
        modified = False
        exist_volume_mounts = self.get_volume_mounts()
        exist_volumes = self.get_volumes()
        del_idx = None
        for idx, exist_volume in enumerate(exist_volumes):
            if 'name' in exist_volume and exist_volume['name'] == volume['name']:
                del_idx = idx
                break

        if del_idx is not None:
            del exist_volumes[del_idx]
            modified = True

        del_idx = None
        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
                del_idx = idx
                break

        if del_idx is not None:
            # BUGFIX: delete by del_idx; the old code deleted by the loop
            # variable `idx`, which only happened to be correct on break.
            del exist_volume_mounts[del_idx]
            modified = True

        return modified

    def add_volume_mount(self, volume_mount):
        ''' add a volume or volume mount to the proper location '''
        exist_volume_mounts = self.get_volume_mounts()

        if not exist_volume_mounts and volume_mount:
            self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
        else:
            exist_volume_mounts.append(volume_mount)

    def add_volume(self, volume):
        ''' add a volume or volume mount to the proper location '''
        exist_volumes = self.get_volumes()
        if not volume:
            return

        if not exist_volumes:
            self.put(DeploymentConfig.volumes_path, [volume])
        else:
            exist_volumes.append(volume)

    def update_replicas(self, replicas):
        ''' update replicas value '''
        self.put(DeploymentConfig.replicas_path, replicas)

    def update_volume(self, volume):
        '''place a volume into the volumes list, replacing any same-named entry'''
        exist_volumes = self.get_volumes()

        if not volume:
            return False

        # update the volume
        update_idx = None
        for idx, exist_vol in enumerate(exist_volumes):
            if exist_vol['name'] == volume['name']:
                update_idx = idx
                break

        if update_idx is not None:
            exist_volumes[update_idx] = volume
        else:
            self.add_volume(volume)

        return True

    def update_volume_mount(self, volume_mount):
        '''place a volume mount into the mounts list, updating mountPath if needed'''
        modified = False

        exist_volume_mounts = self.get_volume_mounts()

        if not volume_mount:
            return False

        # update the volume mount
        for exist_vol_mount in exist_volume_mounts:
            if exist_vol_mount['name'] == volume_mount['name']:
                if 'mountPath' in exist_vol_mount and \
                        str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
                    exist_vol_mount['mountPath'] = volume_mount['mountPath']
                    modified = True
                break

        if not modified:
            self.add_volume_mount(volume_mount)
            modified = True

        return modified

    def needs_update_volume(self, volume, volume_mount):
        ''' verify a volume update is needed '''
        exist_volume = self.find_volume_by_name(volume)
        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
        results = []
        results.append(exist_volume['name'] == volume['name'])

        if 'secret' in volume:
            results.append('secret' in exist_volume)
            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
            results.append(exist_volume_mount['name'] == volume_mount['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])

        elif 'emptyDir' in volume:
            results.append(exist_volume_mount['name'] == volume['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])

        elif 'persistentVolumeClaim' in volume:
            pvc = 'persistentVolumeClaim'
            results.append(pvc in exist_volume)
            if results[-1]:
                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])

                if 'claimSize' in volume[pvc]:
                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])

        elif 'hostpath' in volume:
            results.append('hostPath' in exist_volume)
            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])

        return not all(results)

    def needs_update_replicas(self, replicas):
        ''' verify whether a replica update is needed '''
        current_reps = self.get(DeploymentConfig.replicas_path)
        return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
    ''' Class to represent an openshift volume object'''
    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
                          "dc": "spec.template.spec.containers[0].volumeMounts",
                          "rc": "spec.template.spec.containers[0].volumeMounts",
                         }
    volumes_path = {"pod": "spec.volumes",
                    "dc": "spec.template.spec.volumes",
                    "rc": "spec.template.spec.volumes",
                   }

    @staticmethod
    def create_volume_structure(volume_info):
        ''' return a properly structured volume

            volume_info keys: name, type, and (depending on type) path,
            secret_name, claimName, claimSize, configmap_name.
            Returns a (volume, volume_mount) tuple; volume_mount is None for
            types that are not mounted (pvc, hostpath).
        '''
        volume_mount = None
        volume = {'name': volume_info['name']}
        volume_type = volume_info['type'].lower()
        if volume_type == 'secret':
            # BUGFIX: key the secret spec on the normalized 'secret' key.
            # The old code used the caller-supplied type verbatim, so e.g.
            # type='Secret' produced {'secret': {}, 'Secret': {...}}.
            volume['secret'] = {'secretName': volume_info['secret_name']}
            volume_mount = {'mountPath': volume_info['path'],
                            'name': volume_info['name']}
        elif volume_type == 'emptydir':
            volume['emptyDir'] = {}
            volume_mount = {'mountPath': volume_info['path'],
                            'name': volume_info['name']}
        elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
            volume['persistentVolumeClaim'] = {}
            volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
            volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
        elif volume_type == 'hostpath':
            volume['hostPath'] = {}
            volume['hostPath']['path'] = volume_info['path']
        elif volume_type == 'configmap':
            volume['configMap'] = {}
            volume['configMap']['name'] = volume_info['configmap_name']
            volume_mount = {'mountPath': volume_info['path'],
                            'name': volume_info['name']}

        return (volume, volume_mount)
# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_volume.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVolume(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
                          "dc": "spec.template.spec.containers[0].volumeMounts",
                          "rc": "spec.template.spec.containers[0].volumeMounts",
                         }
    volumes_path = {"pod": "spec.volumes",
                    "dc": "spec.template.spec.volumes",
                    "rc": "spec.template.spec.volumes",
                   }

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 kind,
                 resource_name,
                 namespace,
                 vol_name,
                 mount_path,
                 mount_type,
                 secret_name,
                 claim_size,
                 claim_name,
                 configmap_name,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OCVolume '''
        super(OCVolume, self).__init__(namespace, kubeconfig)
        self.kind = kind
        self.volume_info = {'name': vol_name,
                            'secret_name': secret_name,
                            'path': mount_path,
                            'type': mount_type,
                            'claimSize': claim_size,
                            'claimName': claim_name,
                            'configmap_name': configmap_name}
        self.volume, self.volume_mount = Volume.create_volume_structure(self.volume_info)
        self.name = resource_name
        self.namespace = namespace
        self.kubeconfig = kubeconfig
        self.verbose = verbose
        self._resource = None

    @property
    def resource(self):
        ''' property function for resource var; lazily fetched on first access '''
        if not self._resource:
            self.get()
        return self._resource

    @resource.setter
    def resource(self, data):
        ''' setter function for resource var '''
        self._resource = data

    def exists(self):
        ''' return whether the volume (and its mount, if any) exists '''
        volume_mount_found = False
        volume_found = self.resource.exists_volume(self.volume)
        if not self.volume_mount and volume_found:
            return True

        if self.volume_mount:
            volume_mount_found = self.resource.exists_volume_mount(self.volume_mount)

        if volume_found and self.volume_mount and volume_mount_found:
            return True

        return False

    def get(self):
        '''return volume information '''
        vol = self._get(self.kind, self.name)
        if vol['returncode'] == 0:
            if self.kind == 'dc':
                self.resource = DeploymentConfig(content=vol['results'][0])
                vol['results'] = self.resource.get_volumes()

        return vol

    def delete(self):
        '''remove a volume'''
        self.resource.delete_volume_by_name(self.volume)
        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)

    def put(self):
        '''place volume into dc '''
        self.resource.update_volume(self.volume)
        self.resource.get_volumes()
        self.resource.update_volume_mount(self.volume_mount)
        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)

    def needs_update(self):
        ''' verify an update is needed '''
        return self.resource.needs_update_volume(self.volume, self.volume_mount)

    # pylint: disable=too-many-branches,too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode=False):
        '''run the idempotent ansible code'''
        oc_volume = OCVolume(params['kind'],
                             params['name'],
                             params['namespace'],
                             params['vol_name'],
                             params['mount_path'],
                             params['mount_type'],
                             # secrets
                             params['secret_name'],
                             # pvc
                             params['claim_size'],
                             params['claim_name'],
                             # configmap
                             params['configmap_name'],
                             kubeconfig=params['kubeconfig'],
                             verbose=params['debug'])

        state = params['state']

        api_rval = oc_volume.get()

        if api_rval['returncode'] != 0:
            return {'failed': True, 'msg': api_rval}

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': state}

        ########
        # Delete
        ########
        if state == 'absent':
            if oc_volume.exists():
                if check_mode:
                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}

                api_rval = oc_volume.delete()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            return {'changed': False, 'state': state}

        if state == 'present':
            ########
            # Create
            ########
            if not oc_volume.exists():
                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}

                # Create it here
                api_rval = oc_volume.put()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_volume.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Update
            ########
            if oc_volume.needs_update():
                api_rval = oc_volume.put()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_volume.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # BUGFIX: these two returns previously used the bare variable
                # `state` as a dict KEY ({..., state: state}), producing e.g.
                # {'present': 'present'} instead of {'state': 'present'}.
                return {'changed': True, 'results': api_rval, 'state': state}

            return {'changed': False, 'results': api_rval, 'state': state}

        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
# -*- -*- -*- End included fragment: class/oc_volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_volume.py -*- -*- -*-
def main():
    '''
    ansible oc module for volumes
    '''
    argspec = dict(
        kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
        state=dict(default='present', type='str',
                   choices=['present', 'absent', 'list']),
        debug=dict(default=False, type='bool'),
        kind=dict(default='dc', choices=['dc', 'rc', 'pods'], type='str'),
        namespace=dict(default='default', type='str'),
        vol_name=dict(default=None, type='str'),
        name=dict(default=None, type='str'),
        mount_type=dict(default=None,
                        choices=['emptydir', 'hostpath', 'secret', 'pvc', 'configmap'],
                        type='str'),
        mount_path=dict(default=None, type='str'),
        # secrets require a name
        secret_name=dict(default=None, type='str'),
        # pvc requires a size
        claim_size=dict(default=None, type='str'),
        claim_name=dict(default=None, type='str'),
        # configmap requires a name
        configmap_name=dict(default=None, type='str'),
    )
    ansible_module = AnsibleModule(argument_spec=argspec, supports_check_mode=True)

    result = OCVolume.run_ansible(ansible_module.params, ansible_module.check_mode)
    if 'failed' in result:
        ansible_module.fail_json(**result)

    ansible_module.exit_json(**result)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_volume.py -*- -*- -*-
| twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/lib_openshift/library/oc_volume.py | Python | apache-2.0 | 70,312 |
"""
Sending pastries generated from the oven to the shop.
"""
from PyBake import *
from PyBake.logger import *
from importlib import import_module
import textwrap
import zipfile
import requests
# Note: This function could run concurrently.
def uploadPastry(menu, pastry, server, *, force):
  """
  Uploads a pastry to the server.

  Validates that the `pastry.json` stored inside the pastry package matches
  the menu entry, then POSTs the package to the server's /upload_pastry
  endpoint.

  :param menu: Menu used to resolve the pastry's package path.
  :param pastry: The pastry (metadata) to upload.
  :param server: Base URL of the shop server.
  :param force: If truthy, ask the server to overwrite an existing pastry.
  """
  pastryPath = menu.makePath(pastry)
  # Extract pastry.json data from the pastry package (pastry.zip),
  # to validate the pastry.
  with pastryPath.open("rb") as pastryFile:
    with zipfile.ZipFile(pastryFile) as zip_file:
      # Note: For some reason, json.load does not accept the result
      # of ZipFile.open, so we use ZipFile.read instead to load
      # the entire file as bytes, convert it to a string, and parse that.
      pastryBytes = zip_file.read("pastry.json")
      zippedPastry = Pastry(data=json.loads(pastryBytes.decode("UTF-8")))
  if zippedPastry != pastry:
    log.error("Pastry infos from the menu and the pastry.json inside it do not match: {} vs. {}".format(zippedPastry, pastry))
    return
  postUrl = "{}/upload_pastry".format(server)
  with LogBlock("Uploading {}".format(pastry)):
    requestData = dict(pastry)
    requestData["force"] = bool(force)
    log.info("Sending data...")
    log.debug("Data: {}".format(requestData))
    # Re-open the package only for the duration of the upload so the file
    # handle is always closed afterwards (previously the handle passed via
    # `files` was never closed).
    with pastryPath.open("rb") as packageFile:
      # Send the post request with some meta data and the actual file data.
      response = requests.post(postUrl, data=requestData, files={"pastry": packageFile})
    log.dev("Status: {}\n{}".format(response.status_code, response.text))
    if response.ok:
      log.success("Successful deposit.")
    else:
      log.error("Failed to upload pastry.")
def run(*, pastryPaths, configPath, force, **kwargs):
  """Deposit a pastry in a shop.

  :param pastryPaths: Iterable of directories containing a menu of pastries.
  :param configPath: Path to the user config script that declares the server.
  :param force: Forwarded to uploadPastry; requests overwriting on the server.
  """
  # Import the config script.
  log.debug("Importing config script: {}".format(configPath.as_posix()))
  config = importFromFile(configPath)
  # Accept either attribute name; raise if the config declares neither.
  server = try_getattr(config, ("server", "serverConfig"), raise_error=True)
  with LogBlock("Server: {}".format(server)):
    for pastryPath in pastryPaths:
      pastryPath.safe_mkdir(parents=True)
      # Load the menu in this directory and upload every registered pastry.
      menu = Menu(pastryPath)
      menu.load()
      for pastry in menu.registry:
        uploadPastry(menu, pastry, server, force=force)
| lab132/PyBake | PyBake/depot.py | Python | mit | 2,338 |
#############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import json
import math
import numpy
import os
from pkg_resources import DistributionNotFound, get_distribution
import PIL.Image
from large_image import config
from large_image.cache_util import LruCacheMetaclass, methodcache, strhash
from large_image.constants import TILE_FORMAT_PIL
from large_image.exceptions import TileSourceException
from large_image.tilesource import FileTileSource
# Resolve the installed package version from setuptools metadata.
try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed (e.g. running from a source checkout),
    # so no version attribute is set
    pass
def getMaxSize(size=None, maxDefault=4096):
    """
    Get the maximum width and height that we allow for an image.

    :param size: the requested maximum size.  Either a single number applied
        to both dimensions, or an object with optional {'width': (width),
        'height': (height)} keys in pixels.  If None, maxDefault is used.
    :param maxDefault: a default value to use for width and height.
    :returns: maxWidth, maxHeight in pixels.  0 means no images are allowed.
    """
    # We may want to put an upper limit on what is requested so it can't be
    # completely overridden.
    if size is None:
        return maxDefault, maxDefault
    if isinstance(size, dict):
        return size.get('width', maxDefault), size.get('height', maxDefault)
    return size, size
class PILFileTileSource(FileTileSource, metaclass=LruCacheMetaclass):
    """
    Provides tile access to single image PIL files.

    The whole image is served as a single tile; images larger than the
    configured maximum size are rejected.
    """
    cacheName = 'tilesource'
    name = 'pilfile'
    # No extensions or mime types are explicitly added for the PIL tile source,
    # as it should always be a fallback source
    def __init__(self, path, maxSize=None, **kwargs):
        """
        Initialize the tile class.  See the base class for other available
        parameters.
        :param path: the associated file path.
        :param maxSize: either a number or an object with {'width': (width),
            'height': height} in pixels.  If None, the default max size is
            used.  May also be a JSON string encoding one of those.
        :raises TileSourceException: if maxSize cannot be parsed, the file
            cannot be opened by PIL, or the image exceeds the size limit.
        """
        super().__init__(path, **kwargs)
        # Keep the raw constructor argument (possibly a JSON string) for
        # getState(); self.maxSize below holds the parsed value.
        self._maxSize = maxSize
        if isinstance(maxSize, str):
            try:
                maxSize = json.loads(maxSize)
            except Exception:
                raise TileSourceException(
                    'maxSize must be None, an integer, a dictionary, or a '
                    'JSON string that converts to one of those.')
        self.maxSize = maxSize
        largeImagePath = self._getLargeImagePath()
        # Some formats shouldn't be read this way, even if they could.  For
        # instances, mirax (mrxs) files look like JPEGs, but opening them as
        # such misses most of the data.
        if os.path.splitext(largeImagePath)[1] in ('.mrxs', ):
            raise TileSourceException('File cannot be opened via PIL.')
        try:
            self._pilImage = PIL.Image.open(largeImagePath)
        except OSError:
            raise TileSourceException('File cannot be opened via PIL.')
        # If this is encoded as a 32-bit integer or a 32-bit float, convert it
        # to an 8-bit integer.  This expects the source value to either have a
        # maximum of 1, 2^8-1, 2^16-1, 2^24-1, or 2^32-1, and scales it to
        # [0, 255]
        pilImageMode = self._pilImage.mode.split(';')[0]
        if pilImageMode in ('I', 'F'):
            imgdata = numpy.asarray(self._pilImage)
            # Next power-of-256 ceiling of the maximum sample value.
            maxval = 256 ** math.ceil(math.log(numpy.max(imgdata) + 1, 256)) - 1
            self._pilImage = PIL.Image.fromarray(numpy.uint8(numpy.multiply(
                imgdata, 255.0 / maxval)))
        self.sizeX = self._pilImage.width
        self.sizeY = self._pilImage.height
        # We have just one tile which is the entire image.
        self.tileWidth = self.sizeX
        self.tileHeight = self.sizeY
        self.levels = 1
        # Throw an exception if too big
        if self.tileWidth <= 0 or self.tileHeight <= 0:
            raise TileSourceException('PIL tile size is invalid.')
        maxWidth, maxHeight = getMaxSize(maxSize, self.defaultMaxSize())
        if self.tileWidth > maxWidth or self.tileHeight > maxHeight:
            raise TileSourceException('PIL tile size is too large.')
    def defaultMaxSize(self):
        """
        Get the default max size from the config settings.
        :returns: the default max size.
        """
        return int(config.getConfig('max_small_image_size', 4096))
    @staticmethod
    def getLRUHash(*args, **kwargs):
        # Include maxSize in the cache key so differently-limited sources
        # are cached separately.
        return strhash(
            super(PILFileTileSource, PILFileTileSource).getLRUHash(
                *args, **kwargs),
            kwargs.get('maxSize'))
    def getState(self):
        # Append the raw (unparsed) maxSize argument to the base state.
        return super().getState() + ',' + str(
            self._maxSize)
    def getInternalMetadata(self, **kwargs):
        """
        Return additional known metadata about the tile source.  Data returned
        from this method is not guaranteed to be in any particular format or
        have specific values.
        :returns: a dictionary of data or None.
        """
        results = {'pil': {}}
        # Copy whichever of these PIL attributes exist on the image.
        for key in ('filename', 'format', 'mode', 'size', 'width', 'height', 'palette', 'info'):
            try:
                results['pil'][key] = getattr(self._pilImage, key)
            except Exception:
                pass
        return results
    @methodcache()
    def getTile(self, x, y, z, pilImageAllowed=False, numpyAllowed=False,
                mayRedirect=False, **kwargs):
        # The only valid tile is (0, 0) at level 0: the whole image.
        if z != 0:
            raise TileSourceException('z layer does not exist')
        if x != 0:
            raise TileSourceException('x is outside layer')
        if y != 0:
            raise TileSourceException('y is outside layer')
        return self._outputTile(self._pilImage, TILE_FORMAT_PIL, x, y, z,
                                pilImageAllowed, numpyAllowed, **kwargs)
def open(*args, **kwargs):
    """
    Create an instance of the module class.
    """
    source = PILFileTileSource(*args, **kwargs)
    return source
def canRead(*args, **kwargs):
    """
    Check if an input can be read by the module class.
    """
    readable = PILFileTileSource.canRead(*args, **kwargs)
    return readable
| DigitalSlideArchive/large_image | sources/pil/large_image_source_pil/__init__.py | Python | apache-2.0 | 6,995 |
# Package identity metadata for django-summernote.
version_info = (0, 6, 16)
# Render the version tuple as a dotted string, e.g. '0.6.16'.
__version__ = version = '{}.{}.{}'.format(*version_info)
__project__ = PROJECT = 'django-summernote'
__author__ = AUTHOR = "Park Hyunwoo <ez.amiryo@gmail.com>"
# Django app configuration entry point.
default_app_config = 'django_summernote.apps.DjangoSummernoteConfig'
| WQuanfeng/django-summernote | django_summernote/__init__.py | Python | mit | 258 |
# -*- coding: utf-8 -*-
from pysignfe.xml_sped import *
from .ConsultarSituacaoLoteRps import ListaMensagemRetorno
from .Rps import IdentificacaoPrestador, IdentificacaoRps
from .Nfse import CompNfse
import os
# Directory containing this module; used below to locate the XSD schema files.
DIRNAME = os.path.dirname(__file__)
class ConsultarLoteRpsEnvio(XMLNFe):
    """Request message for the NFS-e "ConsultarLoteRps" service call.
    Carries the service provider identification and the protocol number
    returned when the RPS batch was originally submitted.
    """
    def __init__(self):
        super(ConsultarLoteRpsEnvio, self).__init__()
        # Root tag with the message schema version.
        self.versao = TagDecimal(nome=u'ConsultarLoteRpsEnvio', propriedade=u'versao', namespace=NAMESPACE_NFSE, valor=u'1.00', raiz=u'/')
        self.Prestador = IdentificacaoPrestador()
        self.Protocolo = TagCaracter(nome=u'Protocolo', tamanho=[ 1, 50], raiz=u'/')
        # XSD schema used to validate the generated XML.
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
        self.arquivo_esquema = u'nfse.xsd'
    def get_xml(self):
        # Serialize the request; nested tags have their XML declaration
        # (ABERTURA) stripped so it appears only once at the top.
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += u'<ConsultarLoteRpsEnvio xmlns="'+ NAMESPACE_NFSE + '">'
        xml += self.Prestador.xml.replace(ABERTURA, u'')
        xml += self.Protocolo.xml
        xml += u'</ConsultarLoteRpsEnvio>'
        return xml
    def set_xml(self, arquivo):
        # Populate the child tags when the XML source loads successfully.
        if self._le_xml(arquivo):
            self.Prestador.xml = arquivo
            self.Protocolo.xml = arquivo
    xml = property(get_xml, set_xml)
class ConsultarLoteRpsResposta(XMLNFe):
    """Response message for the NFS-e "ConsultarLoteRps" service call.
    Contains either the list of issued NFS-e documents (CompNfse) or a
    list of error/return messages (ListaMensagemRetorno).
    """
    def __init__(self):
        super(ConsultarLoteRpsResposta, self).__init__()
        self.CompNfse = []
        self.ListaMensagemRetorno = ListaMensagemRetorno()
        # XSD schema used to validate the generated XML.
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
        self.arquivo_esquema = u'nfse.xsd'
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += u'<ConsultarLoteRpsResposta xmlns="'+ NAMESPACE_NFSE + '">'
        # Return messages and the NFS-e list are mutually exclusive in the
        # serialized response.
        if len(self.ListaMensagemRetorno.MensagemRetorno) != 0:
            xml += self.ListaMensagemRetorno.xml.replace(ABERTURA, u'')
        else:
            xml += u'<ListaNfse>'
            for c in self.CompNfse:
                xml += tira_abertura(c.xml)
            xml += u'</ListaNfse>'
        xml += u'</ConsultarLoteRpsResposta>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            # le_grupo extracts every CompNfse element from the response.
            self.CompNfse = self.le_grupo('[nfse]//ConsultarLoteRpsResposta/CompNfse', CompNfse)
            self.ListaMensagemRetorno.xml = arquivo
    xml = property(get_xml, set_xml)
from __future__ import print_function
import ttfw_idf
# Timeout applied to every dut.expect/expect_any call below.
EXPECT_TIMEOUT = 20
@ttfw_idf.idf_example_test(env_tag='Example_I2C_CCS811_SENSOR')
def test_i2ctools_example(env, extra_data):
    """Drive the i2c-tools console example against a CCS811 sensor board."""
    # Get device under test, flash and start example. "i2ctool" must be defined in EnvConfig
    dut = env.get_dut('i2ctools', 'examples/peripherals/i2c/i2c_tools', dut_class=ttfw_idf.ESP32DUT)
    dut.start_app()
    # Wait for the console prompt before issuing commands.
    dut.expect('i2c-tools>', timeout=EXPECT_TIMEOUT)
    # Get i2c address
    dut.write('i2cdetect')
    dut.expect('5b', timeout=EXPECT_TIMEOUT)
    # Get chip ID
    dut.write('i2cget -c 0x5b -r 0x20 -l 1')
    dut.expect('0x81', timeout=EXPECT_TIMEOUT)
    # Reset sensor
    dut.write('i2cset -c 0x5b -r 0xFF 0x11 0xE5 0x72 0x8A')
    dut.expect('OK', timeout=EXPECT_TIMEOUT)
    # Get status
    # NOTE(review): expect_any is used here with a single pattern, while the
    # later status check accepts two alternatives — confirm whether another
    # status byte should also be accepted at this point.
    dut.write('i2cget -c 0x5b -r 0x00 -l 1')
    dut.expect_any('0x10', timeout=EXPECT_TIMEOUT)
    # Change work mode
    dut.write('i2cset -c 0x5b -r 0xF4')
    dut.expect('OK', timeout=EXPECT_TIMEOUT)
    dut.write('i2cset -c 0x5b -r 0x01 0x10')
    dut.expect('OK', timeout=EXPECT_TIMEOUT)
    # Get new status
    dut.write('i2cget -c 0x5b -r 0x00 -l 1')
    dut.expect_any('0x98', '0x90', timeout=EXPECT_TIMEOUT)
if __name__ == '__main__':
    test_i2ctools_example()
| espressif/esp-idf | examples/peripherals/i2c/i2c_tools/example_test.py | Python | apache-2.0 | 1,274 |
#!/usr/bin/env python
def configuration(parent_package='',top_path=None):
    """Build the numpy.distutils configuration for the nlpy solvers package.
    Registers the solver driver scripts so they are installed as
    executables.
    """
    # numpy is imported for its distutils machinery; Configuration does the
    # actual package description work.
    import numpy
    from numpy.distutils.misc_util import Configuration
    config = Configuration('solvers', parent_package, top_path)
    config.add_scripts(['nlpy_trunk.py',
                        'nlpy_lbfgs.py',
                        'nlpy_ldfp.py',
                        'nlpy_reglp.py',
                        'nlpy_regqp.py',
                        'nlpy_funnel.py',
                        'nlpy_elastic.py'])
    config.make_config_py()
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| dpo/nlpy | nlpy/optimize/solvers/setup.py | Python | gpl-3.0 | 671 |
from django.contrib.auth.models import Group
class GroupManager:
    """Static helpers for querying and authorising user-joinable groups."""

    def __init__(self):
        pass

    @staticmethod
    def get_joinable_groups():
        """Return all groups that users may join, i.e. every non-internal group."""
        return Group.objects.select_related('authgroup').exclude(authgroup__internal=True)

    @staticmethod
    def get_group_leaders_groups(user):
        """Return the groups for which the given user is a group leader."""
        return Group.objects.select_related('authgroup').filter(authgroup__group_leaders__in=[user])

    @staticmethod
    def joinable_group(group):
        """
        Check if a group is a user joinable group, i.e.
        not an internal group for Corp, Alliance, Members etc
        :param group: django.contrib.auth.models.Group object
        :return: bool True if its joinable, False otherwise
        """
        return not group.authgroup.internal

    @staticmethod
    def has_management_permission(user):
        """Return True if the user holds the global auth.group_management permission."""
        return user.has_perm('auth.group_management')

    @classmethod
    def can_manage_groups(cls, user):
        """
        For use with user_passes_test decorator.
        Check if the user can manage groups. Either has the
        auth.group_management permission or is a leader of at least one group
        and is also a Member.
        :param user: django.contrib.auth.models.User for the request
        :return: bool True if user can manage groups, False otherwise
        """
        if user.is_authenticated:
            # exists() returns a plain bool and avoids fetching rows, matching
            # the documented return type (the previous .all() call produced a
            # queryset that was only ever used for its truthiness) and the
            # style already used by can_manage_group below.
            return cls.has_management_permission(user) or user.leads_groups.exists()
        return False

    @classmethod
    def can_manage_group(cls, user, group):
        """
        Check user has permission to manage the given group
        :param user: User object to test permission of
        :param group: Group object the user is attempting to manage
        :return: True if the user can manage the group
        """
        if user.is_authenticated:
            return cls.has_management_permission(user) or user.leads_groups.filter(group=group).exists()
        return False
| Kaezon/allianceauth | allianceauth/groupmanagement/managers.py | Python | gpl-2.0 | 1,934 |
#!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import filecmp
import os.path
import re
import tempfile
import sys
def ExceptionAppend(e, msg):
  """Append a message to the given exception's message."""
  args = e.args
  if not args:
    # No existing message: the appended text becomes the whole message.
    e.args = (msg,)
  else:
    # Extend the first argument and keep any remaining args untouched.
    combined = str(args[0]) + ' ' + msg
    e.args = (combined,) + args[1:]
def ParseQualifiedTarget(target):
  """Splits a qualified target into [build_file, target, toolset].

  Missing components are returned as None.
  """
  # NOTE: rsplit is used to disambiguate the Windows drive letter separator.
  pieces = target.rsplit(':', 1)
  if len(pieces) == 2:
    build_file, remainder = pieces
  else:
    build_file, remainder = None, pieces[0]
  pieces = remainder.rsplit('#', 1)
  if len(pieces) == 2:
    name, toolset = pieces
  else:
    name, toolset = pieces[0], None
  return [build_file, name, toolset]
def ResolveTarget(build_file, target, toolset):
  """Resolves a qualified target into its canonical form.

  Returns [build_file, target, toolset] where build_file is either absolute
  or relative to the current directory, and the toolset embedded in |target|
  (if any) overrides the supplied default.

  build_file is the file relative to which 'target' is defined.
  target is the qualified target.
  toolset is the default toolset for that target.
  """
  parsed_file, parsed_name, parsed_toolset = ParseQualifiedTarget(target)
  if parsed_file:
    if build_file:
      # If a relative path, parsed_file is relative to the directory
      # containing build_file.  If build_file is not in the current
      # directory, parsed_file is not a usable path as-is.  Resolve it by
      # interpreting it as relative to build_file.  If parsed_file is
      # absolute, os.path.join returns it as-is regardless of the current
      # directory.
      base = os.path.dirname(build_file)
      build_file = os.path.normpath(os.path.join(base, parsed_file))
    else:
      build_file = parsed_file
  if parsed_toolset:
    toolset = parsed_toolset
  return [build_file, parsed_name, toolset]
def BuildFile(fully_qualified_target):
  """Returns the build file component of a fully qualified target."""
  build_file, _name, _toolset = ParseQualifiedTarget(fully_qualified_target)
  return build_file
def QualifiedTarget(build_file, target, toolset):
  """Returns the fully qualified form of a target.

  "Qualified" means the file that a target was defined in and the target
  name, separated by a colon, suffixed by a # and the toolset name:
  /path/to/file.gyp:target_name#toolset
  """
  qualified = build_file + ':' + target
  if toolset:
    qualified += '#' + toolset
  return qualified
def RelativePath(path, relative_to):
  """Returns |path| expressed relative to |relative_to|.

  Both arguments are interpreted relative to the current directory.  The
  result is '' when the two paths are identical.
  """
  # Normalize both paths by making them absolute.
  path = os.path.abspath(path)
  relative_to = os.path.abspath(relative_to)
  # Work component-by-component.
  path_parts = path.split(os.path.sep)
  base_parts = relative_to.split(os.path.sep)
  # How many leading components do the two paths share?
  common = len(os.path.commonprefix([path_parts, base_parts]))
  # Back out of relative_to up to the shared prefix, then descend into path.
  ascent = [os.path.pardir] * (len(base_parts) - common)
  relative_parts = ascent + path_parts[common:]
  if not relative_parts:
    # The paths were the same.
    return ''
  return os.path.join(*relative_parts)
def FixIfRelativePath(path, relative_to):
  """Like RelativePath, but absolute paths pass through unchanged."""
  return path if os.path.isabs(path) else RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
  """Re-anchors |path| to the current directory.

  Assuming that |relative_to| is relative to the current directory and
  |path| is relative to the dirname of |relative_to|, returns a path that
  identifies |path| relative to the current directory.
  """
  base_dir = os.path.dirname(relative_to)
  return os.path.normpath(os.path.join(base_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
# Compiled once at import time; used by EncodePOSIXShellArgument below.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
# The (group) captures the character so re.sub can prefix it with a backslash.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  argument may be quoted and escaped as necessary to ensure that POSIX shells
  treat the returned value as a literal representing the argument passed to
  this function.  Parameter (variable) expansions beginning with $ are allowed
  to remain intact without escaping the $, to allow the argument to contain
  references to variables to be expanded by the shell.
  """
  if not isinstance(argument, str):
    argument = str(argument)
  # Double-quote only when the argument contains shell-special characters
  # (or is empty); backslash-escape ", \ and ` in either case.
  quote = '"' if _quote.search(argument) else ''
  return quote + _escape.sub(r'\\\1', argument) + quote
def EncodePOSIXShellList(list):
  """Encodes |list| suitably for consumption by POSIX shells.

  Returns EncodePOSIXShellArgument for each item in list, and joins them
  together using the space character as an argument separator.
  """
  # (Parameter name |list| shadows the builtin but is kept for interface
  # compatibility.)
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Returns the recursive list of target dependencies.

  Walks both 'dependencies' and 'dependencies_original' edges; the roots
  themselves are excluded from the result.
  """
  visited = set()
  to_visit = set(roots)
  while to_visit:
    target = to_visit.pop()
    if target in visited:
      continue
    visited.add(target)
    # Queue this target's children for traversal.
    spec = target_dicts[target]
    to_visit.update(spec.get('dependencies', []))
    to_visit.update(spec.get('dependencies_original', []))
  return list(visited - set(roots))
def BuildFileTargets(target_list, build_file):
  """From a target_list, returns the subset from the specified build_file.
  """
  result = []
  for target in target_list:
    if BuildFile(target) == build_file:
      result.append(target)
  return result
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified build_file.
  """
  direct = BuildFileTargets(target_list, build_file)
  return direct + DeepDependencyTargets(target_dicts, direct)
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.
  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """
  class Writer:
    """Wrapper around file which only covers the target if it differs."""
    def __init__(self):
      # Pick temporary file.
      # The temp file lives in the target's directory so the final rename
      # stays on the same filesystem (rename is then atomic on POSIX).
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
    def __getattr__(self, attrname):
      # Delegate everything else to self.tmp_file
      return getattr(self.tmp_file, attrname)
    def close(self):
      # Finish the write: compare the temp file to the target and either
      # discard it (identical) or rename it into place (different/new).
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          # ENOENT means the target doesn't exist yet; anything else is real.
          if e.errno != errno.ENOENT:
            raise
        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove. Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
  return Writer()
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Returns the items of |seq| with duplicates removed, preserving order.

  |idfun|, if given, maps each item to the key used for duplicate detection.
  """
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  result = []
  for item in seq:
    marker = idfun(item)
    if marker not in seen:
      seen.add(marker)
      result.append(item)
  return result
| nawawi/wkhtmltopdf | webkit/Source/ThirdParty/gyp/pylib/gyp/common.py | Python | lgpl-3.0 | 12,381 |
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import functools
import locale
from builtins import object
from medusa import app
from medusa.network_timezones import app_timezone
import six
# strftime date formats offered to the user in the UI (every supported
# combination of ordering, separator and weekday prefix).
date_presets = (
    '%Y-%m-%d',
    '%a, %Y-%m-%d',
    '%A, %Y-%m-%d',
    '%y-%m-%d',
    '%a, %y-%m-%d',
    '%A, %y-%m-%d',
    '%m/%d/%Y',
    '%a, %m/%d/%Y',
    '%A, %m/%d/%Y',
    '%m/%d/%y',
    '%a, %m/%d/%y',
    '%A, %m/%d/%y',
    '%m-%d-%Y',
    '%a, %m-%d-%Y',
    '%A, %m-%d-%Y',
    '%m-%d-%y',
    '%a, %m-%d-%y',
    '%A, %m-%d-%y',
    '%m.%d.%Y',
    '%a, %m.%d.%Y',
    '%A, %m.%d.%Y',
    '%m.%d.%y',
    '%a, %m.%d.%y',
    '%A, %m.%d.%y',
    '%d-%m-%Y',
    '%a, %d-%m-%Y',
    '%A, %d-%m-%Y',
    '%d-%m-%y',
    '%a, %d-%m-%y',
    '%A, %d-%m-%y',
    '%d/%m/%Y',
    '%a, %d/%m/%Y',
    '%A, %d/%m/%Y',
    '%d/%m/%y',
    '%a, %d/%m/%y',
    '%A, %d/%m/%y',
    '%d.%m.%Y',
    '%a, %d.%m.%Y',
    '%A, %d.%m.%Y',
    '%d.%m.%y',
    '%a, %d.%m.%y',
    '%A, %d.%m.%y',
    '%d. %b %Y',
    '%a, %d. %b %Y',
    '%A, %d. %b %Y',
    '%d. %b %y',
    '%a, %d. %b %y',
    '%A, %d. %b %y',
    '%d. %B %Y',
    '%a, %d. %B %Y',
    '%A, %d. %B %Y',
    '%d. %B %y',
    '%a, %d. %B %y',
    '%A, %d. %B %y',
    '%b %d, %Y',
    '%a, %b %d, %Y',
    '%A, %b %d, %Y',
    '%B %d, %Y',
    '%a, %B %d, %Y',
    '%A, %B %d, %Y'
)
# strftime time formats: 12-hour with AM/PM and 24-hour.
time_presets = ('%I:%M:%S %p', '%H:%M:%S')
# helper class
class static_or_instance(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
return functools.partial(self.func, instance)
# subclass datetime.datetime to add function to display custom date and time formats
class sbdatetime(datetime.datetime):
has_locale = True
en_us_norm = locale.normalize('en_US.utf-8')
@static_or_instance
def convert_to_setting(self, dt=None):
try:
if app.TIMEZONE_DISPLAY == 'local':
return dt.astimezone(app_timezone) if self is None else self.astimezone(app_timezone)
else:
return dt if self is None else self
except Exception:
return dt if self is None else self
# display Time in application Format
@static_or_instance
def sbftime(self, dt=None, show_seconds=False, t_preset=None):
"""
Display time in application format
TODO: Rename this to srftime
:param dt: datetime object
:param show_seconds: Boolean, show seconds
:param t_preset: Preset time format
:return: time string
"""
try:
locale.setlocale(locale.LC_TIME, '')
except Exception:
pass
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, 'en_US')
except Exception:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, sbdatetime.en_us_norm)
except Exception:
sbdatetime.has_locale = False
strt = ''
try:
if self is None:
if dt is not None:
if t_preset is not None:
strt = dt.strftime(t_preset)
elif show_seconds:
strt = dt.strftime(app.TIME_PRESET_W_SECONDS)
else:
strt = dt.strftime(app.TIME_PRESET)
else:
if t_preset is not None:
strt = self.strftime(t_preset)
elif show_seconds:
strt = self.strftime(app.TIME_PRESET_W_SECONDS)
else:
strt = self.strftime(app.TIME_PRESET)
finally:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, '')
except Exception:
sbdatetime.has_locale = False
if six.PY2:
strt = strt.decode(app.SYS_ENCODING)
return strt
# display Date in application Format
@static_or_instance
def sbfdate(self, dt=None, d_preset=None):
"""
Display date in application format
TODO: Rename this to srfdate
:param dt: datetime object
:param d_preset: Preset date format
:return: date string
"""
try:
locale.setlocale(locale.LC_TIME, '')
except Exception:
pass
strd = ''
try:
if self is None:
if dt is not None:
if d_preset is not None:
strd = dt.strftime(d_preset)
else:
strd = dt.strftime(app.DATE_PRESET)
else:
if d_preset is not None:
strd = self.strftime(d_preset)
else:
strd = self.strftime(app.DATE_PRESET)
finally:
try:
locale.setlocale(locale.LC_TIME, '')
except Exception:
pass
if six.PY2:
strd = strd.decode(app.SYS_ENCODING)
return strd
# display Datetime in application Format
@static_or_instance
def sbfdatetime(self, dt=None, show_seconds=False, d_preset=None, t_preset=None):
"""
Show datetime in application format
TODO: Rename this to srfdatetime
:param dt: datetime object
:param show_seconds: Boolean, show seconds as well
:param d_preset: Preset date format
:param t_preset: Preset time format
:return: datetime string
"""
try:
locale.setlocale(locale.LC_TIME, '')
except Exception:
pass
strd = ''
try:
if self is None:
if dt is not None:
if d_preset is not None:
strd = dt.strftime(d_preset)
else:
strd = dt.strftime(app.DATE_PRESET)
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, 'en_US')
except Exception:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, sbdatetime.en_us_norm)
except Exception:
sbdatetime.has_locale = False
if six.PY2:
strd = strd.decode(app.SYS_ENCODING)
# Select format
fmt = t_preset or app.TIME_PRESET_W_SECONDS if show_seconds else app.TIME_PRESET
# Add formatted date
strd = ', '.join([strd, dt.strftime(fmt)])
else:
if d_preset is not None:
strd = self.strftime(d_preset)
else:
strd = self.strftime(app.DATE_PRESET)
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, 'en_US')
except Exception:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, sbdatetime.en_us_norm)
except Exception:
sbdatetime.has_locale = False
if six.PY2:
strd = strd.decode(app.SYS_ENCODING)
# Select format
fmt = t_preset or app.TIME_PRESET_W_SECONDS if show_seconds else app.TIME_PRESET
# Add formatted date
strd = ', '.join([strd, self.strftime(fmt)])
finally:
try:
if sbdatetime.has_locale:
locale.setlocale(locale.LC_TIME, '')
except Exception:
sbdatetime.has_locale = False
return strd
| pymedusa/SickRage | medusa/sbdatetime.py | Python | gpl-3.0 | 8,651 |
import struct
from boofuzz.primitives.bit_field import BitField
class DWord(BitField):
    """A 32-bit (4 byte) bit field primitive.

    Thin convenience wrapper around :class:`BitField` that pins the width
    to 32 bits; all other keyword arguments (``name``, ``default_value``,
    ``max_num``, ``endian``, ``output_format``, ``signed``, ``full_range``,
    ``fuzz_values``, ``fuzzable``) are forwarded unchanged to BitField.
    """

    def __init__(self, *args, **kwargs):
        # Force the BitField width to 32 bits; everything else passes through.
        super(DWord, self).__init__(width=32, *args, **kwargs)

    def encode(self, value, mutation_context):
        # Raw byte strings are first unpacked into an unsigned 32-bit int
        # using this primitive's endianness; ints, lists and tuples go
        # straight through to BitField.
        if not isinstance(value, (int, list, tuple)):
            (value,) = struct.unpack(self.endian + "L", value)
        return super(DWord, self).encode(value, mutation_context)
| jtpereyda/boofuzz | boofuzz/primitives/dword.py | Python | gpl-2.0 | 1,697 |
# missileKineticDmgBonusStandard
#
# Used by:
# Implants named like: Zainou 'Snapshot' Light Missiles LM (6 of 6)
# Passive implant effect: applies automatically while fitted.
type = "passive"


def handler(fit, container, context):
    # Boost the kinetic damage of every loaded charge that requires the
    # Light Missiles skill by the implant's damageMultiplierBonus attribute.
    fit.modules.filteredChargeBoost(
        lambda mod: mod.charge.requiresSkill("Light Missiles"),
        "kineticDamage",
        container.getModifiedItemAttr("damageMultiplierBonus"))
| Ebag333/Pyfa | eos/effects/missilekineticdmgbonusstandard.py | Python | gpl-3.0 | 372 |
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic Wake-On-Lan power manager.
"""
import contextlib
import socket
import time
from oslo_log import log
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LI
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base
LOG = log.getLogger(__name__)
# No driver_info fields are strictly required; the driver instead needs at
# least one port (MAC address) registered on the node (see _parse_parameters).
REQUIRED_PROPERTIES = {}
OPTIONAL_PROPERTIES = {
    'wol_host': _('Broadcast IP address; defaults to '
                  '255.255.255.255. Optional.'),
    'wol_port': _("Destination port; defaults to 9. Optional."),
}
# Combined required + optional view, returned by get_properties().
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
def _send_magic_packets(task, dest_host, dest_port):
    """Create and send magic packets.

    Builds one Wake-On-Lan magic packet per MAC address registered on the
    node and broadcasts each of them over UDP.

    :param task: a TaskManager instance containing the node to act on.
    :param dest_host: The broadcast to this IP address.
    :param dest_port: The destination port.
    :raises: WolOperationError if an error occur when connecting to the
        host or sending the magic packets
    """
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    with contextlib.closing(udp_sock) as sock:
        for port in task.ports:
            mac = port.address.replace(':', '')
            # TODO(lucasagomes): Implement sending the magic packets with
            # SecureON password feature. If your NIC is capable of, you can
            # set the password of your SecureON using the ethtool utility.
            # Magic packet = 6 x 0xFF followed by the MAC repeated 16 times.
            packet = bytearray.fromhex('FFFFFFFFFFFF' + (mac * 16))
            try:
                sock.sendto(packet, (dest_host, dest_port))
            except socket.error as e:
                msg = (_("Failed to send Wake-On-Lan magic packets to "
                         "node %(node)s port %(port)s. Error: %(error)s") %
                       {'node': task.node.uuid, 'port': port.address,
                        'error': e})
                LOG.exception(msg)
                raise exception.WolOperationError(msg)

            # let's not flood the network with broadcast packets
            time.sleep(0.5)
def _parse_parameters(task):
    """Parse and validate the node's Wake-On-Lan driver_info.

    :param task: a TaskManager instance containing the node to act on.
    :returns: dict with 'host' (broadcast address) and 'port' (int) keys.
    :raises: InvalidParameterValue if wol_port is not an integer.
    :raises: MissingParameterValue if the node has no ports registered.
    """
    driver_info = task.node.driver_info
    broadcast_host = driver_info.get('wol_host', '255.255.255.255')
    raw_port = driver_info.get('wol_port', 9)
    try:
        broadcast_port = int(raw_port)
    except ValueError:
        raise exception.InvalidParameterValue(_(
            'Wake-On-Lan port must be an integer'))

    # Magic packets are built from the MACs of registered ports, so at
    # least one port resource must exist.
    if not task.ports:
        raise exception.MissingParameterValue(_(
            'Wake-On-Lan needs at least one port resource to be '
            'registered in the node'))

    return {'host': broadcast_host, 'port': broadcast_port}
class WakeOnLanPower(base.PowerInterface):
    """Wake-On-Lan power interface for Ironic.

    Controls power via Wake-On-Lan magic packets. Only powering a node on
    is truly supported; power-off and power-state queries are best-effort
    stubs (see the individual methods).
    """

    def get_properties(self):
        """Return the driver_info properties this interface understands."""
        return COMMON_PROPERTIES

    def validate(self, task):
        """Check that the node's driver_info is well formed.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if parameters are invalid.
        :raises: MissingParameterValue if required parameters are missing.
        """
        _parse_parameters(task)

    def get_power_state(self, task):
        """Not supported. Get the current power state of the task's node.

        Wake-On-Lan cannot query a machine, so the value comes straight
        from the database and may not reflect the actual state of the
        system.

        :returns: POWER_OFF if power state is not set otherwise return
            the node's power_state value from the database.
        """
        current = task.node.power_state
        if current is states.NOSTATE:
            return states.POWER_OFF
        return current

    @task_manager.require_exclusive_lock
    def set_power_state(self, task, pstate):
        """Wake the task's node on power on. Powering off is only logged.

        :param task: a TaskManager instance containing the node to act on.
        :param pstate: The desired power state, one of ironic.common.states
            POWER_ON, POWER_OFF.
        :raises: InvalidParameterValue if parameters are invalid.
        :raises: MissingParameterValue if required parameters are missing.
        :raises: WolOperationError if an error occur when sending the
            magic packets
        """
        params = _parse_parameters(task)
        node = task.node
        if pstate == states.POWER_ON:
            _send_magic_packets(task, params['host'], params['port'])
        elif pstate == states.POWER_OFF:
            LOG.info(_LI('Power off called for node %s. Wake-On-Lan does not '
                         'support this operation. Manual intervention '
                         'required to perform this action.'), node.uuid)
        else:
            raise exception.InvalidParameterValue(_(
                "set_power_state called for Node %(node)s with invalid "
                "power state %(pstate)s.") % {'node': node.uuid,
                                              'pstate': pstate})

    @task_manager.require_exclusive_lock
    def reboot(self, task):
        """Best-effort reboot: just tries to power the node on.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if parameters are invalid.
        :raises: MissingParameterValue if required parameters are missing.
        :raises: WolOperationError if an error occur when sending the
            magic packets
        """
        LOG.info(_LI('Reboot called for node %s. Wake-On-Lan does '
                     'not fully support this operation. Trying to '
                     'power on the node.'), task.node.uuid)
        self.set_power_state(task, states.POWER_ON)
| redhat-openstack/ironic | ironic/drivers/modules/wol.py | Python | apache-2.0 | 6,842 |
# -*- coding: utf-8 -*-
try:
import __builtin__
except ImportError:
import builtins as __builtin__ #Python 3.0
from copy import copy
import os.path
from cfmt import fmt
import codecs
### Default set of dmsl extensions
def css(s, _locals):
    """Expand a css extension block into dmsl %link lines.

    The first line of *s* is a common URL prefix; every following line is
    a stylesheet filename appended to that prefix. *_locals* is unused
    (kept for the uniform extension-call signature).
    """
    lines = s.splitlines()
    prefix = lines[0]
    return [u'%link[rel=stylesheet][href={0}{1}]'.format(prefix, name)
            for name in lines[1:]]
def js(s, _locals):
    """Expand a js extension block into dmsl %script lines.

    The first line of *s* is a common URL prefix; each following line is a
    script filename appended to it. *_locals* is unused (kept for the
    uniform extension-call signature).
    """
    lines = s.splitlines()
    prefix = lines[0]
    return ['%script[src={0}{1}]'.format(prefix, name) for name in lines[1:]]
###
def form(s, _locals):
    """Expand a form extension block into dmsl form markup.

    The first line of *s* is '<dict-name> <action-url>', where <dict-name>
    keys into *_locals* to look up field values. Each following line is
    '<input-type> <field-id>' with input-type one of hidden/text/submit.
    """
    lines = s.splitlines()
    data_key, action = lines[0].split(' ', 1)
    markup = ['%form[action={0}][method=post]'.format(action)]
    for line in lines[1:]:
        field_type, field_id = line.split(' ')
        # e.g. 'user_id' -> 'User Id'; the word 'number' becomes '#'.
        label = field_id.replace('_', ' ').replace('number', '#').title()
        if field_type == 'hidden':
            markup.append((' %input#{0}[name={0}][type={1}][value="{2!s}"]').format(field_id, field_type, _locals[data_key][field_id]))
        elif field_type == 'text':
            markup.append(' %label[for={0}] {1}'.format(field_id, label))
            markup.append((' %input#{0}[name={0}][type={1}][value="{2!s}"]').format(field_id, field_type, _locals[data_key][field_id]))
        elif field_type == 'submit':
            markup.append(' %input[type=submit][value={0}]'.format(label))
    return markup
def _open(f):
    """Open template file *f* (relative to ``_open.template_dir``) as
    UTF-8, replacing undecodable bytes instead of raising."""
    full_path = os.path.join(_open.template_dir, f)
    return codecs.open(full_path, encoding='utf-8', errors='replace')
# Callers configure the template directory by assigning this attribute.
_open.template_dir = ''
# Allowlist of names exposed to sandboxed template code. Setting
# '__builtins__' to None blocks access to the full builtin namespace, so
# only the entries below are reachable from templates.
default_sandbox = { '__builtins__': None,
                    'css': css,
                    'dict': __builtin__.dict,
                    'enumerate': __builtin__.enumerate,
                    'Exception': Exception,
                    'form': form,
                    'float': __builtin__.float,
                    'fmt': fmt,
                    'globals': __builtin__.globals,
                    'int': __builtin__.int,
                    'js': js,
                    'len': __builtin__.len,
                    'list': __builtin__.list,
                    'locals': __builtin__.locals,
                    'map': __builtin__.map,
                    'max': __builtin__.max,
                    'min': __builtin__.min,
                    'open': _open,
                    'repr': __builtin__.repr,
                    'range': __builtin__.range,
                    'reversed': __builtin__.reversed,
                    'set': __builtin__.set,
                    'sorted': __builtin__.sorted,
                    'str': __builtin__.str}
# Expose True/False when present as builtins attributes.
if hasattr(__builtin__, 'False'):
    default_sandbox['False'] = getattr(__builtin__, 'False')
if hasattr(__builtin__, 'True'):
    default_sandbox['True'] = getattr(__builtin__, 'True')
# Python 2 only: `unicode` does not exist on Python 3.
if hasattr(__builtin__, 'unicode'):
    default_sandbox['unicode'] = getattr(__builtin__, 'unicode')
#
def new():
    """Return a fresh, independent shallow copy of the default sandbox
    namespace, safe for per-render mutation."""
    return default_sandbox.copy()

# Registry of user-provided template extensions, populated elsewhere.
extensions = {}
| dskinner/damsel-python | dmsl/_sandbox.py | Python | mit | 2,807 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: introduces the MessageRecord model."""

    def forwards(self, orm):
        """Create the nuntium_messagerecord table (generic FK via
        content_type + object_id)."""
        # Adding model 'MessageRecord'
        db.create_table(u'nuntium_messagerecord', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('status', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('datetime', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 4, 24, 0, 0))),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal(u'nuntium', ['MessageRecord'])

    def backwards(self, orm):
        """Reverse of forwards(): drop the nuntium_messagerecord table."""
        # Deleting model 'MessageRecord'
        db.delete_table(u'nuntium_messagerecord')

    # Frozen ORM snapshot used by South at migration time.
    # Auto-generated; do not edit by hand.
    models = {
        u'contactos.contact': {
            'Meta': {'object_name': 'Contact'},
            'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        u'contactos.contacttype': {
            'Meta': {'object_name': 'ContactType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'nuntium.message': {
            'Meta': {'object_name': 'Message'},
            'content': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': "'4'"}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
        },
        u'nuntium.messagerecord': {
            'Meta': {'object_name': 'MessageRecord'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'datetime': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'nuntium.outboundmessage': {
            'Meta': {'object_name': 'OutboundMessage'},
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.Contact']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.Message']"})
        },
        u'nuntium.writeitinstance': {
            'Meta': {'object_name': 'WriteItInstance'},
            'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'popit.apiinstance': {
            'Meta': {'object_name': 'ApiInstance'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
        },
        u'popit.person': {
            'Meta': {'object_name': 'Person'},
            'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['nuntium']
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
from dcdreporter import DCDReporter
import numpy as np
def OPLS_LJ(system):
    """Replace the default Lorentz-Berthelot LJ combining rules with OPLS ones.

    OpenMM's NonbondedForce combines sigmas arithmetically; OPLS requires a
    geometric mean for both sigma and epsilon. This moves the LJ term into a
    CustomNonbondedForce with geometric combination, zeroes the epsilons on
    the original NonbondedForce (keeping its Coulomb part), and rewrites the
    1-4 exception sigmas as geometric means.

    :param system: an OpenMM System built from a force field
    :return: the same System, modified in place
    """
    forces = {system.getForce(i).__class__.__name__: system.getForce(i)
              for i in range(system.getNumForces())}
    nonbonded_force = forces['NonbondedForce']
    # Renamed from `lorentz`: this force implements the OPLS (geometric)
    # rules, i.e. the replacement for Lorentz-Berthelot.
    opls_force = CustomNonbondedForce(
        '4*epsilon*((sigma/r)^12-(sigma/r)^6); '
        'sigma=sqrt(sigma1*sigma2); epsilon=sqrt(epsilon1*epsilon2)')
    opls_force.setNonbondedMethod(nonbonded_force.getNonbondedMethod())
    opls_force.addPerParticleParameter('sigma')
    opls_force.addPerParticleParameter('epsilon')
    opls_force.setCutoffDistance(nonbonded_force.getCutoffDistance())
    system.addForce(opls_force)
    LJset = {}
    for index in range(nonbonded_force.getNumParticles()):
        charge, sigma, epsilon = nonbonded_force.getParticleParameters(index)
        LJset[index] = (sigma, epsilon)
        opls_force.addParticle([sigma, epsilon])
        # Zero the LJ epsilon so the original force only handles Coulomb.
        nonbonded_force.setParticleParameters(
            index, charge, sigma, epsilon * 0)
    for i in range(nonbonded_force.getNumExceptions()):
        (p1, p2, q, sig, eps) = nonbonded_force.getExceptionParameters(i)
        # ALL THE 12,13 and 14 interactions are EXCLUDED FROM CUSTOM NONBONDED
        # FORCE
        opls_force.addExclusion(p1, p2)
        if eps._value != 0.0:
            # OPLS 1-4 sigma is the geometric mean of the particle sigmas.
            sig14 = sqrt(LJset[p1][0] * LJset[p2][0])
            # NOTE(review): the original also computed a geometric-mean eps14
            # here but never used it; the exception keeps the epsilon already
            # stored on the force (dead assignment removed, behavior
            # unchanged).
            nonbonded_force.setExceptionParameters(i, p1, p2, q, sig14, eps)
    return system
def Minimize(simulation,iters=0):
    """Minimize the simulation's energy and write the result to gasmin.pdb.

    :param simulation: an OpenMM Simulation whose context has positions set
    :param iters: maximum minimizer iterations; 0 presumably means
        "iterate until convergence" (OpenMM default semantics) -- TODO confirm
    :return: the same Simulation object, after minimization
    """
    simulation.minimizeEnergy(maxIterations=iters)
    position = simulation.context.getState(getPositions=True).getPositions()
    energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
    PDBFile.writeFile(simulation.topology, position,
                      open('gasmin.pdb', 'w'))
    # KcalPerKJ converts the reported energy from kJ-based units to kcal/mol.
    print 'Energy at Minima is %3.3f kcal/mol' % (energy._value * KcalPerKJ)
    return simulation
# Load the pre-built octane box and its custom force field.
pdb = PDBFile('125_OCT_BOX.pdb')
modeller = Modeller(pdb.topology, pdb.positions)
forcefield = ForceField('OCT.xml')
system = forcefield.createSystem(modeller.topology, nonbondedMethod=PME, nonbondedCutoff=1*nanometer)
# Swap Lorentz-Berthelot LJ combining for OPLS geometric combining.
system = OPLS_LJ(system)
## FOR NPT
TEMP = 300*kelvin
system.addForce(MonteCarloBarostat(1*bar,TEMP))
# Langevin dynamics at 300 K, 1/ps friction, 1 fs (0.001 ps) timestep.
integrator = LangevinIntegrator(TEMP, 1/picosecond, 0.001*picoseconds)
simulation = Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
#print('MINIMIZATION STARTED')
#simulation = Minimize(simulation,300)
#print('MINIMIZATION DONE')
# Report a structure snapshot and a state line every 1000 steps (= 1 ps).
simulation.reporters.append(PDBReporter('output.pdb', 1000))
simulation.reporters.append(StateDataReporter(stdout, 1000, step=True, potentialEnergy=True, temperature=True,density=True))
#simulation.reporters.append(DCDReporter('myfile.dcd', 100, enforcePeriodicBox=False))
# 50000 steps * 1 fs = 50 ps of NPT equilibration, then dump final coords.
simulation.step(50000)
np_equ_pos = simulation.context.getState(getPositions=True).getPositions()
PDBFile.writeFile(simulation.topology, np_equ_pos,open('NVT_EQ_FINAL.pdb', 'w'))
| leelasd/LigParGenTools | PLIQ_OPENMM.py | Python | mit | 3,116 |
##############################################
# File Name: module6.py
# Version: 1.0
# Team No.: 22
# Team Name:
# Date: 11 Nov 15
##############################################
import RPi.GPIO as GPIO
import time
print 'Programming the PiBot...'
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
GPIO.setup(15, GPIO.OUT)
interval = 1
print 'forward'
GPIO.output(11, True)
GPIO.output(13, True)
time.sleep(interval)
GPIO.output(11, False)
GPIO.output(13, False)
print 'back'
GPIO.output(7, True)
GPIO.output(15, True)
time.sleep(interval)
GPIO.output(7, False)
GPIO.output(15, False)
print 'left'
GPIO.output(13, True)
time.sleep(interval)
GPIO.output(13, False)
print 'right'
GPIO.output(11, True)
time.sleep(interval)
GPIO.output(11, False)
GPIO.cleanup()
print "\nPiBot is going offline..."
| konini-school/pibot22 | module6.py | Python | gpl-2.0 | 867 |
from __future__ import division
from pandas import Interval
import pytest
import pandas.util.testing as tm
@pytest.fixture
def interval():
    """Fixture: the default Interval (0, 1], i.e. closed on the right."""
    return Interval(0, 1)
class TestInterval(object):
    """Unit tests for the scalar pandas Interval type."""

    def test_properties(self, interval):
        """Basic attributes of the default (0, 1] interval."""
        assert interval.closed == 'right'
        assert interval.left == 0
        assert interval.right == 1
        assert interval.mid == 0.5

    def test_repr(self, interval):
        """repr names the closed side; str uses bracket/paren notation."""
        assert repr(interval) == "Interval(0, 1, closed='right')"
        assert str(interval) == "(0, 1]"

        interval_left = Interval(0, 1, closed='left')
        assert repr(interval_left) == "Interval(0, 1, closed='left')"
        assert str(interval_left) == "[0, 1)"

    def test_contains(self, interval):
        """`in` respects which endpoints are closed; interval-in-interval raises."""
        assert 0.5 in interval
        assert 1 in interval
        assert 0 not in interval

        msg = "__contains__ not defined for two intervals"
        with tm.assert_raises_regex(TypeError, msg):
            interval in interval

        interval_both = Interval(0, 1, closed='both')
        assert 0 in interval_both
        assert 1 in interval_both

        interval_neither = Interval(0, 1, closed='neither')
        assert 0 not in interval_neither
        assert 0.5 in interval_neither
        assert 1 not in interval_neither

    def test_equal(self):
        """Equality requires matching endpoints AND closed side."""
        assert Interval(0, 1) == Interval(0, 1, closed='right')
        assert Interval(0, 1) != Interval(0, 1, closed='left')
        assert Interval(0, 1) != 0

    def test_comparison(self):
        """Intervals order lexicographically; comparing to a scalar raises."""
        with tm.assert_raises_regex(TypeError, 'unorderable types'):
            Interval(0, 1) < 2

        assert Interval(0, 1) < Interval(1, 2)
        assert Interval(0, 1) < Interval(0, 2)
        assert Interval(0, 1) < Interval(0.5, 1.5)
        assert Interval(0, 1) <= Interval(0, 1)
        assert Interval(0, 1) > Interval(-1, 2)
        assert Interval(0, 1) >= Interval(0, 1)

    def test_hash(self, interval):
        """Intervals must be hashable."""
        # should not raise
        hash(interval)

    def test_math_add(self, interval):
        """Adding a scalar shifts both endpoints; interval+interval is undefined."""
        expected = Interval(1, 2)
        actual = interval + 1
        assert expected == actual

        expected = Interval(1, 2)
        actual = 1 + interval
        assert expected == actual

        actual = interval
        actual += 1
        assert expected == actual

        msg = "unsupported operand type\(s\) for \+"
        with tm.assert_raises_regex(TypeError, msg):
            interval + Interval(1, 2)

        with tm.assert_raises_regex(TypeError, msg):
            interval + 'foo'

    def test_math_sub(self, interval):
        """Subtracting a scalar shifts both endpoints down."""
        expected = Interval(-1, 0)
        actual = interval - 1
        assert expected == actual

        actual = interval
        actual -= 1
        assert expected == actual

        msg = "unsupported operand type\(s\) for -"
        with tm.assert_raises_regex(TypeError, msg):
            interval - Interval(1, 2)

        with tm.assert_raises_regex(TypeError, msg):
            interval - 'foo'

    def test_math_mult(self, interval):
        """Multiplying by a scalar scales both endpoints."""
        expected = Interval(0, 2)
        actual = interval * 2
        assert expected == actual

        expected = Interval(0, 2)
        actual = 2 * interval
        assert expected == actual

        actual = interval
        actual *= 2
        assert expected == actual

        msg = "unsupported operand type\(s\) for \*"
        with tm.assert_raises_regex(TypeError, msg):
            interval * Interval(1, 2)

        msg = "can\'t multiply sequence by non-int"
        with tm.assert_raises_regex(TypeError, msg):
            interval * 'foo'

    def test_math_div(self, interval):
        """Dividing by a scalar scales both endpoints."""
        expected = Interval(0, 0.5)
        actual = interval / 2.0
        assert expected == actual

        actual = interval
        actual /= 2.0
        assert expected == actual

        msg = "unsupported operand type\(s\) for /"
        with tm.assert_raises_regex(TypeError, msg):
            interval / Interval(1, 2)

        with tm.assert_raises_regex(TypeError, msg):
            interval / 'foo'
| NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/scalar/test_interval.py | Python | apache-2.0 | 4,026 |
# This will work out how far you are from the number 6.
def six_toast(num):
    """Return the absolute distance between *num* and 6."""
    distance = 6 - num
    if distance < 0:
        distance = -distance
    return distance
# Same distance-from-6 computation, as a single expression.
def six_toast(num):
    """Return how far *num* is from 6 (always non-negative)."""
    return abs(6 - num)
# a less efficient (branching) way to solve it:
def six_toast(num):
    """Return how far *num* is from 6 (always non-negative).

    BUGFIX: the original returned *num* itself for num < 6 (e.g. 2 -> 2
    instead of 4); it must return 6 - num to match the other versions.
    """
    if num >= 6:
        t = num - 6
        return t
    else:
        return 6 - num
| Matt-Stammers/Python-Foundations | Simple Functions/Anchored_Number.py | Python | gpl-3.0 | 324 |
from gym.spaces import Space
from typing import Optional
from ray.rllib.utils.exploration.gaussian_noise import GaussianNoise
from ray.rllib.utils.schedules import ConstantSchedule
class PerWorkerGaussianNoise(GaussianNoise):
    """Gaussian noise exploration with a constant, per-worker scale.

    Each remote worker's `scale` schedule is pinned to the constant
    0.4 ^ (1 + [worker-index] / float([num-workers] - 1) * 7)
    (see the Ape-X paper), while the local worker gets scale 0.0 so that
    evaluation rollouts run without exploration noise.
    """

    def __init__(self, action_space: Space, *, framework: Optional[str],
                 num_workers: Optional[int], worker_index: Optional[int],
                 **kwargs):
        """
        Args:
            action_space (Space): The gym action space used by the environment.
            num_workers (Optional[int]): The overall number of workers used.
            worker_index (Optional[int]): The index of the Worker using this
                Exploration.
            framework (Optional[str]): One of None, "tf", "torch".
        """
        scale_schedule = None
        if num_workers > 0:
            if worker_index > 0:
                # Fixed, distinct exploration scale per worker (Ape-X paper).
                denom = float(num_workers - 1) if num_workers > 1 else 1.0
                exponent = 1 + (worker_index / denom) * 7
                scale_schedule = ConstantSchedule(
                    0.4 ** exponent, framework=framework)
            else:
                # Local worker: zero exploration so that eval rollouts run
                # properly.
                scale_schedule = ConstantSchedule(0.0, framework=framework)
        super().__init__(
            action_space,
            scale_schedule=scale_schedule,
            framework=framework,
            **kwargs)
| richardliaw/ray | rllib/utils/exploration/per_worker_gaussian_noise.py | Python | apache-2.0 | 1,807 |
from ..cw_model import CWModel
class Source(CWModel):
    """ConnectWise 'Source' entity model.

    Attribute comments give the API field type; '*' appears to mark a
    required field -- verify against the ConnectWise API schema.
    """

    def __init__(self, json_dict=None):
        self.id = None  # (Integer)
        self.name = None  # *(String(50))
        self.defaultFlag = None  # (Boolean)
        self._info = None  # (Metadata)
        self.enteredBy = None  # (String)
        self.dateEntered = None  # (String)
        # initialize object with json dict
        super().__init__(json_dict)
| joshuamsmith/ConnectPyse | service/source.py | Python | mit | 441 |
from sipRequest import SIPRequest
class UnknownSIPRequest(SIPRequest):
    """Marker subclass adding no behavior beyond SIPRequest; presumably
    used for requests whose method has no dedicated subclass -- verify
    against the message factory."""
    pass
| bobjects/BobStack | bobstack/sipmessaging/unknownSIPRequest.py | Python | apache-2.0 | 82 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Doriancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import DoriancoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
    """Convert a byte string to its ASCII hex representation."""
    hex_bytes = b2a_hex(b)
    return hex_bytes.decode('ascii')
def assert_template(node, block, expect, rehash=True):
    """Submit *block* to *node* in getblocktemplate proposal mode and
    assert the node's verdict equals *expect* (None means accepted).

    When *rehash* is True the merkle root is recomputed first, so the
    proposal reflects any transaction edits made by the caller.
    """
    if rehash:
        block.hashMerkleRoot = block.calc_merkle_root()
    verdict = node.getblocktemplate(
        {'data': b2x(block.serialize()), 'mode': 'proposal'})
    assert_equal(verdict, expect)
class MiningTest(DoriancoinTestFramework):
    """Functional test driving getmininginfo, getblocktemplate proposal
    mode and submitblock through a series of valid and invalid blocks."""

    def set_test_params(self):
        # Two nodes on the cached (non-clean) 200-block regtest chain.
        self.num_nodes = 2
        self.setup_clean_chain = False

    def run_test(self):
        """Build one valid block from the template, then mutate copies of
        it to trigger each rejection reason."""
        node = self.nodes[0]

        self.log.info('getmininginfo')
        mining_info = node.getmininginfo()
        assert_equal(mining_info['blocks'], 200)
        assert_equal(mining_info['chain'], 'regtest')
        assert_equal(mining_info['currentblocktx'], 0)
        assert_equal(mining_info['currentblockweight'], 0)
        assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
        assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
        assert_equal(mining_info['pooledtx'], 0)

        # Mine a block to leave initial block download
        node.generate(1)
        tmpl = node.getblocktemplate()
        self.log.info("getblocktemplate: Test capability advertised")
        assert 'proposal' in tmpl['capabilities']
        assert 'coinbasetxn' not in tmpl

        coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        coinbase_tx.rehash()

        # Assemble the candidate block around the coinbase from the template.
        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]

        self.log.info("getblocktemplate: Test valid block")
        assert_template(node, block, None)

        self.log.info("submitblock: Test block decode failure")
        assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))

        self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].vin[0].prevout.hash += 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-cb-missing')

        self.log.info("submitblock: Test invalid coinbase transaction")
        assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))

        self.log.info("getblocktemplate: Test truncated final transaction")
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})

        self.log.info("getblocktemplate: Test duplicate transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx.append(bad_block.vtx[0])
        assert_template(node, bad_block, 'bad-txns-duplicate')

        self.log.info("getblocktemplate: Test invalid transaction")
        bad_block = copy.deepcopy(block)
        bad_tx = copy.deepcopy(bad_block.vtx[0])
        bad_tx.vin[0].prevout.hash = 255
        bad_tx.rehash()
        bad_block.vtx.append(bad_tx)
        assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')

        self.log.info("getblocktemplate: Test nonfinal transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].nLockTime = 2 ** 32 - 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-txns-nonfinal')

        self.log.info("getblocktemplate: Test bad tx count")
        # The tx count is immediately after the block header
        TX_COUNT_OFFSET = 80
        bad_block_sn = bytearray(block.serialize())
        assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
        bad_block_sn[TX_COUNT_OFFSET] += 1
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})

        self.log.info("getblocktemplate: Test bad bits")
        bad_block = copy.deepcopy(block)
        bad_block.nBits = 469762303  # impossible in the real world
        assert_template(node, bad_block, 'bad-diffbits')

        self.log.info("getblocktemplate: Test bad merkle root")
        bad_block = copy.deepcopy(block)
        bad_block.hashMerkleRoot += 1
        # rehash=False so the bogus merkle root is submitted as-is.
        assert_template(node, bad_block, 'bad-txnmrklroot', False)

        self.log.info("getblocktemplate: Test bad timestamps")
        bad_block = copy.deepcopy(block)
        bad_block.nTime = 2 ** 31 - 1
        assert_template(node, bad_block, 'time-too-new')
        bad_block.nTime = 0
        assert_template(node, bad_block, 'time-too-old')

        self.log.info("getblocktemplate: Test not best block")
        bad_block = copy.deepcopy(block)
        bad_block.hashPrevBlock = 123
        assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
# Entry point when executed directly by the functional-test runner.
if __name__ == '__main__':
    MiningTest().main()
| doriancoins/doriancoin | test/functional/mining_basic.py | Python | mit | 5,591 |
"""
Copyright 2016 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pynos.versions.base.system import System as BaseSystem
from pynos.versions.ver_7.ver_7_1_0.yang.brocade_rbridge import brocade_rbridge
import xml.etree.ElementTree as ET
import pynos.utilities
class System(BaseSystem):
    """System class containing all system level methods and attributes.
    """

    def __init__(self, callback):
        super(System, self).__init__(callback)
        # NOS 7.1.0 rbridge YANG binding; return_xml makes the binding
        # return the generated XML instead of sending it to the device.
        self._rbridge = brocade_rbridge(callback=pynos.utilities.return_xml)

    def maintenance_mode(self, **kwargs):
        """Configures maintenance mode on the device

        Args:
            rbridge_id (str): The rbridge ID of the device on which
                Maintenance mode
                will be configured in a VCS fabric.
            get (bool): Get config instead of editing config. (True, False)
            callback (function): A function executed upon completion of the
                method. The only parameter passed to `callback` will be the
                ``ElementTree`` `config`.

        Returns:
            Return value of `callback`.

        Raises:
            KeyError: if `rbridge_id` is not specified.

        Examples:
            >>> import pynos.device
            >>> conn = ('10.24.39.202', '22')
            >>> auth = ('admin', 'password')
            >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
            ...     output = dev.system.maintenance_mode(rbridge_id='226')
            ...     output = dev.system.maintenance_mode(rbridge_id='226',
            ...     get=True)
            ...     assert output == True
            ...     output = dev.system.maintenance_mode(rbridge_id='226',
            ...     delete=True)
            ...     output = dev.system.maintenance_mode(rbridge_id='226',
            ...     get=True)
            ...     assert output == False
        """
        is_get_config = kwargs.pop('get', False)
        delete = kwargs.pop('delete', False)
        rbridge_id = kwargs.pop('rbridge_id')
        callback = kwargs.pop('callback', self._callback)
        rid_args = dict(rbridge_id=rbridge_id)
        # Build the <system-mode><maintenance> request XML via the binding.
        rid = getattr(self._rbridge,
                      'rbridge_id_system_mode_maintenance')
        config = rid(**rid_args)
        if is_get_config:
            # GET: fetch current config and report whether the
            # <system-mode> maintenance element is present.
            maint_mode = callback(config, handler='get_config')
            mode = maint_mode.data_xml
            root = ET.fromstring(mode)
            namespace = 'urn:brocade.com:mgmt:brocade-rbridge'
            for rbridge_id_node in root.findall('{%s}rbridge-id' % namespace):
                system_mode = rbridge_id_node.find(
                    '{%s}system-mode' % namespace)
                if system_mode is not None:
                    return True
                else:
                    return False
        if delete:
            # Mark the maintenance element for removal in the edit request.
            config.find('.//*maintenance').set('operation', 'delete')
        return callback(config)
| brocade/pynos | pynos/versions/ver_7/ver_7_1_0/system.py | Python | apache-2.0 | 3,459 |
from .schedule import Schedule
from .schedule import ScheduleScorer
from .schedule_generator import find_schedules
| rosshamish/classtime | classtime/brain/scheduling/__init__.py | Python | mit | 116 |
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
# callirhoe - high quality calendar rendering
# Copyright (C) 2012-2015 George M. Tzoumas
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
"""high quality calendar rendering"""
# TODO:
# fix auto-measure rendering (cairo)
# fix plugin loading (without global vars)
# week markers selectable
# test layouts
# allow to change background color (fill), other than white
# page spec parse errors
# mobile themes (e.g. 800x480)
# photo support (like ImageMagick's polaroid effect)
# .callirhoe/config : default values for plugins (styles/layouts/lang...) and cmdline
# MAYBE-TODO:
# implement various data sources
# auto-landscape? should aim for matrix or bars?
# allow /usr/bin/date-like formatting %x...
# improve file matching with __init__ when lang known
# styles and geometries could be merged, css-like
# then we can apply a chain of --style a --style b ...
# and b inherits from a and so on
# however, this would require dynamically creating a class that inherits from others...
# CANNOT UPGRADE TO argparse !!! -- how to handle [[month] year] form?
import calendar
import sys
import time
import optparse
import lib.xcairo as xcairo
import lib.holiday as holiday
import lib
from lib.plugin import *
# TODO: SEE IF IT CAN BE MOVED INTO lib.plugin ...
def import_plugin(plugin_paths, cat, longcat, longcat2, listopt, preset):
    """import a plugin making it visible

    I{Example:}

    >>> Language = import_plugin(get_plugin_paths(), "lang", "language", "languages", "--list-languages", "EN")

    @param plugin_paths: list of plugin search paths
    @param cat: short category name (for folder name)
    @param longcat: long category name
    @param longcat2: long category name in plural form
    @param listopt: option name
    @param preset: default value
    @rtype: module
    @note: Aimed for internal use with I{lang}, I{style}, I{geom}, I{layouts}.
    """
    try:
        found = []
        for path in plugin_paths:
            found += available_files(path, cat, preset)
        if not found: raise IOError
        if found[0][1] == "resource:":
            # plugin shipped inside the package itself
            m = __import__("%s.%s" % (cat,preset), globals(), locals(), [ "*" ])
        else:
            sys.path.insert(0, found[0][1])
            try:
                m = __import__("%s.%s" % (cat,preset), globals(), locals(), [ "*" ])
            finally:
                # BUG FIX: restore sys.path even when the import raises,
                # so a failed plugin load does not leak a search path.
                sys.path.pop(0)
        return m
    except IOError:
        sys.exit("callirhoe: %s definition '%s' not found, use %s to see available definitions" % (longcat,
                  preset,listopt))
    except ImportError:
        sys.exit("callirhoe: error loading %s definition '%s'" % (longcat, preset))
def print_examples():
    """Write a set of command-line usage examples to stdout."""
    examples_text = """Examples:
Create a calendar of the current year (by default in a 4x3 grid):
$ callirhoe my_calendar.pdf
Same as above, but in landscape mode (3x4) (for printing):
$ callirhoe --landscape my_calendar.pdf
Landscape via rotation (for screen):
$ callirhoe --paper=a4w --rows=3 my_calendar.pdf
Let's try with bars instead of boxes:
$ callirhoe -t bars my_calendar.pdf
In landscape mode, one row only looks quite good:
$ callirhoe -t bars --landscape --rows=1 my_calendar.pdf
How about a more flat look?
$ callirhoe -t sparse -s bw_sparse --rows=1 --cols=3 my_calendar.pdf
Calendar of 24 consecutive months, starting from current month:
$ callirhoe 0:24 0 my_calendar.pdf
Create a 600-dpi PNG file so that we can edit it with some effects in order to print an A3 poster:
$ callirhoe my_poster.png --paper=a3 --dpi=600 --opaque
Create a calendar as a full-hd wallpaper (1920x1080):
$ callirhoe wallpaper.png --paper=-1920:-1080 --opaque --rows=3 --no-shadow -s rainbow-gfs
and do some magic with ImageMagick! ;)
$ convert wallpaper.png -negate fancy.png
"""
    print(examples_text)
def add_list_option(parser, opt):
    """Attach a ``--list-<opt>`` boolean flag to *parser*.

    @note: To be used with I{languages}, I{layouts}, I{styles} and I{geometries}.
    """
    flag_name = "--list-%s" % opt
    dest_name = "list_%s" % opt
    help_text = "list available %s" % opt
    parser.add_option(flag_name, action="store_true", dest=dest_name,
                      default=False, help=help_text)
def get_parser():
    """Build and return the command-line argument parser.

    @rtype: optparse.OptionParser
    """
    parser = optparse.OptionParser(usage="usage: %prog [options] [[MONTH[-MONTH2|:SPAN]] YEAR] FILE",
           description="High quality calendar rendering with vector graphics. "
           "By default, a calendar of the current year in pdf format is written to FILE. "
           "Alternatively, you can select a specific YEAR (0=current), "
           "and a month range from MONTH (0-12, 0=current) to MONTH2 or for SPAN months.",
           version="callirhoe " + lib._version + '\n' + lib._copyright)
    parser.add_option("-l", "--lang",  dest="lang", default="EN",
                    help="choose language [%default]")
    parser.add_option("-t", "--layout",  dest="layout", default="classic",
                    help="choose layout [%default]")
    parser.add_option("-?", "--layout-help",  dest="layouthelp", action="store_true", default=False,
                    help="show layout-specific help")
    parser.add_option("--examples", dest="examples", action="store_true",
                    help="display some usage examples")
    parser.add_option("-s", "--style",  dest="style", default="default",
                    help="choose style [%default]")
    parser.add_option("-g", "--geometry",  dest="geom", default="default",
                    help="choose geometry [%default]")
    parser.add_option("--landscape", action="store_true", dest="landscape", default=False,
                    help="landscape mode")
    parser.add_option("--dpi", type="float", default=72.0,
                    help="set DPI (for raster output) [%default]")
    parser.add_option("--paper", default="a4",
                    help="set paper type; PAPER can be an ISO paper type (a0..a9 or a0w..a9w) or of the "
                    "form W:H; positive values correspond to W or H mm, negative values correspond to "
                    "-W or -H pixels; 'w' suffix swaps width & height [%default]")
    parser.add_option("--border", type="float", default=3,
                    help="set border size (in mm) [%default]")
    parser.add_option("-H", "--with-holidays", action="append", dest="holidays",
                    help="load holiday file (can be used multiple times)")
    # BUG FIX (help text): "user the ..." -> "use the ..." in the two
    # options below; user-visible typo in --help output.
    parser.add_option("--short-monthnames", action="store_true", default=False,
                    help="use the short version of month names (defined in language file) [%default]")
    parser.add_option("--long-daynames", action="store_true", default=False,
                    help="use the long version of day names (defined in language file) [%default]")
    parser.add_option("-T", "--terse-holidays", action="store_false", dest="multiday_holidays",
                    default=True, help="do not print holiday end markers and omit dots")
    # add the four --list-* flags (languages/layouts/styles/geometries)
    for x in ["languages", "layouts", "styles", "geometries"]:
        add_list_option(parser, x)
    parser.add_option("--lang-var", action="append", dest="lang_assign",
                    help="modify a language variable")
    parser.add_option("--style-var", action="append", dest="style_assign",
                    help="modify a style variable, e.g. dom.frame_thickness=0")
    parser.add_option("--geom-var", action="append", dest="geom_assign",
                    help="modify a geometry variable")
    return parser
def main_program():
    """Parse the command line, load the selected plugins and render the calendar."""
    parser = get_parser()
    # Split argv into options known to the main parser and layout-specific
    # extras (argv2), which are validated against the layout's own parser below.
    sys.argv,argv2 = lib.extract_parser_args(sys.argv,parser)
    (options,args) = parser.parse_args()
    # Handle the --list-* flags first; any combination may be given, then exit.
    list_and_exit = False
    if options.list_languages:
        for x in plugin_list("lang"): print(x[0], end=' ')
        print()
        list_and_exit = True
    if options.list_styles:
        for x in plugin_list("style"): print(x[0], end=' ')
        print()
        list_and_exit = True
    if options.list_geometries:
        for x in plugin_list("geom"): print(x[0], end=' ')
        print()
        list_and_exit = True
    if options.list_layouts:
        for x in plugin_list("layouts"): print(x[0], end=' ')
        print()
        list_and_exit = True
    if list_and_exit: return
    # Load the four plugin modules selected by the options.
    plugin_paths = get_plugin_paths()
    Language = import_plugin(plugin_paths, "lang", "language", "languages", "--list-languages", options.lang)
    Style = import_plugin(plugin_paths, "style", "style", "styles", "--list-styles", options.style)
    Geometry = import_plugin(plugin_paths, "geom", "geometry", "geometries", "--list-geometries", options.geom)
    Layout = import_plugin(plugin_paths, "layouts", "layout", "layouts", "--list-layouts", options.layout)
    # Reject any leftover option the layout's parser does not know about.
    for x in argv2:
        if '=' in x: x = x[0:x.find('=')]
        if not Layout.parser.has_option(x):
            parser.error("invalid option %s; use --help (-h) or --layout-help (-?) to see available options" % x)
    (loptions,largs) = Layout.parser.parse_args(argv2)
    if options.layouthelp:
        #print "Help for layout:", options.layout
        Layout.parser.print_help()
        return
    if options.examples:
        print_examples()
        return
    # we can put it separately together with Layout; but we load Layout *after* lang,style,geom
    if len(args) < 1 or len(args) > 3:
        parser.print_help()
        return
    #if (len(args[-1]) == 4 and args[-1].isdigit()):
    #    print "WARNING: file name '%s' looks like a year, writing anyway..." % args[-1]
    # the usual "beware of exec()" crap applies here... but come on,
    # this is a SCRIPTING language, you can always hack the source code!!!
    # NOTE(review): exec() of user-supplied --*-var strings is arbitrary code
    # execution by design (acknowledged above); acceptable only because the
    # input comes from the local command line, never from the network.
    if options.lang_assign:
        for x in options.lang_assign: exec("Language.%s" % x)
    if options.style_assign:
        for x in options.style_assign: exec("Style.%s" % x)
    if options.geom_assign:
        for x in options.geom_assign: exec("Geometry.%s" % x)
    # Expose the language tables through the calendar module for the layouts.
    calendar.long_month_name = Language.long_month_name
    calendar.long_day_name = Language.long_day_name
    calendar.short_month_name = Language.short_month_name
    calendar.short_day_name = Language.short_day_name
    # Positional arguments: [[MONTH[-MONTH2|:SPAN]] YEAR] FILE
    if len(args) == 1:
        Year = time.localtime()[0]
        Month, MonthSpan = 1, 12
        Outfile = args[0]
    elif len(args) == 2:
        Year = lib.parse_year(args[0])
        Month, MonthSpan = 1, 12
        Outfile = args[1]
    elif len(args) == 3:
        Month, MonthSpan = lib.parse_month_range(args[0])
        Year = lib.parse_year(args[1])
        Outfile = args[2]
    if MonthSpan == 0:
        raise lib.Abort("callirhoe: empty calendar requested, aborting")
    # Propagate page/rendering options into the geometry plugin.
    Geometry.landscape = options.landscape
    xcairo.XDPI = options.dpi
    Geometry.pagespec = options.paper
    Geometry.border = options.border
    hprovider = holiday.HolidayProvider(Style.dom, Style.dom_weekend,
                    Style.dom_holiday, Style.dom_weekend_holiday,
                    Style.dom_multi, Style.dom_weekend_multi, options.multiday_holidays)
    if options.holidays:
        for f in options.holidays:
            hprovider.load_holiday_file(f)
    # Select short/long name tables according to the flags.
    if options.long_daynames:
        Language.day_name = Language.long_day_name
    else:
        Language.day_name = Language.short_day_name
    if options.short_monthnames:
        Language.month_name = Language.short_month_name
    else:
        Language.month_name = Language.long_month_name
    renderer = Layout.CalendarRenderer(Outfile, Year, Month, MonthSpan,
                    (Style,Geometry,Language), hprovider, lib._version, loptions)
    renderer.render()
if __name__ == "__main__":
    try:
        main_program()
    except lib.Abort as e:
        # Abort carries a user-facing message; exit with it instead of a traceback.
        sys.exit(e.args[0])
| geotz/callirhoe | callirhoe.py | Python | gpl-3.0 | 12,413 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington and Kai Nagel
# See opus_core/LICENSE
import os
import opus_matsim.sustain_city.tests as test_dir
from opus_core.tests import opus_unittest
from opus_core.store.csv_storage import csv_storage
from urbansim.datasets.travel_data_dataset import TravelDataDataset
from numpy import *
import numpy
from opus_core.logger import logger
class MatrixTest(opus_unittest.OpusTestCase):
""" Testing access to travel data values stored in numpy arrays
"""
def setUp(self):
print "Entering setup"
# get sensitivity test path
self.test_dir_path = test_dir.__path__[0]
# get location to travel data table
self.input_directory = os.path.join( self.test_dir_path, 'data', 'travel_cost')
logger.log_status("input_directory: %s" % self.input_directory)
# check source file
if not os.path.exists( self.input_directory ):
raise('File not found! %s' % self.input_directory)
print "Leaving setup"
def test_run(self):
print "Entering test run"
# This test loads an exising travel data as a TravelDataSet (numpy array)
# and accesses single (pre-known) values to validate the conversion process
# (numpy array into standard python list).
#
# Here an example:
# my_list = [[1,2,3],
# [4,5,6],
# [7,8,9]]
#
# my_list[0][1] should be = 2
# my_list[2][2] should be = 9
table_name = 'travel_data'
travel_data_attribute = 'single_vehicle_to_work_travel_cost'
# location of pre-calculated MATSim travel costs
in_storage = csv_storage(storage_location = self.input_directory)
# create travel data set (travel costs)
travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )
travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=31)
# converting from numpy array into a 2d list
travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()
# get two values for validation
value1 = int(travel_list[1][1]) # should be = 0
value2 = int(travel_list[2][1]) # should be = 120
logger.log_status('First validation value should be 0. Current value is %i' % value1)
logger.log_status('Second validation value should be 120. Current value is %i' % value2)
self.assertTrue( value1 == 0 )
self.assertTrue( value2 == 120 )
# self.dump_travel_list(travel_list) # for debugging
print "Leaving test run"
def dump_travel_list(self, travel_list):
''' Dumping travel_list for debugging reasons...
'''
dest = os.path.join( os.environ['OPUS_HOME'], 'opus_matsim', 'tmp')
if not os.path.exists(dest):
os.makedirs(dest)
travel = os.path.join(dest, 'travelFile.txt')
f = open(travel, "w")
f.write( str(travel_list) )
f.close()
if __name__ == "__main__":
#mt = MatrixTest() # for debugging
#mt.test_run() # for debugging
opus_unittest.main() | christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_matsim/sustain_city/tests/matsim_coupeling/matrix_test.py | Python | gpl-2.0 | 3,343 |
"""
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import operator
import unittest, string, sys, struct
from test import support
from collections import UserList
class Sequence:
    """Minimal sequence wrapper used as a generic iterable fixture."""

    def __init__(self, seq='wxyz'):
        self.seq = seq

    def __len__(self):
        return len(self.seq)

    def __getitem__(self, i):
        return self.seq[i]
class BadSeq1(Sequence):
    """Sequence fixture with heterogeneous items (ints mixed with a str)."""

    def __init__(self):
        self.seq = [7, 'hello', 123]

    def __str__(self):
        return '{0} {1} {2}'.format(*self.seq)
class BadSeq2(Sequence):
    """Sequence fixture that lies about its length (__len__ exceeds real size)."""

    def __init__(self):
        self.seq = ['a', 'b', 'c']

    def __len__(self):
        return 8
class BaseTest(unittest.TestCase):
# These tests are for buffers of values (bytes) and not
# specific to character interpretation, used for bytes objects
# and various string implementations
# The type to be tested
# Change in subclasses to change the behaviour of fixtesttype()
type2test = None
# All tests pass their arguments to the testing methods
# as str objects. fixtesttype() can be used to propagate
# these arguments to the appropriate type
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.items()
])
else:
return obj
# check that obj.method(*args) returns result
def checkequal(self, result, obj, methodname, *args):
    """Assert ``obj.methodname(*args) == result`` after fixtype conversion.

    Additionally verifies that when a method returns its receiver
    unchanged, a subclass instance does NOT get itself back -- string
    methods must return the base type, not the subclass.
    """
    result = self.fixtype(result)
    obj = self.fixtype(obj)
    args = self.fixtype(args)
    realresult = getattr(obj, methodname)(*args)
    self.assertEqual(
        result,
        realresult
    )
    # if the original is returned make sure that
    # this doesn't happen with subclasses
    if obj is realresult:
        try:
            class subtype(self.__class__.type2test):
                pass
        except TypeError:
            pass # Skip this if we can't subclass
        else:
            obj = subtype(obj)
            realresult = getattr(obj, methodname)(*args)
            # a subclass must never be handed back its own instance
            self.assertIsNot(obj, realresult)
# check that obj.method(*args) raises exc
def checkraises(self, exc, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(obj, methodname),
*args
)
# call obj.method(*args) without any checks
def checkcall(self, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
getattr(obj, methodname)(*args)
def test_count(self):
    """str.count: explicit cases plus an exhaustive cross-check against
    a replace-and-measure implementation over all short a/b strings."""
    self.checkequal(3, 'aaa', 'count', 'a')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(3, 'aaa', 'count', 'a')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(3, 'aaa', 'count', 'a')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(0, 'aaa', 'count', 'b')
    self.checkequal(2, 'aaa', 'count', 'a', 1)
    self.checkequal(0, 'aaa', 'count', 'a', 10)
    self.checkequal(1, 'aaa', 'count', 'a', -1)
    self.checkequal(3, 'aaa', 'count', 'a', -10)
    self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
    self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
    self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
    self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
    # empty needle: counted once per gap, including the ends
    self.checkequal(3, 'aaa', 'count', '', 1)
    self.checkequal(1, 'aaa', 'count', '', 3)
    self.checkequal(0, 'aaa', 'count', '', 10)
    self.checkequal(2, 'aaa', 'count', '', -1)
    self.checkequal(4, 'aaa', 'count', '', -10)
    self.checkequal(1, '', 'count', '')
    self.checkequal(0, '', 'count', '', 1, 1)
    self.checkequal(0, '', 'count', '', sys.maxsize, 0)
    self.checkequal(0, '', 'count', 'xx')
    self.checkequal(0, '', 'count', 'xx', 1, 1)
    self.checkequal(0, '', 'count', 'xx', sys.maxsize, 0)
    self.checkraises(TypeError, 'hello', 'count')
    self.checkraises(TypeError, 'hello', 'count', 42)
    # For a variety of combinations,
    # verify that str.count() matches an equivalent function
    # replacing all occurrences and then differencing the string lengths
    charset = ['', 'a', 'b']
    digits = 7
    base = len(charset)
    teststrings = set()
    for i in range(base ** digits):
        entry = []
        for j in range(digits):
            i, m = divmod(i, base)
            entry.append(charset[m])
        teststrings.add(''.join(entry))
    teststrings = [self.fixtype(ts) for ts in teststrings]
    for i in teststrings:
        n = len(i)
        for j in teststrings:
            r1 = i.count(j)
            if j:
                r2, rem = divmod(n - len(i.replace(j, self.fixtype(''))),
                                 len(j))
            else:
                r2, rem = len(i)+1, 0
            if rem or r1 != r2:
                self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
                self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
    """str.find: explicit cases plus an exhaustive cross-check of
    find() against __contains__ over all short a/b/c strings."""
    self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
    self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
    self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
    self.checkequal(0, 'abc', 'find', '', 0)
    self.checkequal(3, 'abc', 'find', '', 3)
    self.checkequal(-1, 'abc', 'find', '', 4)
    # to check the ability to pass None as defaults
    self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
    self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
    self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
    self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
    self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
    self.checkraises(TypeError, 'hello', 'find')
    self.checkraises(TypeError, 'hello', 'find', 42)
    self.checkequal(0, '', 'find', '')
    self.checkequal(-1, '', 'find', '', 1, 1)
    self.checkequal(-1, '', 'find', '', sys.maxsize, 0)
    self.checkequal(-1, '', 'find', 'xx')
    self.checkequal(-1, '', 'find', 'xx', 1, 1)
    self.checkequal(-1, '', 'find', 'xx', sys.maxsize, 0)
    # issue 7458
    self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
    # For a variety of combinations,
    # verify that str.find() matches __contains__
    # and that the found substring is really at that location
    charset = ['', 'a', 'b', 'c']
    digits = 5
    base = len(charset)
    teststrings = set()
    for i in range(base ** digits):
        entry = []
        for j in range(digits):
            i, m = divmod(i, base)
            entry.append(charset[m])
        teststrings.add(''.join(entry))
    teststrings = [self.fixtype(ts) for ts in teststrings]
    for i in teststrings:
        for j in teststrings:
            loc = i.find(j)
            r1 = (loc != -1)
            r2 = j in i
            self.assertEqual(r1, r2)
            if loc != -1:
                self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
    """str.rfind: explicit cases plus an exhaustive cross-check of
    rfind() against __contains__ over all short a/b/c strings."""
    self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
    self.checkequal(12, 'abcdefghiabc', 'rfind', '')
    self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
    self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
    self.checkequal(3, 'abc', 'rfind', '', 0)
    self.checkequal(3, 'abc', 'rfind', '', 3)
    self.checkequal(-1, 'abc', 'rfind', '', 4)
    # to check the ability to pass None as defaults
    self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
    self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
    self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
    self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
    self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
    self.checkraises(TypeError, 'hello', 'rfind')
    self.checkraises(TypeError, 'hello', 'rfind', 42)
    # For a variety of combinations,
    # verify that str.rfind() matches __contains__
    # and that the found substring is really at that location
    charset = ['', 'a', 'b', 'c']
    digits = 5
    base = len(charset)
    teststrings = set()
    for i in range(base ** digits):
        entry = []
        for j in range(digits):
            i, m = divmod(i, base)
            entry.append(charset[m])
        teststrings.add(''.join(entry))
    teststrings = [self.fixtype(ts) for ts in teststrings]
    for i in teststrings:
        for j in teststrings:
            loc = i.rfind(j)
            r1 = (loc != -1)
            r2 = j in i
            self.assertEqual(r1, r2)
            if loc != -1:
                self.assertEqual(i[loc:loc+len(j)], j)
    # issue 7458
    self.checkequal(-1, 'ab', 'rfind', 'xxx', sys.maxsize + 1, 0)
def test_index(self):
    """str.index: like find(), but a missing substring raises ValueError."""
    self.checkequal(0, 'abcdefghiabc', 'index', '')
    self.checkequal(3, 'abcdefghiabc', 'index', 'def')
    self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
    self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
    self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
    self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
    self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
    self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
    # to check the ability to pass None as defaults
    self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
    self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
    self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
    self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
    self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
    self.checkraises(TypeError, 'hello', 'index')
    self.checkraises(TypeError, 'hello', 'index', 42)
def test_rindex(self):
    """str.rindex: like rfind(), but a missing substring raises ValueError."""
    self.checkequal(12, 'abcdefghiabc', 'rindex', '')
    self.checkequal(3,  'abcdefghiabc', 'rindex', 'def')
    self.checkequal(9,  'abcdefghiabc', 'rindex', 'abc')
    self.checkequal(0,  'abcdefghiabc', 'rindex', 'abc', 0, -1)
    self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
    self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
    self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
    self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
    self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
    # to check the ability to pass None as defaults
    self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
    self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
    self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
    self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
    self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
    self.checkraises(TypeError, 'hello', 'rindex')
    self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
def test_expandtabs(self):
    """str.expandtabs: default and explicit tab sizes; \\r and \\r\\n reset
    the column counter; huge tabsize overflows on 32-bit builds."""
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
    self.checkequal('abc\rab  def\ng   hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
    self.checkequal('abc\r\nab  def\ng   hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
    self.checkequal('abc\rab      def\ng       hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
    self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)
    self.checkequal('  a\n b', ' \ta\n\tb', 'expandtabs', 1)
    self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
    # This test is only valid when sizeof(int) == sizeof(void*) == 4.
    if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
        self.checkraises(OverflowError,
                         '\ta\n\tb', 'expandtabs', sys.maxsize)
def test_split(self):
    """str.split with an explicit separator: single-char and multi-char
    separators, maxsplit limits, edge/empty cases, and rejection of an
    empty separator."""
    # by a char
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
    self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
    self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
    self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|',
                    sys.maxsize-2)
    self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
    self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
    self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
    self.checkequal(['', ' startcase'], '| startcase', 'split', '|')
    self.checkequal(['', 'bothcase', ''], '|bothcase|', 'split', '|')
    self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)
    self.checkequal(['a']*20, ('a|'*20)[:-1], 'split', '|')
    self.checkequal(['a']*15 +['a|a|a|a|a'],
                    ('a|'*20)[:-1], 'split', '|', 15)
    # by string
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
    self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
    self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//',
                    sys.maxsize-10)
    self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
    self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
    self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')
    self.checkequal(['', ' begincase'], 'test begincase', 'split', 'test')
    self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
                    'split', 'test')
    self.checkequal(['a', 'bc'], 'abbbc', 'split', 'bb')
    self.checkequal(['', ''], 'aaa', 'split', 'aaa')
    self.checkequal(['aaa'], 'aaa', 'split', 'aaa', 0)
    self.checkequal(['ab', 'ab'], 'abbaab', 'split', 'ba')
    self.checkequal(['aaaa'], 'aaaa', 'split', 'aab')
    self.checkequal([''], '', 'split', 'aaa')
    self.checkequal(['aa'], 'aa', 'split', 'aaa')
    self.checkequal(['A', 'bobb'], 'Abbobbbobb', 'split', 'bbobb')
    self.checkequal(['A', 'B', ''], 'AbbobbBbbobb', 'split', 'bbobb')
    self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH')
    self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH', 19)
    self.checkequal(['a']*18 + ['aBLAHa'], ('aBLAH'*20)[:-4],
                    'split', 'BLAH', 18)
    # argument type
    self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)
    # null case
    self.checkraises(ValueError, 'hello', 'split', '')
    self.checkraises(ValueError, 'hello', 'split', '', 0)
def test_rsplit(self):
    """str.rsplit with an explicit separator: mirror of test_split, but
    maxsplit consumes separators from the right."""
    # by a char
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
    self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
    self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
    self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|',
                    sys.maxsize-100)
    self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
    self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
    self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
    self.checkequal(['endcase ', ''], 'endcase |', 'rsplit', '|')
    self.checkequal(['', 'bothcase', ''], '|bothcase|', 'rsplit', '|')
    self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)
    self.checkequal(['a']*20, ('a|'*20)[:-1], 'rsplit', '|')
    self.checkequal(['a|a|a|a|a']+['a']*15,
                    ('a|'*20)[:-1], 'rsplit', '|', 15)
    # by string
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
    self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
    self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
    self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//',
                    sys.maxsize-5)
    self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
    self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
    self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')
    self.checkequal(['endcase ', ''], 'endcase test', 'rsplit', 'test')
    self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
                    'rsplit', 'test')
    self.checkequal(['ab', 'c'], 'abbbc', 'rsplit', 'bb')
    self.checkequal(['', ''], 'aaa', 'rsplit', 'aaa')
    self.checkequal(['aaa'], 'aaa', 'rsplit', 'aaa', 0)
    self.checkequal(['ab', 'ab'], 'abbaab', 'rsplit', 'ba')
    self.checkequal(['aaaa'], 'aaaa', 'rsplit', 'aab')
    self.checkequal([''], '', 'rsplit', 'aaa')
    self.checkequal(['aa'], 'aa', 'rsplit', 'aaa')
    self.checkequal(['bbob', 'A'], 'bbobbbobbA', 'rsplit', 'bbobb')
    self.checkequal(['', 'B', 'A'], 'bbobbBbbobbA', 'rsplit', 'bbobb')
    self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH')
    self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH', 19)
    self.checkequal(['aBLAHa'] + ['a']*18, ('aBLAH'*20)[:-4],
                    'rsplit', 'BLAH', 18)
    # argument type
    self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
    # null case
    self.checkraises(ValueError, 'hello', 'rsplit', '')
    self.checkraises(ValueError, 'hello', 'rsplit', '', 0)
def test_replace(self):
    """Exercise str.replace() across every interesting regime: empty
    strings, empty 'from' (interleaving), deletion, equal-length
    substitution, and growing/shrinking substitution — each with and
    without an explicit count argument (including -1 and sys.maxsize,
    which both mean "replace all").
    """
    EQ = self.checkequal
    # Operations on the empty string
    EQ("", "", "replace", "", "")
    EQ("A", "", "replace", "", "A")
    EQ("", "", "replace", "A", "")
    EQ("", "", "replace", "A", "A")
    EQ("", "", "replace", "", "", 100)
    EQ("", "", "replace", "", "", sys.maxsize)
    # interleave (from=="", 'to' gets inserted everywhere)
    EQ("A", "A", "replace", "", "")
    EQ("*A*", "A", "replace", "", "*")
    EQ("*1A*1", "A", "replace", "", "*1")
    EQ("*-#A*-#", "A", "replace", "", "*-#")
    EQ("*-A*-A*-", "AA", "replace", "", "*-")
    EQ("*-A*-A*-", "AA", "replace", "", "*-", -1)
    EQ("*-A*-A*-", "AA", "replace", "", "*-", sys.maxsize)
    EQ("*-A*-A*-", "AA", "replace", "", "*-", 4)
    EQ("*-A*-A*-", "AA", "replace", "", "*-", 3)
    EQ("*-A*-A", "AA", "replace", "", "*-", 2)
    EQ("*-AA", "AA", "replace", "", "*-", 1)
    EQ("AA", "AA", "replace", "", "*-", 0)
    # single character deletion (from=="A", to=="")
    EQ("", "A", "replace", "A", "")
    EQ("", "AAA", "replace", "A", "")
    EQ("", "AAA", "replace", "A", "", -1)
    EQ("", "AAA", "replace", "A", "", sys.maxsize)
    EQ("", "AAA", "replace", "A", "", 4)
    EQ("", "AAA", "replace", "A", "", 3)
    EQ("A", "AAA", "replace", "A", "", 2)
    EQ("AA", "AAA", "replace", "A", "", 1)
    EQ("AAA", "AAA", "replace", "A", "", 0)
    EQ("", "AAAAAAAAAA", "replace", "A", "")
    EQ("BCD", "ABACADA", "replace", "A", "")
    EQ("BCD", "ABACADA", "replace", "A", "", -1)
    EQ("BCD", "ABACADA", "replace", "A", "", sys.maxsize)
    EQ("BCD", "ABACADA", "replace", "A", "", 5)
    EQ("BCD", "ABACADA", "replace", "A", "", 4)
    EQ("BCDA", "ABACADA", "replace", "A", "", 3)
    EQ("BCADA", "ABACADA", "replace", "A", "", 2)
    EQ("BACADA", "ABACADA", "replace", "A", "", 1)
    EQ("ABACADA", "ABACADA", "replace", "A", "", 0)
    EQ("BCD", "ABCAD", "replace", "A", "")
    EQ("BCD", "ABCADAA", "replace", "A", "")
    EQ("BCD", "BCD", "replace", "A", "")
    EQ("*************", "*************", "replace", "A", "")
    EQ("^A^", "^"+"A"*1000+"^", "replace", "A", "", 999)
    # substring deletion (from=="the", to=="")
    EQ("", "the", "replace", "the", "")
    EQ("ater", "theater", "replace", "the", "")
    EQ("", "thethe", "replace", "the", "")
    EQ("", "thethethethe", "replace", "the", "")
    EQ("aaaa", "theatheatheathea", "replace", "the", "")
    EQ("that", "that", "replace", "the", "")
    EQ("thaet", "thaet", "replace", "the", "")
    EQ("here and re", "here and there", "replace", "the", "")
    EQ("here and re and re", "here and there and there",
       "replace", "the", "", sys.maxsize)
    EQ("here and re and re", "here and there and there",
       "replace", "the", "", -1)
    EQ("here and re and re", "here and there and there",
       "replace", "the", "", 3)
    EQ("here and re and re", "here and there and there",
       "replace", "the", "", 2)
    EQ("here and re and there", "here and there and there",
       "replace", "the", "", 1)
    EQ("here and there and there", "here and there and there",
       "replace", "the", "", 0)
    EQ("here and re and re", "here and there and there", "replace", "the", "")
    EQ("abc", "abc", "replace", "the", "")
    EQ("abcdefg", "abcdefg", "replace", "the", "")
    # substring deletion (from=="bob", to=="") — overlapping candidates
    EQ("bob", "bbobob", "replace", "bob", "")
    EQ("bobXbob", "bbobobXbbobob", "replace", "bob", "")
    EQ("aaaaaaa", "aaaaaaabob", "replace", "bob", "")
    EQ("aaaaaaa", "aaaaaaa", "replace", "bob", "")
    # single character replace in place (len(from)==len(to)==1)
    EQ("Who goes there?", "Who goes there?", "replace", "o", "o")
    EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O")
    EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", sys.maxsize)
    EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", -1)
    EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 3)
    EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 2)
    EQ("WhO goes there?", "Who goes there?", "replace", "o", "O", 1)
    EQ("Who goes there?", "Who goes there?", "replace", "o", "O", 0)
    EQ("Who goes there?", "Who goes there?", "replace", "a", "q")
    EQ("who goes there?", "Who goes there?", "replace", "W", "w")
    EQ("wwho goes there?ww", "WWho goes there?WW", "replace", "W", "w")
    EQ("Who goes there!", "Who goes there?", "replace", "?", "!")
    EQ("Who goes there!!", "Who goes there??", "replace", "?", "!")
    EQ("Who goes there?", "Who goes there?", "replace", ".", "!")
    # substring replace in place (len(from)==len(to) > 1)
    EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**")
    EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", sys.maxsize)
    EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", -1)
    EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 4)
    EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 3)
    EQ("Th** ** a tissue", "This is a tissue", "replace", "is", "**", 2)
    EQ("Th** is a tissue", "This is a tissue", "replace", "is", "**", 1)
    EQ("This is a tissue", "This is a tissue", "replace", "is", "**", 0)
    EQ("cobob", "bobob", "replace", "bob", "cob")
    EQ("cobobXcobocob", "bobobXbobobob", "replace", "bob", "cob")
    EQ("bobob", "bobob", "replace", "bot", "bot")
    # replace single character (len(from)==1, len(to)>1)
    EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK")
    EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", -1)
    EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", sys.maxsize)
    EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", 2)
    EQ("ReyKKjavik", "Reykjavik", "replace", "k", "KK", 1)
    EQ("Reykjavik", "Reykjavik", "replace", "k", "KK", 0)
    EQ("A----B----C----", "A.B.C.", "replace", ".", "----")
    EQ("Reykjavik", "Reykjavik", "replace", "q", "KK")
    # replace substring (len(from)>1, len(to)!=len(from))
    EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
       "replace", "spam", "ham")
    EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
       "replace", "spam", "ham", sys.maxsize)
    EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
       "replace", "spam", "ham", -1)
    EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
       "replace", "spam", "ham", 4)
    EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
       "replace", "spam", "ham", 3)
    EQ("ham, ham, eggs and spam", "spam, spam, eggs and spam",
       "replace", "spam", "ham", 2)
    EQ("ham, spam, eggs and spam", "spam, spam, eggs and spam",
       "replace", "spam", "ham", 1)
    EQ("spam, spam, eggs and spam", "spam, spam, eggs and spam",
       "replace", "spam", "ham", 0)
    EQ("bobob", "bobobob", "replace", "bobob", "bob")
    EQ("bobobXbobob", "bobobobXbobobob", "replace", "bobob", "bob")
    EQ("BOBOBOB", "BOBOBOB", "replace", "bob", "bobby")
    # XXX Commented out. Is there any reason to support buffer objects
    # as arguments for str.replace()? GvR
    ## ba = bytearray('a')
    ## bb = bytearray('b')
    ## EQ("bbc", "abc", "replace", ba, bb)
    ## EQ("aac", "abc", "replace", bb, ba)
    #
    self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
    self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
    self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
    self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
    self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
    self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
    self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
    self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
    self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
    self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
    self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
    self.checkequal('abc', 'abc', 'replace', '', '-', 0)
    self.checkequal('', '', 'replace', '', '')
    self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
    self.checkequal('abc', 'abc', 'replace', 'xy', '--')
    # Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
    # MemoryError due to empty result (platform malloc issue when requesting
    # 0 bytes).
    self.checkequal('', '123', 'replace', '123', '')
    self.checkequal('', '123123', 'replace', '123', '')
    self.checkequal('x', '123x123', 'replace', '123', '')
    # wrong number / type of arguments
    self.checkraises(TypeError, 'hello', 'replace')
    self.checkraises(TypeError, 'hello', 'replace', 42)
    self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
    self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
def test_replace_overflow(self):
    """On 32-bit builds, replace() must raise OverflowError instead of
    trying to allocate a result whose length overflows Py_ssize_t."""
    # Only meaningful on a 32-bit build; silently pass elsewhere.
    is_32bit = sys.maxsize == 2147483647 and struct.calcsize("P") <= 4
    if not is_32bit:
        return
    huge = "A" * (2**16)
    # Each (pattern, replacement) pair would blow the result past 2**31.
    for pattern, replacement in (("", huge), ("A", huge), ("AA", huge + huge)):
        self.checkraises(OverflowError, huge, "replace", pattern, replacement)
class CommonTest(BaseTest):
    # This testcase contains tests that can be used in all
    # stringlike classes. Currently this is str, unicode
    # UserString and the string module.
    #
    # NOTE(review): several expected-value literals in the previous copy had
    # their runs of spaces collapsed (an extraction artifact), making the
    # assertions mathematically false (e.g. 'abc'.center(10) cannot equal a
    # 5-character string). The padded literals in test_expandtabs,
    # test_ljust, test_rjust, test_center and one test_additional_rsplit
    # expectation are restored below; all other vectors are self-consistent
    # and kept as-is.

    def test_hash(self):
        """Incremental += must invalidate the cached hash (SF bug 1054139)."""
        # SF bug 1054139: += optimization was not invalidating cached hash value
        a = self.type2test('DNSSEC')
        b = self.type2test('')
        for c in a:
            b += c
            hash(b)
        self.assertEqual(hash(a), hash(b))

    def test_capitalize(self):
        """capitalize(): first char uppercased/titlecased, rest lowered."""
        self.checkequal(' hello ', ' hello ', 'capitalize')
        self.checkequal('Hello ', 'Hello ','capitalize')
        self.checkequal('Hello ', 'hello ','capitalize')
        self.checkequal('Aaaa', 'aaaa', 'capitalize')
        self.checkequal('Aaaa', 'AaAa', 'capitalize')
        # check that titlecased chars are lowered correctly
        # \u1ffc is the titlecased char
        self.checkequal('\u1ffc\u1ff3\u1ff3\u1ff3',
                        '\u1ff3\u1ff3\u1ffc\u1ffc', 'capitalize')
        # check with cased non-letter chars
        self.checkequal('\u24c5\u24e8\u24e3\u24d7\u24de\u24dd',
                        '\u24c5\u24ce\u24c9\u24bd\u24c4\u24c3', 'capitalize')
        self.checkequal('\u24c5\u24e8\u24e3\u24d7\u24de\u24dd',
                        '\u24df\u24e8\u24e3\u24d7\u24de\u24dd', 'capitalize')
        self.checkequal('\u2160\u2171\u2172',
                        '\u2160\u2161\u2162', 'capitalize')
        self.checkequal('\u2160\u2171\u2172',
                        '\u2170\u2171\u2172', 'capitalize')
        # check with Ll chars with no upper - nothing changes here
        self.checkequal('\u019b\u1d00\u1d86\u0221\u1fb7',
                        '\u019b\u1d00\u1d86\u0221\u1fb7', 'capitalize')
        self.checkraises(TypeError, 'hello', 'capitalize', 42)

    def test_lower(self):
        """lower(): all cased characters converted to lowercase."""
        self.checkequal('hello', 'HeLLo', 'lower')
        self.checkequal('hello', 'hello', 'lower')
        self.checkraises(TypeError, 'hello', 'lower', 42)

    def test_upper(self):
        """upper(): all cased characters converted to uppercase."""
        self.checkequal('HELLO', 'HeLLo', 'upper')
        self.checkequal('HELLO', 'HELLO', 'upper')
        self.checkraises(TypeError, 'hello', 'upper', 42)

    def test_expandtabs(self):
        """expandtabs(): tabs become spaces up to the next tab stop;
        \\n and \\r reset the column counter."""
        # default tab size is 8: 'ab' -> 6 pad spaces, 'g' -> 7 pad spaces
        self.checkequal('abc\rab      def\ng       hi',
                        'abc\rab\tdef\ng\thi', 'expandtabs')
        self.checkequal('abc\rab      def\ng       hi',
                        'abc\rab\tdef\ng\thi', 'expandtabs', 8)
        # tab size 4: 'ab' -> 2 pad spaces, 'g' -> 3 pad spaces
        self.checkequal('abc\rab  def\ng   hi',
                        'abc\rab\tdef\ng\thi', 'expandtabs', 4)
        self.checkequal('abc\r\nab  def\ng   hi',
                        'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
        self.checkequal('abc\rab      def\ng       hi',
                        'abc\rab\tdef\ng\thi', 'expandtabs')
        self.checkequal('abc\rab      def\ng       hi',
                        'abc\rab\tdef\ng\thi', 'expandtabs', 8)
        # no tabs at all: string passes through unchanged
        self.checkequal('abc\r\nab\r\ndef\ng\r\nhi',
                        'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)
        self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)

    def test_additional_split(self):
        """split() with the default (whitespace) separator and maxsplit."""
        self.checkequal(['this', 'is', 'the', 'split', 'function'],
                        'this is the split function', 'split')
        # by whitespace
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
        self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
        self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None,
                        sys.maxsize-1)
        self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
        self.checkequal(['a b c d'], ' a b c d', 'split', None, 0)
        self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
        self.checkequal([], ' ', 'split')
        self.checkequal(['a'], ' a ', 'split')
        self.checkequal(['a', 'b'], ' a b ', 'split')
        self.checkequal(['a', 'b '], ' a b ', 'split', None, 1)
        self.checkequal(['a', 'b c '], ' a b c ', 'split', None, 1)
        self.checkequal(['a', 'b', 'c '], ' a b c ', 'split', None, 2)
        self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'split')
        aaa = ' a '*20
        self.checkequal(['a']*20, aaa, 'split')
        self.checkequal(['a'] + [aaa[4:]], aaa, 'split', None, 1)
        self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
        # mixed use of str and unicode
        self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', ' ', 2)

    def test_additional_rsplit(self):
        """rsplit() with the default (whitespace) separator and maxsplit."""
        self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
                        'this is the rsplit function', 'rsplit')
        # by whitespace
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
        self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
        self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
        self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None,
                        sys.maxsize-20)
        self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
        self.checkequal(['a b c d'], 'a b c d ', 'rsplit', None, 0)
        self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
        self.checkequal([], ' ', 'rsplit')
        self.checkequal(['a'], ' a ', 'rsplit')
        self.checkequal(['a', 'b'], ' a b ', 'rsplit')
        self.checkequal([' a', 'b'], ' a b ', 'rsplit', None, 1)
        self.checkequal([' a b', 'c'], ' a b c ', 'rsplit',
                        None, 1)
        self.checkequal([' a', 'b', 'c'], ' a b c ', 'rsplit',
                        None, 2)
        self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'rsplit', None, 88)
        aaa = ' a '*20
        self.checkequal(['a']*20, aaa, 'rsplit')
        self.checkequal([aaa[:-4]] + ['a'], aaa, 'rsplit', None, 1)
        # ' a ' * 2 == ' a  a ' (double space between the a's), so the
        # 18-split remainder is ' a  a', not ' a a'.
        self.checkequal([' a  a'] + ['a']*18, aaa, 'rsplit', None, 18)
        # mixed use of str and unicode
        self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', ' ', 2)

    def test_strip(self):
        """strip()/lstrip()/rstrip() with no arg, None, and a char set."""
        self.checkequal('hello', ' hello ', 'strip')
        self.checkequal('hello ', ' hello ', 'lstrip')
        self.checkequal(' hello', ' hello ', 'rstrip')
        self.checkequal('hello', 'hello', 'strip')
        # strip/lstrip/rstrip with None arg
        self.checkequal('hello', ' hello ', 'strip', None)
        self.checkequal('hello ', ' hello ', 'lstrip', None)
        self.checkequal(' hello', ' hello ', 'rstrip', None)
        self.checkequal('hello', 'hello', 'strip', None)
        # strip/lstrip/rstrip with str arg
        self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
        self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
        self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
        self.checkequal('hello', 'hello', 'strip', 'xyz')
        self.checkraises(TypeError, 'hello', 'strip', 42, 42)
        self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
        self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)

    def test_ljust(self):
        """ljust(): pad on the right out to the requested width."""
        self.checkequal('abc       ', 'abc', 'ljust', 10)
        self.checkequal('abc   ', 'abc', 'ljust', 6)
        self.checkequal('abc', 'abc', 'ljust', 3)
        self.checkequal('abc', 'abc', 'ljust', 2)
        self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
        self.checkraises(TypeError, 'abc', 'ljust')

    def test_rjust(self):
        """rjust(): pad on the left out to the requested width."""
        self.checkequal('       abc', 'abc', 'rjust', 10)
        self.checkequal('   abc', 'abc', 'rjust', 6)
        self.checkequal('abc', 'abc', 'rjust', 3)
        self.checkequal('abc', 'abc', 'rjust', 2)
        self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
        self.checkraises(TypeError, 'abc', 'rjust')

    def test_center(self):
        """center(): pad both sides; the extra character goes on the right."""
        self.checkequal('   abc    ', 'abc', 'center', 10)
        self.checkequal(' abc  ', 'abc', 'center', 6)
        self.checkequal('abc', 'abc', 'center', 3)
        self.checkequal('abc', 'abc', 'center', 2)
        self.checkequal('***abc****', 'abc', 'center', 10, '*')
        self.checkraises(TypeError, 'abc', 'center')

    def test_swapcase(self):
        """swapcase(): invert the case of every cased character."""
        self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')
        self.checkraises(TypeError, 'hello', 'swapcase', 42)

    def test_zfill(self):
        """zfill(): left-pad with zeros, keeping a leading sign in front."""
        self.checkequal('123', '123', 'zfill', 2)
        self.checkequal('123', '123', 'zfill', 3)
        self.checkequal('0123', '123', 'zfill', 4)
        self.checkequal('+123', '+123', 'zfill', 3)
        self.checkequal('+123', '+123', 'zfill', 4)
        self.checkequal('+0123', '+123', 'zfill', 5)
        self.checkequal('-123', '-123', 'zfill', 3)
        self.checkequal('-123', '-123', 'zfill', 4)
        self.checkequal('-0123', '-123', 'zfill', 5)
        self.checkequal('000', '', 'zfill', 3)
        self.checkequal('34', '34', 'zfill', 1)
        self.checkequal('0034', '34', 'zfill', 4)
        self.checkraises(TypeError, '123', 'zfill')
class MixinStrUnicodeUserStringTest:
# additional tests that only work for
# stringlike objects, i.e. str, unicode, UserString
# (but not the string module)
def test_islower(self):
    """islower(): True iff there is a cased char and all cased chars are lower."""
    cases = [
        (False, ''),
        (True, 'a'),
        (False, 'A'),
        (False, '\n'),
        (True, 'abc'),
        (False, 'aBc'),
        (True, 'abc\n'),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'islower')
    # islower() accepts no arguments
    self.checkraises(TypeError, 'abc', 'islower', 42)
def test_isupper(self):
    """isupper(): True iff there is a cased char and all cased chars are upper."""
    cases = [
        (False, ''),
        (False, 'a'),
        (True, 'A'),
        (False, '\n'),
        (True, 'ABC'),
        (False, 'AbC'),
        (True, 'ABC\n'),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'isupper')
    # isupper() accepts no arguments
    self.checkraises(TypeError, 'abc', 'isupper', 42)
def test_istitle(self):
    """istitle(): each word starts upper/titlecase, the rest lowercase."""
    cases = [
        (False, ''),
        (False, 'a'),
        (True, 'A'),
        (False, '\n'),
        (True, 'A Titlecased Line'),
        (True, 'A\nTitlecased Line'),
        (True, 'A Titlecased, Line'),
        (False, 'Not a capitalized String'),
        (False, 'Not\ta Titlecase String'),
        (False, 'Not--a Titlecase String'),
        (False, 'NOT'),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'istitle')
    # istitle() accepts no arguments
    self.checkraises(TypeError, 'abc', 'istitle', 42)
def test_isspace(self):
    """isspace(): non-empty and every character is whitespace."""
    cases = [
        (False, ''),
        (False, 'a'),
        (True, ' '),
        (True, '\t'),
        (True, '\r'),
        (True, '\n'),
        (True, ' \t\r\n'),
        (False, ' \t\r\na'),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'isspace')
    # isspace() accepts no arguments
    self.checkraises(TypeError, 'abc', 'isspace', 42)
def test_isalpha(self):
    """isalpha(): non-empty and every character is alphabetic."""
    cases = [
        (False, ''),
        (True, 'a'),
        (True, 'A'),
        (False, '\n'),
        (True, 'abc'),
        (False, 'aBc123'),
        (False, 'abc\n'),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'isalpha')
    # isalpha() accepts no arguments
    self.checkraises(TypeError, 'abc', 'isalpha', 42)
def test_isalnum(self):
    """isalnum(): non-empty and every character is alphanumeric."""
    cases = [
        (False, ''),
        (True, 'a'),
        (True, 'A'),
        (False, '\n'),
        (True, '123abc456'),
        (True, 'a1b3c'),
        (False, 'aBc000 '),
        (False, 'abc\n'),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'isalnum')
    # isalnum() accepts no arguments
    self.checkraises(TypeError, 'abc', 'isalnum', 42)
def test_isdigit(self):
    """isdigit(): non-empty and every character is a digit."""
    cases = [
        (False, ''),
        (False, 'a'),
        (True, '0'),
        (True, '0123456789'),
        (False, '0123456789a'),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'isdigit')
    # isdigit() accepts no arguments
    self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
    """title(): uppercase the first letter of each word, lowercase the rest;
    any non-letter acts as a word boundary."""
    pairs = [
        (' Hello ', ' hello '),
        ('Hello ', 'hello '),
        ('Hello ', 'Hello '),
        ('Format This As Title String', "fOrMaT thIs aS titLe String"),
        ('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String"),
        ('Getint', "getInt"),
    ]
    for expected, text in pairs:
        self.checkequal(expected, text, 'title')
    # title() accepts no arguments
    self.checkraises(TypeError, 'hello', 'title', 42)
def test_splitlines(self):
    """splitlines(): split on \\n, \\r and \\r\\n; keepends retains them."""
    cases = [
        (['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi"),
        (['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi"),
        (['abc', 'def', 'ghi'], "abc\ndef\r\nghi"),
        (['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n"),
        (['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r"),
        (['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r"),
    ]
    for expected, text in cases:
        self.checkequal(expected, text, 'splitlines')
    # truthy second argument (keepends) preserves the line breaks
    self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'],
                    "\nabc\ndef\r\nghi\n\r", 'splitlines', 1)
    self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
def test_startswith(self):
    """startswith() with plain prefixes, start/end offsets (including
    negative and out-of-range indices), and tuple-of-prefixes arguments.
    """
    self.checkequal(True, 'hello', 'startswith', 'he')
    self.checkequal(True, 'hello', 'startswith', 'hello')
    self.checkequal(False, 'hello', 'startswith', 'hello world')
    self.checkequal(True, 'hello', 'startswith', '')
    self.checkequal(False, 'hello', 'startswith', 'ello')
    self.checkequal(True, 'hello', 'startswith', 'ello', 1)
    self.checkequal(True, 'hello', 'startswith', 'o', 4)
    self.checkequal(False, 'hello', 'startswith', 'o', 5)
    self.checkequal(True, 'hello', 'startswith', '', 5)
    self.checkequal(False, 'hello', 'startswith', 'lo', 6)
    self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
    self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
    self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
    # test negative indices
    self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
    self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
    self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
    self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
    self.checkequal(False, 'hello', 'startswith', 'ello', -5)
    self.checkequal(True, 'hello', 'startswith', 'ello', -4)
    self.checkequal(False, 'hello', 'startswith', 'o', -2)
    self.checkequal(True, 'hello', 'startswith', 'o', -1)
    self.checkequal(True, 'hello', 'startswith', '', -3, -3)
    self.checkequal(False, 'hello', 'startswith', 'lo', -9)
    # wrong number / type of arguments
    self.checkraises(TypeError, 'hello', 'startswith')
    self.checkraises(TypeError, 'hello', 'startswith', 42)
    # test tuple arguments
    self.checkequal(True, 'hello', 'startswith', ('he', 'ha'))
    self.checkequal(False, 'hello', 'startswith', ('lo', 'llo'))
    self.checkequal(True, 'hello', 'startswith', ('hellox', 'hello'))
    self.checkequal(False, 'hello', 'startswith', ())
    self.checkequal(True, 'helloworld', 'startswith', ('hellowo',
                                                       'rld', 'lowo'), 3)
    self.checkequal(False, 'helloworld', 'startswith', ('hellowo', 'ello',
                                                        'rld'), 3)
    self.checkequal(True, 'hello', 'startswith', ('lo', 'he'), 0, -1)
    self.checkequal(False, 'hello', 'startswith', ('he', 'hel'), 0, 1)
    self.checkequal(True, 'hello', 'startswith', ('he', 'hel'), 0, 2)
    self.checkraises(TypeError, 'hello', 'startswith', (42,))
def test_endswith(self):
    """endswith() with plain suffixes, start/end offsets (including
    negative and out-of-range indices), and tuple-of-suffixes arguments.
    """
    self.checkequal(True, 'hello', 'endswith', 'lo')
    self.checkequal(False, 'hello', 'endswith', 'he')
    self.checkequal(True, 'hello', 'endswith', '')
    self.checkequal(False, 'hello', 'endswith', 'hello world')
    self.checkequal(False, 'helloworld', 'endswith', 'worl')
    self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
    self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
    self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
    self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
    # test negative indices
    self.checkequal(True, 'hello', 'endswith', 'lo', -2)
    self.checkequal(False, 'hello', 'endswith', 'he', -2)
    self.checkequal(True, 'hello', 'endswith', '', -3, -3)
    self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
    self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
    self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
    self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
    self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
    self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
    self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
    # wrong number / type of arguments
    self.checkraises(TypeError, 'hello', 'endswith')
    self.checkraises(TypeError, 'hello', 'endswith', 42)
    # test tuple arguments
    self.checkequal(False, 'hello', 'endswith', ('he', 'ha'))
    self.checkequal(True, 'hello', 'endswith', ('lo', 'llo'))
    self.checkequal(True, 'hello', 'endswith', ('hellox', 'hello'))
    self.checkequal(False, 'hello', 'endswith', ())
    self.checkequal(True, 'helloworld', 'endswith', ('hellowo',
                                                     'rld', 'lowo'), 3)
    self.checkequal(False, 'helloworld', 'endswith', ('hellowo', 'ello',
                                                      'rld'), 3, -1)
    self.checkequal(True, 'hello', 'endswith', ('hell', 'ell'), 0, -1)
    self.checkequal(False, 'hello', 'endswith', ('he', 'hel'), 0, 1)
    self.checkequal(True, 'hello', 'endswith', ('he', 'hell'), 0, 4)
    self.checkraises(TypeError, 'hello', 'endswith', (42,))
def test___contains__(self):
self.checkequal(True, '', '__contains__', '')
self.checkequal(True, 'abc', '__contains__', '')
self.checkequal(False, 'abc', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', '\0')
self.checkequal(True, 'abc\0', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', 'a')
self.checkequal(True, 'asdf', '__contains__', 'asdf')
self.checkequal(False, 'asd', '__contains__', 'asdf')
self.checkequal(False, '', '__contains__', 'asdf')
def test_subscript(self):
    """__getitem__ with int indices and slice objects; non-int key raises."""
    # integer indexing, including a negative index
    self.checkequal('a', 'abc', '__getitem__', 0)
    self.checkequal('c', 'abc', '__getitem__', -1)
    self.checkequal('a', 'abc', '__getitem__', 0)
    # slice indexing, including an over-long stop
    slice_cases = [
        ('abc', slice(0, 3)),
        ('abc', slice(0, 1000)),
        ('a', slice(0, 1)),
        ('', slice(0, 0)),
    ]
    for expected, key in slice_cases:
        self.checkequal(expected, 'abc', '__getitem__', key)
    # string keys are not valid indices
    self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
    """Plain (step-free) slicing, including out-of-range and empty slices."""
    vectors = [
        ('abc', 0, 1000),   # stop past the end is clamped
        ('abc', 0, 3),
        ('ab', 0, 2),
        ('bc', 1, 3),
        ('b', 1, 2),
        ('', 2, 2),         # empty slice at a valid position
        ('', 1000, 1000),   # entirely out of range
        ('', 2000, 1000),   # start beyond stop
        ('', 2, 1),
    ]
    for expected, lo, hi in vectors:
        self.checkequal(expected, 'abc', '__getitem__', slice(lo, hi))
    self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_extended_getslice(self):
    """Extended (stepped) slicing must agree with equivalent list slicing."""
    text = string.ascii_letters + string.digits
    probe = (0, None, 1, 3, 41, -1, -2, -37)
    for lo in probe:
        for hi in probe:
            # probe[1:] skips 0, which is an invalid step
            for stride in probe[1:]:
                expected = "".join(list(text)[lo:hi:stride])
                self.checkequal(expected, text, '__getitem__',
                                slice(lo, hi, stride))
def test_mul(self):
    """String repetition via __mul__; non-integer factors raise TypeError."""
    for expected, factor in [('', -1), ('', 0), ('abc', 1), ('abcabcabc', 3)]:
        self.checkequal(expected, 'abc', '__mul__', factor)
    # missing argument
    self.checkraises(TypeError, 'abc', '__mul__')
    # right operand must be an integer, not a string
    self.assertRaises(TypeError, operator.mul, 'abc', '')
    # XXX: on a 64-bit system, this doesn't raise an overflow error,
    # but either raises a MemoryError, or succeeds (if you have 54TiB)
    #self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
def test_join(self):
    """join() over lists, tuples, arbitrary sequences and generators;
    non-string items must raise TypeError without swallowing the
    original exception message.
    """
    # join now works with any sequence type
    # moved here, because the argument order is
    # different in string.join (see the test in
    # test.test_string.StringTest.test_join)
    self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
    self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
    self.checkequal('bd', '', 'join', ('', 'b', '', 'd'))
    self.checkequal('ac', '', 'join', ('a', '', 'c', ''))
    self.checkequal('w x y z', ' ', 'join', Sequence())
    self.checkequal('abc', 'a', 'join', ('abc',))
    self.checkequal('z', 'a', 'join', UserList(['z']))
    self.checkequal('a.b.c', '.', 'join', ['a', 'b', 'c'])
    self.assertRaises(TypeError, '.'.join, ['a', 'b', 3])
    # larger joins to exercise the resize/allocation paths
    for i in [5, 25, 125]:
        self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                        ['a' * i] * i)
        self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
                        ('a' * i,) * i)
    #self.checkequal(str(BadSeq1()), ' ', 'join', BadSeq1())
    self.checkequal('a b c', ' ', 'join', BadSeq2())
    self.checkraises(TypeError, ' ', 'join')
    self.checkraises(TypeError, ' ', 'join', 7)
    self.checkraises(TypeError, ' ', 'join', [1, 2, bytes()])
    # an exception raised while iterating the argument must propagate
    # with its message intact ("join() ate exception message")
    try:
        def f():
            yield 4 + ""
        self.fixtype(' ').join(f())
    except TypeError as e:
        if '+' not in str(e):
            self.fail('join() ate exception message')
    else:
        self.fail('exception not raised')
def test_formatting(self):
    """printf-style %-formatting via __mod__: %s/%d/%c conversions,
    width and precision, mapping keys, and the TypeError/ValueError
    failure modes.
    """
    self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
    self.checkequal('+10+', '+%d+', '__mod__', 10)
    self.checkequal('a', "%c", '__mod__', "a")
    # (duplicated vector kept byte-identical to the original file)
    self.checkequal('a', "%c", '__mod__', "a")
    self.checkequal('"', "%c", '__mod__', 34)
    self.checkequal('$', "%c", '__mod__', 36)
    self.checkequal('10', "%d", '__mod__', 10)
    self.checkequal('\x7f', "%c", '__mod__', 0x7f)
    # %c rejects ordinals outside the valid character range
    for ordinal in (-100, 0x200000):
        # unicode raises ValueError, str raises OverflowError
        self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
    longvalue = sys.maxsize + 10
    slongvalue = str(longvalue)
    self.checkequal(' 42', '%3ld', '__mod__', 42)
    self.checkequal('42', '%d', '__mod__', 42.0)
    self.checkequal(slongvalue, '%d', '__mod__', longvalue)
    self.checkcall('%d', '__mod__', float(longvalue))
    self.checkequal('0042.00', '%07.2f', '__mod__', 42)
    self.checkequal('0042.00', '%07.2F', '__mod__', 42)
    self.checkraises(TypeError, 'abc', '__mod__')
    self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
    self.checkraises(TypeError, '%s%s', '__mod__', (42,))
    self.checkraises(TypeError, '%c', '__mod__', (None,))
    self.checkraises(ValueError, '%(foo', '__mod__', {})
    self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
    self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
    self.checkraises(TypeError, '%d', '__mod__', (42+0j)) # no int conversion provided
    # argument names with properly nested brackets are supported
    self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
    # 100 is a magic number in PyUnicode_Format, this forces a resize
    self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
    self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
    self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
    self.checkraises(ValueError, '%10', '__mod__', (42,))
def test_floatformatting(self):
    """%-format floats at every precision from 0 to 99, over a wide
    range of magnitudes, checking only that formatting succeeds."""
    for precision in range(100):
        fmt = '%%.%if' % precision
        value = 0.01
        for _ in range(60):
            # grow the value by roughly a decade each step
            value = value * 3.14159265359 / 3.0 * 10.0
            self.checkcall(fmt, "__mod__", value)
def test_inplace_rewrites(self):
    """Case-mapping methods must not mutate the interpreter's cached
    single-character string objects in place."""
    # Each pair transforms a cached one-character string, then re-checks
    # the source object's case to prove it was left untouched.
    self.checkequal('a', 'A', 'lower')
    self.checkequal(True, 'A', 'isupper')
    self.checkequal('A', 'a', 'upper')
    self.checkequal(True, 'a', 'islower')
    self.checkequal('a', 'A', 'replace', 'A', 'a')
    self.checkequal(True, 'A', 'isupper')
    for method in ('capitalize', 'swapcase', 'title'):
        self.checkequal('A', 'a', method)
        self.checkequal(True, 'a', 'islower')
def test_partition(self):
    """partition(): 3-tuple split around the FIRST occurrence of the
    separator; a missing separator yields (whole, '', '')."""
    self.checkequal(('this is the par', 'ti', 'tion method'),
                    'this is the partition method', 'partition', 'ti')
    # from raymond's original specification
    S = 'http://www.python.org'
    spec_cases = [
        (('http', '://', 'www.python.org'), '://'),
        (('http://www.python.org', '', ''), '?'),        # separator absent
        (('', 'http://', 'www.python.org'), 'http://'),  # match at start
        (('http://www.python.', 'org', ''), 'org'),      # match at end
    ]
    for expected, sep in spec_cases:
        self.checkequal(expected, S, 'partition', sep)
    # empty separator is invalid; None is not a string
    self.checkraises(ValueError, S, 'partition', '')
    self.checkraises(TypeError, S, 'partition', None)
def test_rpartition(self):
    """rpartition(): 3-tuple split around the LAST occurrence of the
    separator; a missing separator yields ('', '', whole)."""
    self.checkequal(('this is the rparti', 'ti', 'on method'),
                    'this is the rpartition method', 'rpartition', 'ti')
    # from raymond's original specification
    S = 'http://www.python.org'
    spec_cases = [
        (('http', '://', 'www.python.org'), '://'),
        (('', '', 'http://www.python.org'), '?'),        # separator absent
        (('', 'http://', 'www.python.org'), 'http://'),  # match at start
        (('http://www.python.', 'org', ''), 'org'),      # match at end
    ]
    for expected, sep in spec_cases:
        self.checkequal(expected, S, 'rpartition', sep)
    # empty separator is invalid; None is not a string
    self.checkraises(ValueError, S, 'rpartition', '')
    self.checkraises(TypeError, S, 'rpartition', None)
def test_none_arguments(self):
    # issue 11828: passing None for a start/end slice argument must act
    # like the corresponding default (start=0 / end=len(s)).
    s = 'hello'
    # find
    self.checkequal(2, s, 'find', 'l', None)
    self.checkequal(3, s, 'find', 'l', -2, None)
    self.checkequal(2, s, 'find', 'l', None, -2)
    self.checkequal(0, s, 'find', 'h', None, None)

    # rfind
    self.checkequal(3, s, 'rfind', 'l', None)
    self.checkequal(3, s, 'rfind', 'l', -2, None)
    self.checkequal(2, s, 'rfind', 'l', None, -2)
    self.checkequal(0, s, 'rfind', 'h', None, None)

    # index (same semantics as find for present substrings)
    self.checkequal(2, s, 'index', 'l', None)
    self.checkequal(3, s, 'index', 'l', -2, None)
    self.checkequal(2, s, 'index', 'l', None, -2)
    self.checkequal(0, s, 'index', 'h', None, None)

    # rindex
    self.checkequal(3, s, 'rindex', 'l', None)
    self.checkequal(3, s, 'rindex', 'l', -2, None)
    self.checkequal(2, s, 'rindex', 'l', None, -2)
    self.checkequal(0, s, 'rindex', 'h', None, None)

    # count
    self.checkequal(2, s, 'count', 'l', None)
    self.checkequal(1, s, 'count', 'l', -2, None)
    self.checkequal(1, s, 'count', 'l', None, -2)
    self.checkequal(0, s, 'count', 'x', None, None)

    # endswith
    self.checkequal(True, s, 'endswith', 'o', None)
    self.checkequal(True, s, 'endswith', 'lo', -2, None)
    self.checkequal(True, s, 'endswith', 'l', None, -2)
    self.checkequal(False, s, 'endswith', 'x', None, None)

    # startswith
    self.checkequal(True, s, 'startswith', 'h', None)
    self.checkequal(True, s, 'startswith', 'l', -2, None)
    self.checkequal(True, s, 'startswith', 'h', None, -2)
    self.checkequal(False, s, 'startswith', 'x', None, None)
def test_find_etc_raise_correct_error_messages(self):
    """The TypeError raised for excess arguments must name the method
    that was called (issue 11828)."""
    s = 'hello'
    x = 'x'
    for name in ('find', 'rfind', 'index', 'rindex', 'count',
                 'startswith', 'endswith'):
        # each method takes at most 3 positional args; a 4th must raise
        # a TypeError whose message starts with "<method>("
        self.assertRaisesRegex(TypeError, r'^%s\(' % name,
                               getattr(s, name), x, None, None, None)
class MixinStrUnicodeTest:
    # Additional tests that only work with str and unicode.

    def test_bug1001011(self):
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        # Check the optimisation still occurs for standard objects.
        t = self.type2test
        class subclass(t):
            pass
        # joining a single subclass instance must NOT return it as-is
        # (the result has to be of the exact base type t)
        s1 = subclass("abcd")
        s2 = t().join([s1])
        self.assertIsNot(s1, s2)
        self.assertIs(type(s2), t)

        # joining a single bare t instance may return the very same
        # object (the single-item join optimisation)
        s1 = t("abcd")
        s2 = t().join([s1])
        self.assertIs(s1, s2)

        # Should also test mixed-type join.
        if t is str:
            # same checks, but with a plain str separator literal
            s1 = subclass("abcd")
            s2 = "".join([s1])
            self.assertIsNot(s1, s2)
            self.assertIs(type(s2), t)

            s1 = t("abcd")
            s2 = "".join([s1])
            self.assertIs(s1, s2)

##        elif t is str8:
##            s1 = subclass("abcd")
##            s2 = "".join([s1])
##            self.assertIsNot(s1, s2)
##            self.assertIs(type(s2), str) # promotes!
##            s1 = t("abcd")
##            s2 = "".join([s1])
##            self.assertIsNot(s1, s2)
##            self.assertIs(type(s2), str) # promotes!

        else:
            self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
| wdv4758h/ZipPy | lib-python/3/test/string_tests.py | Python | bsd-3-clause | 61,662 |
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
__all__ = ['equal']
import dragon.ops as ops
def equal(a, b, name=None):
    """Element-wise equality comparison, TensorFlow-style front-end for
    Dragon's ``Equal`` operator.

    Parameters
    ----------
    a : Tensor
        Left operand.
    b : Tensor
        Right operand.
    name : str, optional
        Accepted for tf API compatibility.
        NOTE(review): ``name`` is currently ignored -- confirm whether
        ``ops.Equal`` should receive it.
    """
    return ops.Equal([a, b])
#
# (C) Copyright 2005,2007 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Author: Tim Potter <tpot@hp.com>
"""pywbem.twisted - WBEM client bindings for Twisted Python.
This module contains factory classes that produce WBEMClient instances
that perform WBEM requests over HTTP using the
twisted.protocols.http.HTTPClient base class.
"""
from twisted.internet import reactor, protocol, defer
from twisted.web import http, client, error
from pywbem import CIMClass, CIMClassName, CIMInstance, CIMInstanceName, CIMError, cim_types, cim_xml
try:
from elementtree.ElementTree import fromstring, tostring
except ImportError, arg:
from xml.etree.ElementTree import fromstring, tostring
import string
from types import StringTypes
from datetime import datetime, timedelta
class WBEMClient(http.HTTPClient):
    """A HTTPClient subclass that handles WBEM requests.

    Sends one CIM-over-HTTP POST on connection and delivers the result
    (or a CIMError) through ``self.factory.deferred``.
    """

    status = None  # HTTP status code string, set by handleStatus()

    def connectionMade(self):
        """Send a HTTP POST command with the appropriate CIM over HTTP
        headers and payload."""
        self.factory.request_xml = str(self.factory.payload)

        self.sendCommand('POST', '/cimom')

        self.sendHeader('Host', '%s:%d' %
                        (self.transport.addr[0], self.transport.addr[1]))
        self.sendHeader('User-Agent', 'pywbem/twisted')
        self.sendHeader('Content-length', len(self.factory.payload))
        self.sendHeader('Content-type', 'application/xml')

        # HTTP basic auth; encodestring is the Python 2 base64 API used
        # throughout this (Python 2) module.
        import base64
        auth = base64.encodestring('%s:%s' % (self.factory.creds[0],
                                              self.factory.creds[1]))[:-1]
        self.sendHeader('Authorization', 'Basic %s' % auth)

        self.sendHeader('CIMOperation', str(self.factory.operation))
        self.sendHeader('CIMMethod', str(self.factory.method))
        self.sendHeader('CIMObject', str(self.factory.object))

        self.endHeaders()

        # TODO: Figure out why twisted doesn't support unicode. An
        # exception should be thrown by the str() call if the payload
        # can't be converted to the current codepage.
        self.transport.write(str(self.factory.payload))

    def handleResponse(self, data):
        """Called when all response data has been received."""
        self.factory.response_xml = data
        # Parse the body only on HTTP success; non-200 responses were
        # already errbacked from handleEndHeaders().
        if self.status == '200':
            self.factory.parseErrorAndResponse(data)
        # Clear the deferred so the disconnect below is not reported as
        # an error by clientConnectionLost().
        self.factory.deferred = None
        self.transport.loseConnection()

    def handleStatus(self, version, status, message):
        """Save the status code for processing when we get to the end
        of the headers."""
        self.status = status
        self.message = message

    def handleHeader(self, key, value):
        """Handle header values."""
        import urllib
        if key == 'CIMError':
            self.CIMError = urllib.unquote(value)
        if key == 'PGErrorDetail':
            self.PGErrorDetail = urllib.unquote(value)

    def handleEndHeaders(self):
        """Check whether the status was OK and raise an error if not
        using previously saved header information."""
        if self.status != '200':
            # BUG FIX: the original checked hasattr(self, 'cimerror') /
            # 'errordetail' -- attributes that are never set (handleHeader
            # stores CIMError / PGErrorDetail) -- and then referenced the
            # undefined bare names `cimerror` / `errordetail`, so a CIM
            # error response raised NameError instead of errbacking with
            # the server's error details.
            if not hasattr(self, 'CIMError') or \
               not hasattr(self, 'PGErrorDetail'):
                self.factory.deferred.errback(
                    CIMError(0, 'HTTP error %s: %s' %
                             (self.status, self.message)))
            else:
                self.factory.deferred.errback(
                    CIMError(0, '%s: %s' % (self.CIMError,
                                            self.PGErrorDetail)))
class WBEMClientFactory(protocol.ClientFactory):
    """Create instances of the WBEMClient class.

    Base class for the per-operation factories below.  Subclasses build a
    CIM-XML payload with imethodcallPayload()/methodcallPayload() and
    override parseResponse() to turn the reply XML into Python objects.
    The result is delivered through ``self.deferred``.
    """

    # last request/response documents, kept for debugging
    request_xml = None
    response_xml = None
    xml_header = '<?xml version="1.0" encoding="utf-8" ?>'

    def __init__(self, creds, operation, method, object, payload):
        self.creds = creds
        self.operation = operation
        self.method = method
        self.object = object
        self.payload = payload
        self.protocol = lambda: WBEMClient()
        # fired with the parsed result, or errbacked with a CIMError
        self.deferred = defer.Deferred()

    def clientConnectionFailed(self, connector, reason):
        # Propagate connection failures through the deferred.
        if self.deferred is not None:
            reactor.callLater(0, self.deferred.errback, reason)

    def clientConnectionLost(self, connector, reason):
        # WBEMClient.handleResponse() sets self.deferred to None after a
        # completed request, so only unexpected disconnects reach here.
        if self.deferred is not None:
            reactor.callLater(0, self.deferred.errback, reason)

    def imethodcallPayload(self, methodname, localnsp, **kwargs):
        """Generate the XML payload for an intrinsic methodcall.

        NOTE(review): the bare name `pywbem` used below is only bound by
        the later `import pywbem.tupletree` statement -- confirm import
        order if this module is refactored.
        """
        param_list = [pywbem.IPARAMVALUE(x[0], pywbem.tocimxml(x[1]))
                      for x in kwargs.items()]

        payload = pywbem.CIM(
            pywbem.MESSAGE(
                pywbem.SIMPLEREQ(
                    pywbem.IMETHODCALL(
                        methodname,
                        pywbem.LOCALNAMESPACEPATH(
                            [pywbem.NAMESPACE(ns)
                             for ns in string.split(localnsp, '/')]),
                        param_list)),
                '1001', '1.0'),
            '2.0', '2.0')

        return self.xml_header + payload.toxml()

    def methodcallPayload(self, methodname, obj, namespace, **kwargs):
        """Generate the XML payload for an extrinsic methodcall."""

        # Target is either an instance path or a class name; both are
        # wrapped in a LOCALNAMESPACEPATH for the given namespace.
        if isinstance(obj, CIMInstanceName):

            path = obj.copy()
            path.host = None
            path.namespace = None

            localpath = pywbem.LOCALINSTANCEPATH(
                pywbem.LOCALNAMESPACEPATH(
                    [pywbem.NAMESPACE(ns)
                     for ns in string.split(namespace, '/')]),
                path.tocimxml())
        else:
            localpath = pywbem.LOCALCLASSPATH(
                pywbem.LOCALNAMESPACEPATH(
                    [pywbem.NAMESPACE(ns)
                     for ns in string.split(namespace, '/')]),
                obj)

        def paramtype(obj):
            """Return a string to be used as the CIMTYPE for a parameter."""
            if isinstance(obj, cim_types.CIMType):
                return obj.cimtype
            elif type(obj) == bool:
                return 'boolean'
            elif isinstance(obj, StringTypes):
                return 'string'
            elif isinstance(obj, (datetime, timedelta)):
                return 'datetime'
            elif isinstance(obj, (CIMClassName, CIMInstanceName)):
                return 'reference'
            elif isinstance(obj, (CIMClass, CIMInstance)):
                return 'string'
            elif isinstance(obj, list):
                # arrays are typed after their first element
                return paramtype(obj[0])
            raise TypeError('Unsupported parameter type "%s"' % type(obj))

        def paramvalue(obj):
            """Return a cim_xml node to be used as the value for a
            parameter."""
            if isinstance(obj, (datetime, timedelta)):
                # NOTE(review): CIMDateTime is not imported in this
                # module -- passing a datetime/timedelta parameter would
                # raise NameError here; confirm the intended import.
                obj = CIMDateTime(obj)
            if isinstance(obj, (cim_types.CIMType, bool, StringTypes)):
                return cim_xml.VALUE(cim_types.atomic_to_cim_xml(obj))
            if isinstance(obj, (CIMClassName, CIMInstanceName)):
                return cim_xml.VALUE_REFERENCE(obj.tocimxml())
            if isinstance(obj, (CIMClass, CIMInstance)):
                # embedded objects are carried as their XML string form
                return cim_xml.VALUE(obj.tocimxml().toxml())
            if isinstance(obj, list):
                if isinstance(obj[0], (CIMClassName, CIMInstanceName)):
                    return cim_xml.VALUE_REFARRAY([paramvalue(x) for x in obj])
                return cim_xml.VALUE_ARRAY([paramvalue(x) for x in obj])
            raise TypeError('Unsupported parameter type "%s"' % type(obj))

        param_list = [pywbem.PARAMVALUE(x[0],
                                        paramvalue(x[1]),
                                        paramtype(x[1]))
                      for x in kwargs.items()]

        payload = pywbem.CIM(
            pywbem.MESSAGE(
                pywbem.SIMPLEREQ(
                    pywbem.METHODCALL(methodname,
                                      localpath,
                                      param_list)),
                '1001', '1.0'),
            '2.0', '2.0')

        return self.xml_header + payload.toxml()

    def parseErrorAndResponse(self, data):
        """Parse returned XML for errors, then convert into
        appropriate Python objects."""

        xml = fromstring(data)
        error = xml.find('.//ERROR')

        if error is None:
            self.deferred.callback(self.parseResponse(xml))
            return

        # Only a non-integer CODE attribute falls back to 0; a missing
        # attribute would raise KeyError.
        try:
            code = int(error.attrib['CODE'])
        except ValueError:
            code = 0

        self.deferred.errback(CIMError(code, error.attrib['DESCRIPTION']))

    def parseResponse(self, xml):
        """Parse returned XML and convert into appropriate Python
        objects.  Override in subclass"""

        pass
# TODO: Eww - we should get rid of the tupletree, tupleparse modules
# and replace with elementtree based code.
import pywbem.tupletree
class EnumerateInstances(WBEMClientFactory):
    """Factory to produce EnumerateInstances WBEM clients."""

    def __init__(self, creds, classname, namespace = 'root/cimv2', **kwargs):
        # kept so __repr__ can describe the request
        self.classname = classname
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'EnumerateInstances',
            namespace,
            ClassName = CIMClassName(classname),
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'EnumerateInstances',
            object = namespace,
            payload = payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.classname, id(self))

    def parseResponse(self, xml):
        # convert every VALUE.NAMEDINSTANCE element into a CIMInstance
        tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//VALUE.NAMEDINSTANCE')]

        return [pywbem.tupleparse.parse_value_namedinstance(x) for x in tt]
class EnumerateInstanceNames(WBEMClientFactory):
    """Factory to produce EnumerateInstanceNames WBEM clients."""

    def __init__(self, creds, classname, namespace = 'root/cimv2', **kwargs):
        # kept so __repr__ can describe the request
        self.classname = classname
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'EnumerateInstanceNames',
            namespace,
            ClassName = CIMClassName(classname),
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'EnumerateInstanceNames',
            object = namespace,
            payload = payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.classname, id(self))

    def parseResponse(self, xml):
        # convert every INSTANCENAME element into a CIMInstanceName
        tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//INSTANCENAME')]

        names = [pywbem.tupleparse.parse_instancename(x) for x in tt]

        # the reply omits the namespace, so stamp the request's namespace
        # onto every returned path (list comp used only for side effect)
        [setattr(n, 'namespace', self.namespace) for n in names]

        return names
class GetInstance(WBEMClientFactory):
    """Factory to produce GetInstance WBEM clients."""

    def __init__(self, creds, instancename, namespace = 'root/cimv2', **kwargs):
        # kept so __repr__ can describe the request
        self.instancename = instancename
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'GetInstance',
            namespace,
            InstanceName = instancename,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'GetInstance',
            object = namespace,
            payload = payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.instancename, id(self))

    def parseResponse(self, xml):
        # the reply carries a single INSTANCE element
        tt = pywbem.tupletree.xml_to_tupletree(
            tostring(xml.find('.//INSTANCE')))

        return pywbem.tupleparse.parse_instance(tt)
class DeleteInstance(WBEMClientFactory):
    """Factory to produce DeleteInstance WBEM clients.

    DeleteInstance returns no value, so the base-class parseResponse()
    (which returns None) is intentionally not overridden.
    """

    def __init__(self, creds, instancename, namespace = 'root/cimv2', **kwargs):
        # kept so __repr__ can describe the request
        self.instancename = instancename
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'DeleteInstance',
            namespace,
            InstanceName = instancename,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'DeleteInstance',
            object = namespace,
            payload = payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.instancename, id(self))
class CreateInstance(WBEMClientFactory):
    """Factory to produce CreateInstance WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, instance, namespace = 'root/cimv2', **kwargs):

        payload = self.imethodcallPayload(
            'CreateInstance',
            namespace,
            NewInstance = instance,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'CreateInstance',
            object = namespace,
            payload = payload)

    def parseResponse(self, xml):
        # the reply carries the path of the newly created instance
        tt = pywbem.tupletree.xml_to_tupletree(
            tostring(xml.find('.//INSTANCENAME')))

        return pywbem.tupleparse.parse_instancename(tt)
class ModifyInstance(WBEMClientFactory):
    """Factory to produce ModifyInstance WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, instancename, instance, namespace = 'root/cimv2',
                 **kwargs):

        # NOTE(review): `CIMNamedInstance` is not defined or imported
        # anywhere in this module, so instantiating this class raises
        # NameError -- confirm the intended wrapper type before use.
        wrapped_instance = CIMNamedInstance(instancename, instance)

        payload = self.imethodcallPayload(
            'ModifyInstance',
            namespace,
            ModifiedInstance = wrapped_instance,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'ModifyInstance',
            object = namespace,
            payload = payload)
class EnumerateClassNames(WBEMClientFactory):
    """Factory to produce EnumerateClassNames WBEM clients."""

    def __init__(self, creds, namespace = 'root/cimv2', **kwargs):
        # BUG FIX: the original assigned the undefined name
        # `LocalNamespacePath` (NameError on instantiation) and never set
        # self.namespace, which __repr__ reads.  It also passed
        # `LocalNamespacePath` as the CIMObject header value; use the
        # namespace, as every sibling factory does.
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'EnumerateClassNames',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'EnumerateClassNames',
            object = namespace,
            payload = payload)

    def __repr__(self):
        return '<%s(/%s) at 0x%x>' % \
            (self.__class__, self.namespace, id(self))

    def parseResponse(self, xml):
        # convert every CLASSNAME element into a CIMClassName
        tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//CLASSNAME')]

        return [pywbem.tupleparse.parse_classname(x) for x in tt]
class EnumerateClasses(WBEMClientFactory):
    """Factory to produce EnumerateClasses WBEM clients."""

    def __init__(self, creds, namespace = 'root/cimv2', **kwargs):
        # BUG FIX: the original assigned the undefined name
        # `LocalNamespacePath` to self.localnsp (NameError on
        # instantiation) and never set self.namespace, which __repr__
        # reads.
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'EnumerateClasses',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'EnumerateClasses',
            object = namespace,
            payload = payload)

    def __repr__(self):
        return '<%s(/%s) at 0x%x>' % \
            (self.__class__, self.namespace, id(self))

    def parseResponse(self, xml):
        # convert every CLASS element into a CIMClass
        tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
              for x in xml.findall('.//CLASS')]

        return [pywbem.tupleparse.parse_class(x) for x in tt]
class GetClass(WBEMClientFactory):
    """Factory to produce GetClass WBEM clients."""

    def __init__(self, creds, classname, namespace = 'root/cimv2', **kwargs):
        # kept so __repr__ can describe the request
        self.classname = classname
        self.namespace = namespace

        payload = self.imethodcallPayload(
            'GetClass',
            namespace,
            ClassName = CIMClassName(classname),
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'GetClass',
            object = namespace,
            payload = payload)

    def __repr__(self):
        return '<%s(/%s:%s) at 0x%x>' % \
            (self.__class__, self.namespace, self.classname, id(self))

    def parseResponse(self, xml):
        # the reply carries a single CLASS element
        tt = pywbem.tupletree.xml_to_tupletree(
            tostring(xml.find('.//CLASS')))

        return pywbem.tupleparse.parse_class(tt)
class Associators(WBEMClientFactory):
    """Factory to produce Associators WBEM clients.

    No parseResponse() override is provided here, so the base-class
    implementation (which returns None) is used for the reply.
    """

    # TODO: Implement __repr__ method

    def __init__(self, creds, obj, namespace = 'root/cimv2', **kwargs):

        # target can be an instance path or a class name (given as str)
        if isinstance(obj, CIMInstanceName):
            kwargs['ObjectName'] = obj
        else:
            kwargs['ObjectName'] = CIMClassName(obj)

        payload = self.imethodcallPayload(
            'Associators',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'Associators',
            object = namespace,
            payload = payload)
class AssociatorNames(WBEMClientFactory):
    """Factory to produce AssociatorNames WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, obj, namespace = 'root/cimv2', **kwargs):

        # target can be an instance path or a class name (given as str)
        if isinstance(obj, CIMInstanceName):
            kwargs['ObjectName'] = obj
        else:
            kwargs['ObjectName'] = CIMClassName(obj)

        payload = self.imethodcallPayload(
            'AssociatorNames',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'AssociatorNames',
            object = namespace,
            payload = payload)

    def parseResponse(self, xml):
        # instance-level replies carry INSTANCENAME elements;
        # class-level replies carry OBJECTPATH elements instead
        if len(xml.findall('.//INSTANCENAME')) > 0:

            tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
                  for x in xml.findall('.//INSTANCENAME')]

            return [pywbem.tupleparse.parse_instancename(x) for x in tt]

        else:

            tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
                  for x in xml.findall('.//OBJECTPATH')]

            # parse_objectpath returns a tuple; element [2] is the path
            return [pywbem.tupleparse.parse_objectpath(x)[2] for x in tt]
class References(WBEMClientFactory):
    """Factory to produce References WBEM clients.

    No parseResponse() override is provided here, so the base-class
    implementation (which returns None) is used for the reply.
    """

    def __init__(self, creds, obj, namespace = 'root/cimv2', **kwargs):

        # target can be an instance path or a class name (given as str)
        if isinstance(obj, CIMInstanceName):
            kwargs['ObjectName'] = obj
        else:
            kwargs['ObjectName'] = CIMClassName(obj)

        payload = self.imethodcallPayload(
            'References',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'References',
            object = namespace,
            payload = payload)
class ReferenceNames(WBEMClientFactory):
    """Factory to produce ReferenceNames WBEM clients."""

    # TODO: Implement __repr__ method

    def __init__(self, creds, obj, namespace = 'root/cimv2', **kwargs):

        # target can be an instance path or a class name (given as str)
        if isinstance(obj, CIMInstanceName):
            kwargs['ObjectName'] = obj
        else:
            kwargs['ObjectName'] = CIMClassName(obj)

        payload = self.imethodcallPayload(
            'ReferenceNames',
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = 'ReferenceNames',
            object = namespace,
            payload = payload)

    def parseResponse(self, xml):
        # instance-level replies carry INSTANCENAME elements;
        # class-level replies carry OBJECTPATH elements instead
        if len(xml.findall('.//INSTANCENAME')) > 0:

            tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
                  for x in xml.findall('.//INSTANCENAME')]

            return [pywbem.tupleparse.parse_instancename(x) for x in tt]

        else:

            tt = [pywbem.tupletree.xml_to_tupletree(tostring(x))
                  for x in xml.findall('.//OBJECTPATH')]

            # parse_objectpath returns a tuple; element [2] is the path
            return [pywbem.tupleparse.parse_objectpath(x)[2] for x in tt]
class InvokeMethod(WBEMClientFactory):
    """Factory to produce InvokeMethod WBEM clients.

    Invokes the extrinsic method `MethodName` on `ObjectName`, which may
    be a class name string, a CIMClassName or a CIMInstanceName.
    """

    def __init__(self, creds, MethodName, ObjectName, namespace = 'root/cimv2',
                 **kwargs):

        # Convert string to CIMClassName

        obj = ObjectName

        if isinstance(obj, StringTypes):
            obj = CIMClassName(obj, namespace = namespace)

        # fill in a missing namespace on an instance path without
        # mutating the caller's object
        if isinstance(obj, CIMInstanceName) and obj.namespace is None:
            obj = ObjectName.copy()
            obj.namespace = namespace

        # Make the method call

        payload = self.methodcallPayload(
            MethodName,
            obj,
            namespace,
            **kwargs)

        WBEMClientFactory.__init__(
            self,
            creds,
            operation = 'MethodCall',
            method = MethodName,
            object = obj,
            payload = payload)
| openlmi/openlmi-doc | doc/python/pywbem/twisted_client.py | Python | gpl-2.0 | 22,547 |
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import warnings
import math
import numpy
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
                cval=0.0, origin=0):
    """Calculate a one-dimensional correlation along the given axis.

    The lines of the array along the given axis are correlated with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : array
        One-dimensional sequence of numbers.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Examples
    --------
    >>> from scipy.ndimage import correlate1d
    >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
    array([ 8, 26,  8, 12,  7, 28, 36,  9])
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    if weights.ndim != 1 or weights.shape[0] < 1:
        raise RuntimeError('no filter weights given')
    if not weights.flags.contiguous:
        # the C extension requires a contiguous weights buffer
        weights = weights.copy()
    axis = _ni_support._check_axis(axis, input.ndim)
    # origin must keep the kernel anchor inside the kernel itself
    if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
                                            origin > len(weights)):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # the heavy lifting happens in the C extension; `output` is filled
    # in place and then returned
    _nd_image.correlate1d(input, weights, axis, output, mode, cval,
                          origin)
    return output
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
               cval=0.0, origin=0):
    """Calculate a one-dimensional convolution along the given axis.

    The lines of the array along the given axis are convolved with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        One-dimensional sequence of numbers.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Returns
    -------
    convolve1d : ndarray
        Convolved array with same shape as input

    Examples
    --------
    >>> from scipy.ndimage import convolve1d
    >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
    array([14, 24,  4, 13, 12, 36, 27,  0])
    """
    # Convolution is correlation with a flipped kernel and mirrored origin.
    flipped = weights[::-1]
    new_origin = -origin
    if len(weights) % 2 == 0:
        # an even-length kernel shifts by one extra sample when flipped
        new_origin -= 1
    return correlate1d(input, flipped, axis, output, mode, cval, new_origin)
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)])
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(p(x), dtype=numpy.double)
phi_x /= phi_x.sum()
if order > 0:
q = numpy.polynomial.Polynomial([1])
p_deriv = p.deriv()
for _ in range(order):
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
q = q.deriv() + q * p_deriv
phi_x *= q(x)
return phi_x
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
                      mode="reflect", cval=0.0, truncate=4.0):
    """One-dimensional Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar
        standard deviation for Gaussian kernel
    %(axis)s
    order : int, optional
        An order of 0 corresponds to convolution with a Gaussian
        kernel. A positive order corresponds to convolution with
        that derivative of a Gaussian.
    %(output)s
    %(mode)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.

    Returns
    -------
    gaussian_filter1d : ndarray

    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter1d
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
    array([ 1.42704095,  2.06782203,  3.        ,  3.93217797,  4.57295905])
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
    array([ 2.91948343,  2.95023502,  3.        ,  3.04976498,  3.08051657])
    >>> import matplotlib.pyplot as plt
    >>> np.random.seed(280490)
    >>> x = np.random.randn(101).cumsum()
    >>> y3 = gaussian_filter1d(x, 3)
    >>> y6 = gaussian_filter1d(x, 6)
    >>> plt.plot(x, 'k', label='original data')
    >>> plt.plot(y3, '--', label='filtered, sigma=3')
    >>> plt.plot(y6, ':', label='filtered, sigma=6')
    >>> plt.legend()
    >>> plt.grid()
    >>> plt.show()
    """
    # kernel radius: `truncate` standard deviations, rounded to an int
    radius = int(truncate * float(sigma) + 0.5)
    # correlate1d computes correlation, so flip the kernel to obtain
    # a convolution
    kernel = _gaussian_kernel1d(sigma, order, radius)[::-1]
    return correlate1d(input, kernel, axis, output, mode, cval, 0)
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
                    mode="reflect", cval=0.0, truncate=4.0):
    """Multidimensional Gaussian filter.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    order : int or sequence of ints, optional
        The order of the filter along each axis is given as a sequence
        of integers, or as a single number.  An order of 0 corresponds
        to convolution with a Gaussian kernel. A positive order
        corresponds to convolution with that derivative of a Gaussian.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    truncate : float
        Truncate the filter at this many standard deviations.
        Default is 4.0.

    Returns
    -------
    gaussian_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    one-dimensional convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.

    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter
    >>> a = np.arange(50, step=2).reshape((5,5))
    >>> a
    array([[ 0,  2,  4,  6,  8],
           [10, 12, 14, 16, 18],
           [20, 22, 24, 26, 28],
           [30, 32, 34, 36, 38],
           [40, 42, 44, 46, 48]])
    >>> gaussian_filter(a, sigma=1)
    array([[ 4,  6,  8,  9, 11],
           [10, 12, 14, 15, 17],
           [20, 22, 24, 25, 27],
           [29, 31, 33, 34, 36],
           [35, 37, 39, 40, 42]])

    >>> from scipy import misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = gaussian_filter(ascent, sigma=5)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    # broadcast scalar parameters to one value per axis
    orders = _ni_support._normalize_sequence(order, input.ndim)
    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    axes = list(range(input.ndim))
    # drop axes whose sigma is effectively zero -- nothing to filter there
    axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
            for ii in range(len(axes)) if sigmas[ii] > 1e-15]
    if len(axes) > 0:
        for axis, sigma, order, mode in axes:
            gaussian_filter1d(input, sigma, axis, order, output,
                              mode, cval, truncate)
            # feed the partially filtered result into the next 1-D pass
            input = output
    else:
        # no axis filtered: copy the input through unchanged
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Prewitt filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.prewitt(ascent)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    axis = _ni_support._check_axis(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # central difference along the derivative axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by in-place uniform smoothing along every other axis
    for smooth_axis in range(input.ndim):
        if smooth_axis == axis:
            continue
        correlate1d(output, [1, 1, 1], smooth_axis, output,
                    modes[smooth_axis], cval, 0)
    return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Sobel filter.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.sobel(ascent)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    axis = _ni_support._check_axis(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # central difference along the derivative axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by in-place triangular smoothing along the other axes
    for smooth_axis in range(input.ndim):
        if smooth_axis == axis:
            continue
        correlate1d(output, [1, 2, 1], smooth_axis, output,
                    modes[smooth_axis], cval, 0)
    return output
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
                    cval=0.0,
                    extra_arguments=(),
                    extra_keywords=None):
    """
    N-dimensional Laplace filter using a provided second derivative function.

    Parameters
    ----------
    %(input)s
    derivative2 : callable
        Callable with the following signature::

            derivative2(input, axis, output, mode, cval,
                        *extra_arguments, **extra_keywords)

        See `extra_arguments`, `extra_keywords` below.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # first axis writes its result directly into `output` ...
        derivative2(input, axes[0], output, modes[0], cval,
                    *extra_arguments, **extra_keywords)
        for ii in range(1, len(axes)):
            # ... remaining axes receive output.dtype (not the array), so
            # derivative2 allocates a fresh result, which is accumulated
            tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
                              *extra_arguments, **extra_keywords)
            output += tmp
    else:
        # zero-dimensional input: Laplacian degenerates to a copy
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
    """N-dimensional Laplace filter based on approximate second derivatives.

    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    """
    def second_derivative(input, axis, output, mode, cval):
        # [1, -2, 1] is the standard discrete second-difference kernel.
        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)

    return generic_laplace(input, second_derivative, output, mode, cval)
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, **kwargs):
    """Multidimensional Laplace filter using gaussian second derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().
    """
    input = numpy.asarray(input)

    def second_derivative(input, axis, output, mode, cval, sigma, **kwargs):
        # Derivative order 2 along `axis`, 0 elsewhere: a Gaussian second
        # derivative in a single direction.
        order = [0] * input.ndim
        order[axis] = 2
        return gaussian_filter(input, sigma, order, output, mode, cval,
                               **kwargs)

    return generic_laplace(input, second_derivative, output, mode, cval,
                           extra_arguments=(sigma,),
                           extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None):
    """Gradient magnitude using a provided gradient function.
    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::
            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)
        See `extra_arguments`, `extra_keywords` below.
        `derivative` can assume that `input` and `output` are ndarrays.
        Note that the output from `derivative` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    # Materialize the keyword dict lazily to avoid a mutable default.
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # First axis: write the derivative straight into `output` ...
        derivative(input, axes[0], output, modes[0], cval,
                   *extra_arguments, **extra_keywords)
        # ... then square it in place; `output` becomes the sum of squares.
        numpy.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # Passing `output.dtype` (a dtype, not an array) makes
            # `derivative` allocate and return a fresh array for this axis.
            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
                             *extra_arguments, **extra_keywords)
            numpy.multiply(tmp, tmp, tmp)
            output += tmp
        # This allows the sqrt to work with a different default casting
        numpy.sqrt(output, output, casting='unsafe')
    else:
        # Zero-dimensional input: the gradient magnitude degenerates to a copy.
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
                                mode="reflect", cval=0.0, **kwargs):
    """Multidimensional gradient magnitude using Gaussian derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().

    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.
    """
    input = numpy.asarray(input)

    def first_derivative(input, axis, output, mode, cval, sigma, **kwargs):
        # Derivative order 1 along `axis`, 0 elsewhere: a Gaussian first
        # derivative in a single direction.
        order = [0] * input.ndim
        order[axis] = 1
        return gaussian_filter(input, sigma, order, output, mode,
                               cval, **kwargs)

    return generic_gradient_magnitude(input, first_derivative, output, mode,
                                      cval, extra_arguments=(sigma,),
                                      extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution):
    """Shared backend for `correlate` and `convolve`.

    When `convolution` is true, the weights are mirrored and the origins
    negated so that the single C correlation kernel implements both
    operations.
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    wshape = [ii for ii in weights.shape if ii > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError('filter weights array has incorrect shape.')
    if convolution:
        # Convolution == correlation with a kernel mirrored on every axis.
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for ii in range(len(origins)):
            origins[ii] = -origins[ii]
            # Even-sized axes need one extra shift to keep the kernel
            # anchored on the same element after mirroring.
            if not weights.shape[ii] & 1:
                origins[ii] -= 1
    # NOTE(review): the upper bound uses `> lenw` (not `>= lenw` as in the
    # other filters) -- presumably to accommodate the even-kernel origin
    # shift above; confirm before tightening.
    for origin, lenw in zip(origins, wshape):
        if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
            raise ValueError('invalid origin')
    if not weights.flags.contiguous:
        # The C implementation requires a contiguous weights buffer.
        weights = weights.copy()
    output = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
              origin=0):
    """
    Multi-dimensional correlation.

    The array is correlated with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        array of weights, same number of dimensions as input
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    See Also
    --------
    convolve : Convolve an image with a kernel.
    """
    # The trailing False selects correlation (no kernel flip) in the
    # shared backend.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
             origin=0):
    """
    Multidimensional convolution.

    The array is convolved with the given kernel.

    Parameters
    ----------
    %(input)s
    weights : array_like
        Array of weights, same number of dimensions as input
    %(output)s
    %(mode_multiple)s
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    %(origin_multiple)s

    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.

    See Also
    --------
    correlate : Correlate an image with a kernel.

    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
    W is the `weights` kernel,
    j is the n-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of
    W, specified by `origin` in the input parameters.

    Examples
    --------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e. where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as
    zeros.

    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10,  7,  4],
           [10,  3, 11, 11],
           [15, 12, 14,  7],
           [12,  3,  7,  0]])

    With ``mode='reflect'`` (the default), outer values are reflected at
    the edge of `input` to fill in missing values; with ``mode='nearest'``
    the single nearest value is repeated as many times as needed to match
    the overlapping `weights`.
    """
    # The trailing True makes the shared backend mirror the kernel,
    # turning correlation into convolution.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, True)
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a one-dimensional uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a
    uniform filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    # The origin may only shift the window while keeping the anchor inside it.
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    _nd_image.uniform_filter1d(input, size, axis, output,
                               _ni_support._extend_mode_to_code(mode),
                               cval, origin)
    return output
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
                   cval=0.0, origin=0):
    """Multi-dimensional uniform filter.

    Parameters
    ----------
    %(input)s
    size : int or sequence of ints, optional
        The sizes of the uniform filter are given for each axis as a
        sequence, or as a single number, in which case the size is
        equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    uniform_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    The multi-dimensional filter is implemented as a sequence of
    one-dimensional uniform filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.
    """
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    sizes = _ni_support._normalize_sequence(size, input.ndim)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # Only axes with a window larger than one element need a pass.
    stages = [(axis, sizes[axis], origins[axis], modes[axis])
              for axis in range(input.ndim) if sizes[axis] > 1]
    if not stages:
        output[...] = input[...]
        return output
    for axis, length, shift, axis_mode in stages:
        uniform_filter1d(input, int(length), axis, output, axis_mode,
                         cval, shift)
        # Each pass filters the result of the previous one.
        input = output
    return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a one-dimensional minimum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    minimum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Notes
    -----
    This function implements the MINLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    # The final flag (1) selects the minimum variant of the shared C kernel.
    _nd_image.min_or_max_filter1d(input, size, axis, output,
                                  _ni_support._extend_mode_to_code(mode),
                                  cval, origin, 1)
    return output
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a one-dimensional maximum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    maximum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        Length along which to calculate the 1-D maximum.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Returns
    -------
    maximum1d : ndarray, None
        Maximum-filtered array with same shape as input.
        None if `output` is not None

    Notes
    -----
    This function implements the MAXLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.

    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    # The final flag (0) selects the maximum variant of the shared C kernel.
    _nd_image.min_or_max_filter1d(input, size, axis, output,
                                  _ni_support._extend_mode_to_code(mode),
                                  cval, origin, 0)
    return output
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum):
    """Shared backend for the minimum/maximum filters.

    `minimum` selects the operation (truthy -> minimum, falsy -> maximum).
    With no `structure` and a dense (or size-only) footprint, the filter is
    applied as a sequence of separable 1-D passes; otherwise the full n-D
    C implementation is used.
    """
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = numpy.asarray(footprint, dtype=bool)
            if not footprint.any():
                raise ValueError("All-zero footprint is not supported.")
            if footprint.all():
                # A fully-set footprint is equivalent to a rectangular
                # window of its shape, so the fast separable path applies.
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        structure = numpy.asarray(structure, dtype=numpy.float64)
        separable = False
        if footprint is None:
            footprint = numpy.ones(structure.shape, bool)
        else:
            footprint = numpy.asarray(footprint, dtype=bool)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if separable:
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        modes = _ni_support._normalize_sequence(mode, input.ndim)
        axes = list(range(input.ndim))
        # Axes with window size 1 are no-ops and are skipped.
        axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            for axis, size, origin, mode in axes:
                filter_(input, int(size), axis, output, mode, cval, origin)
                # Each subsequent pass filters the previous pass's result.
                input = output
        else:
            output[...] = input[...]
    else:
        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError('footprint array has incorrect shape.')
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError('invalid origin')
        if not footprint.flags.contiguous:
            # The C implementation requires contiguous buffers.
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.ndim:
                raise RuntimeError('structure array has incorrect shape')
            if not structure.flags.contiguous:
                structure = structure.copy()
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    return output
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Calculate a multi-dimensional minimum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    minimum_filter : ndarray
        Filtered array. Has the same shape as `input`.
    """
    # The trailing 1 selects the minimum operation in the shared backend;
    # no grey-morphology structure is used here (None).
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, 1)
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Calculate a multi-dimensional maximum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    maximum_filter : ndarray
        Filtered array. Has the same shape as `input`.
    """
    # The trailing 0 selects the maximum operation in the shared backend;
    # no grey-morphology structure is used here (None).
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, 0)
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
                 mode="reflect", cval=0.0, origin=0, operation='rank'):
    # Shared backend for rank_filter, median_filter and percentile_filter.
    # NOTE(review): the docfiller decorator is a no-op at best on this
    # docstring-less private helper -- confirm it tolerates a missing
    # __doc__ before removing it.
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        # A bare size means a dense rectangular footprint.
        footprint = numpy.ones(sizes, dtype=bool)
    else:
        footprint = numpy.asarray(footprint, dtype=bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    # The count of active footprint elements defines the valid rank range.
    filter_size = numpy.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            # Negative percentiles count down from 100.
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        # Negative ranks count back from the largest element.
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    if rank == 0:
        # Rank 0 is exactly a minimum filter, which has a faster path.
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origins)
    elif rank == filter_size - 1:
        # The top rank is exactly a maximum filter.
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origins)
    else:
        output = _ni_support._get_output(output, input)
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
                              origins)
        return output
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
                mode="reflect", cval=0.0, origin=0):
    """Calculate a multi-dimensional rank filter.

    Parameters
    ----------
    %(input)s
    rank : int
        The rank parameter may be less then zero, i.e., rank = -1
        indicates the largest element.
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    rank_filter : ndarray
        Filtered array. Has the same shape as `input`.
    """
    # The 'rank' operation passes the caller's rank through unchanged.
    return _rank_filter(input, rank, size, footprint, output, mode, cval,
                        origin, 'rank')
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
                  mode="reflect", cval=0.0, origin=0):
    """
    Calculate a multidimensional median filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    median_filter : ndarray
        Filtered array. Has the same shape as `input`.
    """
    # The rank argument (0) is a placeholder: for the 'median' operation
    # the backend recomputes the rank as half the footprint size.
    return _rank_filter(input, 0, size, footprint, output, mode, cval,
                        origin, 'median')
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
                      output=None, mode="reflect", cval=0.0, origin=0):
    """Calculate a multi-dimensional percentile filter.

    Parameters
    ----------
    %(input)s
    percentile : scalar
        The percentile parameter may be less then zero, i.e.,
        percentile = -20 equals percentile = 80
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    percentile_filter : ndarray
        Filtered array. Has the same shape as `input`.
    """
    # The percentile rides through the rank slot; the backend converts it
    # into an element rank for the 'percentile' operation.
    return _rank_filter(input, percentile, size, footprint, output, mode,
                        cval, origin, 'percentile')
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
                     output=None, mode="reflect", cval=0.0, origin=0,
                     extra_arguments=(), extra_keywords=None):
    """Calculate a one-dimensional filter along the given axis.

    `generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The function receives the input line and
    the output line, both as 1-D double arrays. The input line is extended
    appropriately according to the filter size and origin; the output line
    must be modified in-place with the result.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply along given axis.
    filter_size : scalar
        Length of the filter.
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s

    Notes
    -----
    Low-level callback functions wrapped in `scipy.LowLevelCallable` are
    also accepted, with one of these C signatures:

    .. code:: c

       int function(double *input_line, npy_intp input_length,
                    double *output_line, npy_intp output_length,
                    void *user_data)

       int function(double *input_line, intptr_t input_length,
                    double *output_line, intptr_t output_length,
                    void *user_data)

    The extended current line is passed through ``input_line`` (its length
    in ``input_length``); the callback applies the filter and stores the
    result in ``output_line`` (length in ``output_length``). ``user_data``
    is the data pointer provided to `scipy.LowLevelCallable` as-is.
    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you
    should normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function. Some other low-level function pointer specifications
    are accepted for backward compatibility only and should not be used in
    new code.
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    if filter_size < 1:
        raise RuntimeError('invalid filter size')
    axis = _ni_support._check_axis(axis, input.ndim)
    # The origin may only shift the window while keeping the anchor inside it.
    anchor = filter_size // 2 + origin
    if anchor < 0 or anchor >= filter_size:
        raise ValueError('invalid origin')
    _nd_image.generic_filter1d(input, function, filter_size, axis, output,
                               _ni_support._extend_mode_to_code(mode),
                               cval, origin, extra_arguments, extra_keywords)
    return output
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords=None):
    """Calculate a multi-dimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the function
    as a 1D array of double values.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply at each element.
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    %(extra_arguments)s
    %(extra_keywords)s

    Notes
    -----
    Low-level callback functions wrapped in `scipy.LowLevelCallable` are
    also accepted, with one of these C signatures:

    .. code:: c

       int callback(double *buffer, npy_intp filter_size,
                    double *return_value, void *user_data)

       int callback(double *buffer, intptr_t filter_size,
                    double *return_value, void *user_data)

    The elements within the footprint at the current position are passed
    through ``buffer`` (their count in ``filter_size``) and the calculated
    value is returned in ``return_value``. ``user_data`` is the data
    pointer provided to `scipy.LowLevelCallable` as-is.
    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you
    should normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function. Some other low-level function pointer specifications
    are accepted for backward compatibility only and should not be used in
    new code.
    """
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        # A bare size means a dense rectangular footprint.
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        footprint = numpy.ones(sizes, dtype=bool)
    else:
        footprint = numpy.asarray(footprint, dtype=bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        # The C implementation requires a contiguous footprint buffer.
        footprint = footprint.copy()
    output = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter(input, function, footprint, output, mode,
                             cval, origins, extra_arguments, extra_keywords)
    return output
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/ndimage/filters.py | Python | gpl-3.0 | 49,136 |
from .indices import Index, registry
from .helpers import query, parse, index, model_indices, multisearch
from .utils import autodiscover
from . import exceptions
# AppConfig dotted path consumed by Django's `default_app_config`
# application-loading convention.
default_app_config = 'springy.apps.SpringyAppConfig'
| marcinn/springy | springy/__init__.py | Python | bsd-2-clause | 218 |
#this is a utility script designed to provide function interfaces for converting values from stmp into scaled numbers for drawing. This involves normalizing and changing values
from scipy.stats import norm
from numpy import linspace
#fit a single distribution given the attribute name and idx dicts
#deprecated, but good for testing
def fit_distribution(attrName, idxDict, data, numLines):
    """Fit a normal distribution to one column of `data`.

    Returns the (mean, std) tuple produced by scipy's `norm.fit`. Rows
    start at index 1, so the first row (presumably a header) is skipped.
    All sampled cells must be parseable as floats.
    """
    column = idxDict[attrName]
    samples = [float(data[row][column]) for row in range(1, numLines)]
    return norm.fit(samples)
#function to test if something is a number
#from http://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float-in-python
def is_number(s):
    """Return True when `s` parses as a float, False on ValueError.

    Note: non-string, non-numeric inputs (e.g. None) raise TypeError,
    exactly as float() does.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def fit_all_distributions(attrNames, idxDict, data, numLines):
    """Fit a normal distribution to every tracked attribute column.

    Returns a dict mapping each name in `attrNames` to the (mean, std)
    tuple from scipy's `norm.fit`. Cells that do not parse as numbers are
    skipped; the first row (index 0) is never sampled -- presumably a
    header row.
    """
    samples = {name: [] for name in attrNames}
    for row in range(1, numLines):
        line = data[row]
        for name in attrNames:
            cell = line[idxDict[name]]
            if is_number(cell):
                samples[name].append(float(cell))
    return {name: norm.fit(values) for name, values in samples.items()}
def get_drawing_val(attr, value, distributionDict, scale=5):
    """Return a drawing magnitude for `value` under `attr`'s fitted normal.

    The result is ``scale * |value - mean| / std`` (number of standard
    deviations from the mean, scaled). `scale` generalizes the previously
    hard-coded constant C = 5; the default preserves old behavior.

    Returns 0 when any of mean, std or value is not numeric, or when the
    fitted std is 0 (constant column) -- the old code raised
    ZeroDivisionError / TypeError in those degenerate cases.
    """
    mean, std = distributionDict[attr]

    def _as_float(v):
        # Inline numeric coercion (replaces the is_number + float round trip);
        # also tolerates None, which float() rejects with TypeError.
        try:
            return float(v)
        except (TypeError, ValueError):
            return None

    m = _as_float(mean)
    s = _as_float(std)
    v = _as_float(value)
    if m is None or s is None or v is None or s == 0:
        return 0
    # Scaled number of standard deviations the value diverges from the mean.
    return scale * abs(v - m) / s
| AshleyLab/stmpVisualization2.0 | stmpVisualization-master/drawing_functions.py | Python | mit | 1,664 |
# !/usr/bin/python
import sys
import os
import warnings
import unittest2
# silences Python's complaints about imports
warnings.filterwarnings('ignore', category=UserWarning)
USAGE = """
Path to your sdk must be the first argument. To run type:
$ apptest.py path/to/your/appengine/installation
Remember to set environment variable FLASK_CONF to TEST.
Loading configuration depending on the value of
environment variable allows you to add your own
testing configuration in src/inbox/settings.py
"""
def main(sdk_path, test_path):
    """Discover and run the test suite under `test_path` with the GAE SDK.

    `sdk_path` is prepended to ``sys.path`` so that ``dev_appserver`` can
    be imported; its ``fix_sys_path()`` is then called (presumably to set
    up the SDK's own import paths -- confirm against the SDK docs).
    """
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    # Bundled third-party libraries live in ./lib; inserted after the SDK
    # path so SDK modules win name clashes.
    sys.path.insert(1, os.path.join(os.path.abspath('.'), 'lib'))
    suite = unittest2.loader.TestLoader().discover(test_path)
    unittest2.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
    # See: http://code.google.com/appengine/docs/python/tools/localunittesting.html
    try:
        # Path to the SDK installation (first CLI argument, or hardcode it).
        SDK_PATH = sys.argv[1]  # ...or hardcoded path
        # Path to the folder holding the test suite.
        TEST_PATH = os.path.join(os.path.dirname(os.path.abspath(__name__)),
                                 'tests')
        main(SDK_PATH, TEST_PATH)
    except IndexError:
        # The user probably forgot to pass the SDK path as the first argument.
        # print() call form works on both Python 2 and 3 (the old
        # 'print USAGE' statement was Python-2-only syntax).
        print(USAGE)
# Marker exception types for this package: each is an otherwise empty
# Exception subclass whose name identifies the failure condition raised
# by callers elsewhere in the package.
class MissingReferenceFlank(Exception):
    pass
class InconsistentAlignment(Exception):
    pass
class Unstrandable(Exception):
    pass
class FlanksTooShort(Exception):
    pass
| 23andMe/stranding | stranding/exceptions.py | Python | mit | 186 |
# Copyright 2016 Hewlett Packard Enterprise Development, LP.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _restobject import RestObject
def ex33_set_bios_service(restobj, bios_properties, bios_password=None):
sys.stdout.write("\nEXAMPLE 33: Set Bios Service\n")
instances = restobj.search_for_type("Bios.")
for instance in instances:
response = restobj.rest_patch(instance["href"], bios_properties, \
bios_password)
restobj.error_handler(response)
if __name__ == "__main__":
    # When running on the server locally use the following commented values
    # iLO_https_url = "blobstore://."
    # iLO_account = "None"
    # iLO_password = "None"
    # When running remotely connect using the iLO secured (https://) address,
    # iLO account name, and password to send https requests
    # iLO_https_url acceptable examples:
    # "https://10.0.0.100"
    # "https://f250asha.americas.hpqcorp.net"
    iLO_https_url = "https://10.0.0.100"
    iLO_account = "admin"
    iLO_password = "password"
    #Create a REST object (RestObject is imported from _restobject above)
    REST_OBJ = RestObject(iLO_https_url, iLO_account, iLO_password)
    # Apply the BIOS service name/email settings via example 33.
    ex33_set_bios_service(REST_OBJ, {'ServiceName':'HP', \
                        'ServiceEmail':'me@hp.com'})
| HewlettPackard/python-proliant-sdk | examples/Rest/ex33_set_bios_service.py | Python | apache-2.0 | 1,873 |
# -*- coding: utf-8 -*-
"""
babel.messages.catalog
~~~~~~~~~~~~~~~~~~~~~~
Data structures for message catalogs.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
from cgi import parse_header
from datetime import datetime, time as time_
from difflib import get_close_matches
from email import message_from_string
from copy import copy
from babel import __version__ as VERSION
from babel.core import Locale
from babel.dates import format_datetime
from babel.messages.plurals import get_plural
from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone
from babel._compat import string_types, number_types, PY2, cmp
__all__ = ['Message', 'Catalog', 'TranslationError']
PYTHON_FORMAT = re.compile(r'''(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
''')
def _parse_datetime_header(value):
match = re.match(r'^(?P<datetime>.*?)(?P<tzoffset>[+-]\d{4})?$', value)
tt = time.strptime(match.group('datetime'), '%Y-%m-%d %H:%M')
ts = time.mktime(tt)
dt = datetime.fromtimestamp(ts)
# Separate the offset into a sign component, hours, and # minutes
tzoffset = match.group('tzoffset')
if tzoffset is not None:
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
# Make them all integers
plus_minus = int(plus_minus_s + '1')
hours_offset = int(hours_offset_s)
mins_offset = int(mins_offset_s)
# Calculate net offset
net_mins_offset = hours_offset * 60
net_mins_offset += mins_offset
net_mins_offset *= plus_minus
# Create an offset object
tzoffset = FixedOffsetTimezone(net_mins_offset)
# Store the offset in a datetime object
dt = dt.replace(tzinfo=tzoffset)
return dt
class Message(object):
    """Representation of a single message in a catalog."""
    def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
                 user_comments=(), previous_id=(), lineno=None, context=None):
        """Create the message object.
        :param id: the message ID, or a ``(singular, plural)`` tuple for
                   pluralizable messages
        :param string: the translated message string, or a
                       ``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filenname, lineno)`` tuples
        :param flags: a set or sequence of flags
        :param auto_comments: a sequence of automatic comments for the message
        :param user_comments: a sequence of user comments for the message
        :param previous_id: the previous message ID, or a ``(singular, plural)``
                            tuple for pluralizable messages
        :param lineno: the line number on which the msgid line was found in the
                       PO file, if any
        :param context: the message context
        """
        self.id = id
        # A pluralizable message stores its translation as a (singular,
        # plural) tuple, so an empty translation must be a tuple as well.
        if not string and self.pluralizable:
            string = (u'', u'')
        self.string = string
        self.locations = list(distinct(locations))
        self.flags = set(flags)
        # Keep the 'python-format' flag consistent with the msgid contents.
        if id and self.python_format:
            self.flags.add('python-format')
        else:
            self.flags.discard('python-format')
        self.auto_comments = list(distinct(auto_comments))
        self.user_comments = list(distinct(user_comments))
        if isinstance(previous_id, string_types):
            self.previous_id = [previous_id]
        else:
            self.previous_id = list(previous_id)
        self.lineno = lineno
        self.context = context
    def __repr__(self):
        return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
                                        list(self.flags))
    def __cmp__(self, obj):
        """Compare Messages, taking into account plural ids"""
        # NOTE(review): comparing against an object that has no ``id``
        # attribute raises AttributeError in the fallback below — confirm
        # only Message instances are ever compared.
        def values_to_compare():
            # For plural messages, compare on the singular msgid only.
            if isinstance(obj, Message):
                plural = self.pluralizable
                obj_plural = obj.pluralizable
                if plural and obj_plural:
                    return self.id[0], obj.id[0]
                elif plural:
                    return self.id[0], obj.id
                elif obj_plural:
                    return self.id, obj.id[0]
            return self.id, obj.id
        this, other = values_to_compare()
        return cmp(this, other)
    # The rich comparison operators all defer to __cmp__ above.
    def __gt__(self, other):
        return self.__cmp__(other) > 0
    def __lt__(self, other):
        return self.__cmp__(other) < 0
    def __ge__(self, other):
        return self.__cmp__(other) >= 0
    def __le__(self, other):
        return self.__cmp__(other) <= 0
    def __eq__(self, other):
        return self.__cmp__(other) == 0
    def __ne__(self, other):
        return self.__cmp__(other) != 0
    def clone(self):
        # Build a new Message from shallow copies of every constructor
        # argument of this one.
        return Message(*map(copy, (self.id, self.string, self.locations,
                                   self.flags, self.auto_comments,
                                   self.user_comments, self.previous_id,
                                   self.lineno, self.context)))
    def check(self, catalog=None):
        """Run various validation checks on the message.  Some validations
        are only performed if the catalog is provided.  This method returns
        a sequence of `TranslationError` objects.
        :rtype: ``iterator``
        :param catalog: A catalog instance that is passed to the checkers
        :see: `Catalog.check` for a way to perform checks for all messages
              in a catalog.
        """
        from babel.messages.checkers import checkers
        errors = []
        for checker in checkers:
            try:
                checker(catalog, self)
            except TranslationError as e:
                errors.append(e)
        return errors
    @property
    def fuzzy(self):
        """Whether the translation is fuzzy.
        >>> Message('foo').fuzzy
        False
        >>> msg = Message('foo', 'foo', flags=['fuzzy'])
        >>> msg.fuzzy
        True
        >>> msg
        <Message 'foo' (flags: ['fuzzy'])>
        :type: `bool`"""
        return 'fuzzy' in self.flags
    @property
    def pluralizable(self):
        """Whether the message is plurizable.
        >>> Message('foo').pluralizable
        False
        >>> Message(('foo', 'bar')).pluralizable
        True
        :type: `bool`"""
        return isinstance(self.id, (list, tuple))
    @property
    def python_format(self):
        """Whether the message contains Python-style parameters.
        >>> Message('foo %(name)s bar').python_format
        True
        >>> Message(('foo %(name)s', 'foo %(name)s')).python_format
        True
        :type: `bool`"""
        # A plural message is python-format if any of its ids contains a
        # %-style placeholder.
        ids = self.id
        if not isinstance(ids, (list, tuple)):
            ids = [ids]
        return any(PYTHON_FORMAT.search(id) for id in ids)
class TranslationError(Exception):
    """Exception thrown by translation checkers when invalid message
    translations are encountered."""
# Default POT header comment; the upper-case placeholder tokens (PROJECT,
# YEAR, ORGANIZATION) are substituted in Catalog._get_header_comment.
DEFAULT_HEADER = u"""\
# Translations template for PROJECT.
# Copyright (C) YEAR ORGANIZATION
# This file is distributed under the same license as the PROJECT project.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#"""
# On Python 2, email.message_from_string requires byte strings, so the
# shim below round-trips the header through UTF-8 and decodes the result;
# on Python 3 message_from_string handles text directly.
if PY2:
    def _parse_header(header_string):
        # message_from_string only works for str, not for unicode
        headers = message_from_string(header_string.encode('utf8'))
        decoded_headers = {}
        for name, value in headers.items():
            name = name.decode('utf8')
            value = value.decode('utf8')
            decoded_headers[name] = value
        return decoded_headers
else:
    _parse_header = message_from_string
class Catalog(object):
    """Representation of a message catalog."""
    def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
                 project=None, version=None, copyright_holder=None,
                 msgid_bugs_address=None, creation_date=None,
                 revision_date=None, last_translator=None, language_team=None,
                 charset=None, fuzzy=True):
        """Initialize the catalog object.
        :param locale: the locale identifier or `Locale` object, or `None`
                       if the catalog is not bound to a locale (which basically
                       means it's a template)
        :param domain: the message domain
        :param header_comment: the header comment as string, or `None` for the
                               default header
        :param project: the project's name
        :param version: the project's version
        :param copyright_holder: the copyright holder of the catalog
        :param msgid_bugs_address: the email address or URL to submit bug
                                   reports to
        :param creation_date: the date the catalog was created
        :param revision_date: the date the catalog was revised
        :param last_translator: the name and email of the last translator
        :param language_team: the name and email of the language team
        :param charset: the encoding to use in the output (defaults to utf-8)
        :param fuzzy: the fuzzy bit on the catalog header
        """
        self.domain = domain
        if locale:
            locale = Locale.parse(locale)
        self.locale = locale
        self._header_comment = header_comment
        self._messages = odict()
        self.project = project or 'PROJECT'
        self.version = version or 'VERSION'
        self.copyright_holder = copyright_holder or 'ORGANIZATION'
        self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
        self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
        """Name and email address of the last translator."""
        self.language_team = language_team or 'LANGUAGE <LL@li.org>'
        """Name and email address of the language team."""
        self.charset = charset or 'utf-8'
        # Naive datetimes are normalized to the local timezone.
        if creation_date is None:
            creation_date = datetime.now(LOCALTZ)
        elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
            creation_date = creation_date.replace(tzinfo=LOCALTZ)
        self.creation_date = creation_date
        if revision_date is None:
            revision_date = 'YEAR-MO-DA HO:MI+ZONE'
        elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
            revision_date = revision_date.replace(tzinfo=LOCALTZ)
        self.revision_date = revision_date
        self.fuzzy = fuzzy
        self.obsolete = odict()  # Dictionary of obsolete messages
        # Plural data is computed lazily from the locale (see num_plurals /
        # plural_expr below) unless supplied via the Plural-Forms header.
        self._num_plurals = None
        self._plural_expr = None
    def _get_header_comment(self):
        comment = self._header_comment
        year = datetime.now(LOCALTZ).strftime('%Y')
        if hasattr(self.revision_date, 'strftime'):
            year = self.revision_date.strftime('%Y')
        # Substitute the upper-case placeholder tokens of the template header.
        comment = comment.replace('PROJECT', self.project) \
                         .replace('VERSION', self.version) \
                         .replace('YEAR', year) \
                         .replace('ORGANIZATION', self.copyright_holder)
        if self.locale:
            comment = comment.replace('Translations template', '%s translations'
                                      % self.locale.english_name)
        return comment
    def _set_header_comment(self, string):
        self._header_comment = string
    header_comment = property(_get_header_comment, _set_header_comment, doc="""\
    The header comment for the catalog.
    >>> catalog = Catalog(project='Foobar', version='1.0',
    ...                   copyright_holder='Foo Company')
    >>> print(catalog.header_comment) #doctest: +ELLIPSIS
    # Translations template for Foobar.
    # Copyright (C) ... Foo Company
    # This file is distributed under the same license as the Foobar project.
    # FIRST AUTHOR <EMAIL@ADDRESS>, ....
    #
    The header can also be set from a string. Any known upper-case variables
    will be replaced when the header is retrieved again:
    >>> catalog = Catalog(project='Foobar', version='1.0',
    ...                   copyright_holder='Foo Company')
    >>> catalog.header_comment = '''\\
    ... # The POT for my really cool PROJECT project.
    ... # Copyright (C) 1990-2003 ORGANIZATION
    ... # This file is distributed under the same license as the PROJECT
    ... # project.
    ... #'''
    >>> print(catalog.header_comment)
    # The POT for my really cool Foobar project.
    # Copyright (C) 1990-2003 Foo Company
    # This file is distributed under the same license as the Foobar
    # project.
    #
    :type: `unicode`
    """)
    def _get_mime_headers(self):
        headers = []
        headers.append(('Project-Id-Version',
                        '%s %s' % (self.project, self.version)))
        headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
        headers.append(('POT-Creation-Date',
                        format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
                                        locale='en')))
        # A string revision date (the 'YEAR-MO-DA HO:MI+ZONE' placeholder)
        # is emitted verbatim; datetimes/numbers are formatted.
        if isinstance(self.revision_date, (datetime, time_) + number_types):
            headers.append(('PO-Revision-Date',
                            format_datetime(self.revision_date,
                                            'yyyy-MM-dd HH:mmZ', locale='en')))
        else:
            headers.append(('PO-Revision-Date', self.revision_date))
        headers.append(('Last-Translator', self.last_translator))
        if self.locale is not None:
            headers.append(('Language', str(self.locale)))
        if (self.locale is not None) and ('LANGUAGE' in self.language_team):
            headers.append(('Language-Team',
                            self.language_team.replace('LANGUAGE',
                                                       str(self.locale))))
        else:
            headers.append(('Language-Team', self.language_team))
        if self.locale is not None:
            headers.append(('Plural-Forms', self.plural_forms))
        headers.append(('MIME-Version', '1.0'))
        headers.append(('Content-Type',
                        'text/plain; charset=%s' % self.charset))
        headers.append(('Content-Transfer-Encoding', '8bit'))
        headers.append(('Generated-By', 'Babel %s\n' % VERSION))
        return headers
    def _set_mime_headers(self, headers):
        for name, value in headers:
            name = name.lower()
            if name == 'project-id-version':
                parts = value.split(' ')
                self.project = u' '.join(parts[:-1])
                self.version = parts[-1]
            elif name == 'report-msgid-bugs-to':
                self.msgid_bugs_address = value
            elif name == 'last-translator':
                self.last_translator = value
            elif name == 'language-team':
                self.language_team = value
            elif name == 'content-type':
                mimetype, params = parse_header(value)
                if 'charset' in params:
                    self.charset = params['charset'].lower()
            elif name == 'plural-forms':
                # The ' ;' prefix makes parse_header treat the entire value
                # as parameters (nplurals=...; plural=...).
                _, params = parse_header(' ;' + value)
                self._num_plurals = int(params.get('nplurals', 2))
                self._plural_expr = params.get('plural', '(n != 1)')
            elif name == 'pot-creation-date':
                self.creation_date = _parse_datetime_header(value)
            elif name == 'po-revision-date':
                # Keep the value if it's not the default one
                if 'YEAR' not in value:
                    self.revision_date = _parse_datetime_header(value)
    mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
    The MIME headers of the catalog, used for the special ``msgid ""`` entry.
    The behavior of this property changes slightly depending on whether a locale
    is set or not, the latter indicating that the catalog is actually a template
    for actual translations.
    Here's an example of the output for such a catalog template:
    >>> from babel.dates import UTC
    >>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
    >>> catalog = Catalog(project='Foobar', version='1.0',
    ...                   creation_date=created)
    >>> for name, value in catalog.mime_headers:
    ...     print('%s: %s' % (name, value))
    Project-Id-Version: Foobar 1.0
    Report-Msgid-Bugs-To: EMAIL@ADDRESS
    POT-Creation-Date: 1990-04-01 15:30+0000
    PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
    Last-Translator: FULL NAME <EMAIL@ADDRESS>
    Language-Team: LANGUAGE <LL@li.org>
    MIME-Version: 1.0
    Content-Type: text/plain; charset=utf-8
    Content-Transfer-Encoding: 8bit
    Generated-By: Babel ...
    And here's an example of the output when the locale is set:
    >>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
    >>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
    ...                   creation_date=created, revision_date=revised,
    ...                   last_translator='John Doe <jd@example.com>',
    ...                   language_team='de_DE <de@example.com>')
    >>> for name, value in catalog.mime_headers:
    ...     print('%s: %s' % (name, value))
    Project-Id-Version: Foobar 1.0
    Report-Msgid-Bugs-To: EMAIL@ADDRESS
    POT-Creation-Date: 1990-04-01 15:30+0000
    PO-Revision-Date: 1990-08-03 12:00+0000
    Last-Translator: John Doe <jd@example.com>
    Language: de_DE
    Language-Team: de_DE <de@example.com>
    Plural-Forms: nplurals=2; plural=(n != 1)
    MIME-Version: 1.0
    Content-Type: text/plain; charset=utf-8
    Content-Transfer-Encoding: 8bit
    Generated-By: Babel ...
    :type: `list`
    """)
    @property
    def num_plurals(self):
        """The number of plurals used by the catalog or locale.
        >>> Catalog(locale='en').num_plurals
        2
        >>> Catalog(locale='ga').num_plurals
        3
        :type: `int`"""
        # Computed lazily and cached; may have been set from the
        # Plural-Forms header in _set_mime_headers.
        if self._num_plurals is None:
            num = 2
            if self.locale:
                num = get_plural(self.locale)[0]
            self._num_plurals = num
        return self._num_plurals
    @property
    def plural_expr(self):
        """The plural expression used by the catalog or locale.
        >>> Catalog(locale='en').plural_expr
        '(n != 1)'
        >>> Catalog(locale='ga').plural_expr
        '(n==1 ? 0 : n==2 ? 1 : 2)'
        :type: `string_types`"""
        if self._plural_expr is None:
            expr = '(n != 1)'
            if self.locale:
                expr = get_plural(self.locale)[1]
            self._plural_expr = expr
        return self._plural_expr
    @property
    def plural_forms(self):
        """Return the plural forms declaration for the locale.
        >>> Catalog(locale='en').plural_forms
        'nplurals=2; plural=(n != 1)'
        >>> Catalog(locale='pt_BR').plural_forms
        'nplurals=2; plural=(n > 1)'
        :type: `str`"""
        return 'nplurals=%s; plural=%s' % (self.num_plurals, self.plural_expr)
    def __contains__(self, id):
        """Return whether the catalog has a message with the specified ID."""
        return self._key_for(id) in self._messages
    def __len__(self):
        """The number of messages in the catalog.
        This does not include the special ``msgid ""`` entry."""
        return len(self._messages)
    def __iter__(self):
        """Iterates through all the entries in the catalog, in the order they
        were added, yielding a `Message` object for every entry.
        :rtype: ``iterator``"""
        # The synthetic first message (empty msgid) carries the MIME headers.
        buf = []
        for name, value in self.mime_headers:
            buf.append('%s: %s' % (name, value))
        flags = set()
        if self.fuzzy:
            flags |= set(['fuzzy'])
        yield Message(u'', '\n'.join(buf), flags=flags)
        for key in self._messages:
            yield self._messages[key]
    def __repr__(self):
        locale = ''
        if self.locale:
            locale = ' %s' % self.locale
        return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
    def __delitem__(self, id):
        """Delete the message with the specified ID."""
        self.delete(id)
    def __getitem__(self, id):
        """Return the message with the specified ID.
        :param id: the message ID
        """
        return self.get(id)
    def __setitem__(self, id, message):
        """Add or update the message with the specified ID.
        >>> catalog = Catalog()
        >>> catalog[u'foo'] = Message(u'foo')
        >>> catalog[u'foo']
        <Message u'foo' (flags: [])>
        If a message with that ID is already in the catalog, it is updated
        to include the locations and flags of the new message.
        >>> catalog = Catalog()
        >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
        >>> catalog[u'foo'].locations
        [('main.py', 1)]
        >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
        >>> catalog[u'foo'].locations
        [('main.py', 1), ('utils.py', 5)]
        :param id: the message ID
        :param message: the `Message` object
        """
        assert isinstance(message, Message), 'expected a Message object'
        key = self._key_for(id, message.context)
        # Merge into any existing message stored under the same key.
        current = self._messages.get(key)
        if current:
            if message.pluralizable and not current.pluralizable:
                # The new message adds pluralization
                current.id = message.id
                current.string = message.string
            current.locations = list(distinct(current.locations +
                                              message.locations))
            current.auto_comments = list(distinct(current.auto_comments +
                                                  message.auto_comments))
            current.user_comments = list(distinct(current.user_comments +
                                                  message.user_comments))
            current.flags |= message.flags
            message = current
        elif id == '':
            # special treatment for the header message
            self.mime_headers = _parse_header(message.string).items()
            self.header_comment = '\n'.join([('# %s' % c).rstrip() for c
                                             in message.user_comments])
            self.fuzzy = message.fuzzy
        else:
            if isinstance(id, (list, tuple)):
                assert isinstance(message.string, (list, tuple)), \
                    'Expected sequence but got %s' % type(message.string)
            self._messages[key] = message
    def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
            user_comments=(), previous_id=(), lineno=None, context=None):
        """Add or update the message with the specified ID.
        >>> catalog = Catalog()
        >>> catalog.add(u'foo')
        <Message ...>
        >>> catalog[u'foo']
        <Message u'foo' (flags: [])>
        This method simply constructs a `Message` object with the given
        arguments and invokes `__setitem__` with that object.
        :param id: the message ID, or a ``(singular, plural)`` tuple for
                   pluralizable messages
        :param string: the translated message string, or a
                       ``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filenname, lineno)`` tuples
        :param flags: a set or sequence of flags
        :param auto_comments: a sequence of automatic comments
        :param user_comments: a sequence of user comments
        :param previous_id: the previous message ID, or a ``(singular, plural)``
                            tuple for pluralizable messages
        :param lineno: the line number on which the msgid line was found in the
                       PO file, if any
        :param context: the message context
        """
        message = Message(id, string, list(locations), flags, auto_comments,
                          user_comments, previous_id, lineno=lineno,
                          context=context)
        self[id] = message
        return message
    def check(self):
        """Run various validation checks on the translations in the catalog.
        For every message which fails validation, this method yield a
        ``(message, errors)`` tuple, where ``message`` is the `Message` object
        and ``errors`` is a sequence of `TranslationError` objects.
        :rtype: ``iterator``
        """
        for message in self._messages.values():
            errors = message.check(catalog=self)
            if errors:
                yield message, errors
    def get(self, id, context=None):
        """Return the message with the specified ID and context.
        :param id: the message ID
        :param context: the message context, or ``None`` for no context
        """
        return self._messages.get(self._key_for(id, context))
    def delete(self, id, context=None):
        """Delete the message with the specified ID and context.
        :param id: the message ID
        :param context: the message context, or ``None`` for no context
        """
        key = self._key_for(id, context)
        if key in self._messages:
            del self._messages[key]
    def update(self, template, no_fuzzy_matching=False, update_header_comment=False):
        """Update the catalog based on the given template catalog.
        >>> from babel.messages import Catalog
        >>> template = Catalog()
        >>> template.add('green', locations=[('main.py', 99)])
        <Message ...>
        >>> template.add('blue', locations=[('main.py', 100)])
        <Message ...>
        >>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
        <Message ...>
        >>> catalog = Catalog(locale='de_DE')
        >>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
        <Message ...>
        >>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
        <Message ...>
        >>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
        ...             locations=[('util.py', 38)])
        <Message ...>
        >>> catalog.update(template)
        >>> len(catalog)
        3
        >>> msg1 = catalog['green']
        >>> msg1.string
        >>> msg1.locations
        [('main.py', 99)]
        >>> msg2 = catalog['blue']
        >>> msg2.string
        u'blau'
        >>> msg2.locations
        [('main.py', 100)]
        >>> msg3 = catalog['salad']
        >>> msg3.string
        (u'Salat', u'Salate')
        >>> msg3.locations
        [('util.py', 42)]
        Messages that are in the catalog but not in the template are removed
        from the main collection, but can still be accessed via the `obsolete`
        member:
        >>> 'head' in catalog
        False
        >>> list(catalog.obsolete.values())
        [<Message 'head' (flags: [])>]
        :param template: the reference catalog, usually read from a POT file
        :param no_fuzzy_matching: whether to use fuzzy matching of message IDs
        """
        messages = self._messages
        remaining = messages.copy()
        self._messages = odict()
        # Prepare for fuzzy matching
        fuzzy_candidates = []
        if not no_fuzzy_matching:
            # Only translated, non-header messages are fuzzy-match candidates;
            # the dict maps each candidate key to that message's context.
            fuzzy_candidates = dict([
                (self._key_for(msgid), messages[msgid].context)
                for msgid in messages if msgid and messages[msgid].string
            ])
        fuzzy_matches = set()
        def _merge(message, oldkey, newkey):
            message = message.clone()
            fuzzy = False
            if oldkey != newkey:
                # A fuzzy match: remember the old id and mark the merged
                # message fuzzy below.
                fuzzy = True
                fuzzy_matches.add(oldkey)
                oldmsg = messages.get(oldkey)
                if isinstance(oldmsg.id, string_types):
                    message.previous_id = [oldmsg.id]
                else:
                    message.previous_id = list(oldmsg.id)
            else:
                oldmsg = remaining.pop(oldkey, None)
            message.string = oldmsg.string
            # Reconcile singular/plural shape with the template and the
            # catalog's number of plural forms.
            if isinstance(message.id, (list, tuple)):
                if not isinstance(message.string, (list, tuple)):
                    fuzzy = True
                    message.string = tuple(
                        [message.string] + ([u''] * (len(message.id) - 1))
                    )
                elif len(message.string) != self.num_plurals:
                    fuzzy = True
                    message.string = tuple(message.string[:len(oldmsg.string)])
            elif isinstance(message.string, (list, tuple)):
                fuzzy = True
                message.string = message.string[0]
            message.flags |= oldmsg.flags
            if fuzzy:
                message.flags |= set([u'fuzzy'])
            self[message.id] = message
        for message in template:
            if message.id:
                key = self._key_for(message.id, message.context)
                if key in messages:
                    _merge(message, key, key)
                else:
                    if no_fuzzy_matching is False:
                        # do some fuzzy matching with difflib
                        if isinstance(key, tuple):
                            matchkey = key[0]  # just the msgid, no context
                        else:
                            matchkey = key
                        matches = get_close_matches(matchkey.lower().strip(),
                                                    fuzzy_candidates.keys(), 1)
                        if matches:
                            newkey = matches[0]
                            newctxt = fuzzy_candidates[newkey]
                            if newctxt is not None:
                                newkey = newkey, newctxt
                            _merge(message, newkey, key)
                            continue
                    self[message.id] = message
        # Anything left in 'remaining' was not matched by the template and
        # becomes obsolete (unless it was consumed by a fuzzy match).
        for msgid in remaining:
            if no_fuzzy_matching or msgid not in fuzzy_matches:
                self.obsolete[msgid] = remaining[msgid]
        if update_header_comment:
            # Allow the updated catalog's header to be rewritten based on the
            # template's header
            self.header_comment = template.header_comment
        # Make updated catalog's POT-Creation-Date equal to the template
        # used to update the catalog
        self.creation_date = template.creation_date
    def _key_for(self, id, context=None):
        """The key for a message is just the singular ID even for pluralizable
        messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
        messages.
        """
        key = id
        if isinstance(key, (list, tuple)):
            key = id[0]
        if context is not None:
            key = (key, context)
        return key
| srisankethu/babel | babel/messages/catalog.py | Python | bsd-3-clause | 31,052 |
#Standard libraries import
from thread import *
#Local import
import tcp_server
# Repository of known device types.  Each 'dictionary' entry is a list of
# [name, modbus address, size, <fourth field>]; Device.__init__ zips entries
# with the three object_dictionary_columns, so the fourth field is currently
# dropped.
device_repository = [
    {'type': 'new emax',
     'dictionary' : [['Current I1', 2000, 2, 0], ['Current I2', 2002, 2, 0],
                     ['Current I3', 2004, 2, 0], ['Current Ne', 2006, 2, 0]]},
    {'type': 'emax2',
     'dictionary': [['Current I1', 2000, 2, 0], ['Current I2', 2002, 2, 0],
                    ['Current I3', 2004, 2, 0], ['Current Ne', 2006, 2, 0]]}
]
##Device class.
# Loads a device's object dictionary from the repository and manages the
# interface with the external world through a ModbusTCP server thread.
class Device:
    name = ''                   ##Device name freely assigned by user
    type = ''                   ##Device type, compared with the repository
    server = 0                  ##ModbusTCP server, created by device_server_thread
    object_dictionary = []      ##Object Dictionary (class-level default only)
    object_dictionary_columns = \
        ('Name', 'ModbusAddress', 'Size')  ##Dictionary columns
    ##Constructor method
    # name: device name freely assigned by the user
    # type: device type, matched against device_repository
    # diagnostics: when True, print each loaded dictionary entry
    def __init__(self, name = 'Generic device', type = 'emax2', diagnostics = False):
        #Assigning device name and type
        self.name = name
        self.type = type
        #BUGFIX: 'server' was a dead local variable and 'object_dictionary'
        #was a shared *class* list mutated through self...append, so every
        #Device instance shared one dictionary. Bind per-instance state.
        self.server = 0
        self.object_dictionary = []
        #Generating Object Dictionary
        for device_item in device_repository:
            #Look for device
            if device_item['type'] == self.type:
                #Matching device found !!
                if diagnostics:
                    print('Device instantiated: ' + self.name + ' (' + self.type + ')')
                #Loading device dictionary
                for entry in device_item['dictionary']:
                    self.object_dictionary.append(dict(zip(self.object_dictionary_columns, entry)))
                    if diagnostics:
                        print(self.object_dictionary[-1])
                ## Create a thread for the device
                start_new_thread(self.device_thread, ())
                ## Quit loop
                break
    ##Device run-time management
    def device_thread(self):
        print('Device thread started..')
        ## Create a thread for the device server
        start_new_thread(self.device_server_thread, ())
        while True:
            #Execute device run-time..
            pass
    ##Device server thread - required since it's typically blocked listening for connections
    def device_server_thread(self):
        print('Device server thread started..')
        #Creating ModbusTCP server
        self.server = tcp_server.ModbusTCPServer(self.name, 502, True)
        #Listen for incoming ModbusTCP connections (blocking call)
        self.server.listen_on_connections()
##Top-level detection
if __name__ == '__main__':
    import time
    device = Device('CB building', 'emax2', True)
    #BUGFIX: sleep instead of the previous busy 'while True: pass' loop so
    #the idle main thread no longer consumes a full CPU core while the
    #device threads do the actual work.
    while True:
        time.sleep(1)
| MarcoStucchi/ModbusTCP-Client-Server | device/device.py | Python | mit | 2,774 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-21 15:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9) schema migration: adds nullable image
    # resolution fields to 'document' and nullable block-dimension fields
    # to 'extractedelements'.  Do not edit generated migrations by hand.
    dependencies = [
        ('base', '0006_auto_20160620_0207'),
    ]
    operations = [
        migrations.AddField(
            model_name='document',
            name='image_resolution_x',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='document',
            name='image_resolution_y',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='extractedelements',
            name='block_height',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='extractedelements',
            name='block_width',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
| pulkitpahwa/smart-image-coordinates | smarter/base/migrations/0007_auto_20160621_1512.py | Python | mit | 999 |
from __future__ import division
__all__ = ['Categorical', 'CategoricalAndConcentration', 'Multinomial',
'MultinomialAndConcentration', 'GammaCompoundDirichlet', 'CRP']
import numpy as np
from warnings import warn
import scipy.stats as stats
import scipy.special as special
from pybasicbayes.abstractions import \
GibbsSampling, MeanField, MeanFieldSVI, MaxLikelihood, MAP
from pybasicbayes.util.stats import sample_discrete
try:
from pybasicbayes.util.cstats import sample_crp_tablecounts
except ImportError:
warn('using slow sample_crp_tablecounts')
from pybasicbayes.util.stats import sample_crp_tablecounts
class Categorical(GibbsSampling, MeanField, MeanFieldSVI, MaxLikelihood, MAP):
    '''
    This class represents a categorical distribution over labels, where the
    parameter is weights and the prior is a Dirichlet distribution.
    For example, if K == 3, then five samples may look like
        [0,1,0,2,1]
    Each entry is the label of a sample, like the outcome of die rolls. In other
    words, generated data or data passed to log_likelihood are indices, not
    indicator variables!  (But when 'weighted data' is passed, like in mean
    field or weighted max likelihood, the weights are over indicator
    variables...)
    This class can be used as a weak limit approximation for a DP, particularly by
    calling __init__ with alpha_0 and K arguments, in which case the prior will be
    a symmetric Dirichlet with K components and parameter alpha_0/K; K is then the
    weak limit approximation parameter.
    Hyperparameters:
        alphav_0 (vector) OR alpha_0 (scalar) and K
    Parameters:
        weights, a vector encoding a finite pmf
    '''
    def __init__(self,weights=None,alpha_0=None,K=None,alphav_0=None,alpha_mf=None):
        # Either pass alphav_0 directly, or pass (alpha_0, K) to build a
        # symmetric Dirichlet prior via the alpha_0 property setter below.
        self.K = K
        self.alpha_0 = alpha_0
        self.alphav_0 = alphav_0
        # mean-field variational parameter; defaults to the prior vector
        self._alpha_mf = alpha_mf if alpha_mf is not None else self.alphav_0
        self.weights = weights
        if weights is None and self.alphav_0 is not None:
            self.resample() # initialize from prior
    def _get_alpha_0(self):
        # scalar concentration (only meaningful for the symmetric case)
        return self._alpha_0
    def _set_alpha_0(self,alpha_0):
        # setting the scalar rebuilds the symmetric Dirichlet vector alpha_0/K
        self._alpha_0 = alpha_0
        if not any(_ is None for _ in (self.K, self._alpha_0)):
            self.alphav_0 = np.repeat(self._alpha_0/self.K,self.K)
    alpha_0 = property(_get_alpha_0,_set_alpha_0)
    def _get_alphav_0(self):
        # full Dirichlet parameter vector, or None if not yet configured
        return self._alphav_0 if hasattr(self,'_alphav_0') else None
    def _set_alphav_0(self,alphav_0):
        # setting the vector also fixes K to its length
        if alphav_0 is not None:
            self._alphav_0 = alphav_0
            self.K = len(alphav_0)
    alphav_0 = property(_get_alphav_0,_set_alphav_0)
    @property
    def params(self):
        return dict(weights=self.weights)
    @property
    def hypparams(self):
        return dict(alphav_0=self.alphav_0)
    @property
    def num_parameters(self):
        return len(self.weights)
    def rvs(self,size=None):
        # samples are integer labels in [0, K)
        return sample_discrete(self.weights,size)
    def log_likelihood(self,x):
        # x contains integer labels; temporarily silence divide-by-zero so
        # that log(0) produces -inf without a warning
        err = np.seterr(divide='ignore')
        out = np.log(self.weights)[x] # log(0) can happen, no warning
        np.seterr(**err)
        return out
    ### Gibbs sampling
    def resample(self,data=[],counts=None):
        # Draw new weights from the Dirichlet posterior; the 1e-5 floor
        # keeps np.random.dirichlet numerically stable for tiny parameters.
        counts = self._get_statistics(data) if counts is None else counts
        self.weights = np.random.dirichlet(np.maximum(1e-5,self.alphav_0 + counts))
        # NOTE: next line is so we can use Gibbs sampling to initialize mean field
        self._alpha_mf = self.weights * self.alphav_0.sum()
        assert (self._alpha_mf >= 0.).all()
        return self
    def _get_statistics(self,data,K=None):
        # Sufficient statistics are label counts. Accepts a single array of
        # labels or a list of such arrays.
        K = K if K else self.K
        if isinstance(data,np.ndarray) or \
                (isinstance(data,list) and len(data) > 0
                        and not isinstance(data[0],(np.ndarray,list))):
            counts = np.bincount(data,minlength=K)
        else:
            counts = sum(np.bincount(d,minlength=K) for d in data)
        return counts
    def _get_weighted_statistics(self,data,weights):
        # Weighted counts: weights may be 2D indicator weights (data ignored)
        # or 1D per-datum weights aligned with integer labels in data.
        if isinstance(weights,np.ndarray):
            assert weights.ndim in (1,2)
            if data is None or weights.ndim == 2:
                # when weights is 2D or data is None, the weights are expected
                # indicators and data is just a placeholder; nominally data
                # should be np.arange(K)[na,:].repeat(N,axis=0)
                counts = np.atleast_2d(weights).sum(0)
            else:
                # when weights is 1D, data is indices and we do a weighted
                # bincount
                counts = np.bincount(data,weights,minlength=self.K)
        else:
            if len(weights) == 0:
                counts = np.zeros(self.K,dtype=int)
            else:
                # recurse over the list of (data, weights) pairs and sum
                data = data if data else [None]*len(weights)
                counts = sum(self._get_weighted_statistics(d,w)
                        for d, w in zip(data,weights))
        return counts
    ### Mean Field
    def meanfieldupdate(self,data,weights):
        # standard conjugate update of the variational Dirichlet parameter
        self._alpha_mf = self.alphav_0 + self._get_weighted_statistics(data,weights)
        self.weights = self._alpha_mf / self._alpha_mf.sum() # for plotting
        assert (self._alpha_mf > 0.).all()
        return self
    def get_vlb(self):
        # return avg energy plus entropy, our contribution to the vlb
        # see Eq. 10.66 in Bishop
        logpitilde = self.expected_log_likelihood() # default is on np.arange(self.K)
        q_entropy = -1* (
                (logpitilde*(self._alpha_mf-1)).sum()
                + special.gammaln(self._alpha_mf.sum()) - special.gammaln(self._alpha_mf).sum())
        p_avgengy = special.gammaln(self.alphav_0.sum()) - special.gammaln(self.alphav_0).sum() \
                + ((self.alphav_0-1)*logpitilde).sum()
        return p_avgengy + q_entropy
    def expected_log_likelihood(self,x=None):
        # usually called when np.all(x == np.arange(self.K));
        # E[log pi_k] under the variational Dirichlet posterior
        x = x if x is not None else slice(None)
        return special.digamma(self._alpha_mf[x]) - special.digamma(self._alpha_mf.sum())
    ### Mean Field SGD
    def meanfield_sgdstep(self,data,weights,minibatchfrac,stepsize):
        # stochastic natural-gradient step: blend the old variational
        # parameter with the minibatch-rescaled conjugate update
        self._alpha_mf = \
                (1-stepsize) * self._alpha_mf + stepsize * (
                        self.alphav_0
                        + 1./minibatchfrac * self._get_weighted_statistics(data,weights))
        self.weights = self._alpha_mf / self._alpha_mf.sum() # for plotting
        return self
    def _resample_from_mf(self):
        # draw a point estimate from the variational posterior
        self.weights = np.random.dirichlet(self._alpha_mf)
    ### Max likelihood
    def max_likelihood(self,data,weights=None):
        # MLE: normalized (possibly weighted) counts
        if weights is None:
            counts = self._get_statistics(data)
        else:
            counts = self._get_weighted_statistics(data,weights)
        self.weights = counts/counts.sum()
        return self
    def MAP(self,data,weights=None):
        # MAP estimate: counts smoothed by the Dirichlet prior, normalized
        if weights is None:
            counts = self._get_statistics(data)
        else:
            counts = self._get_weighted_statistics(data,weights)
        counts += self.alphav_0
        self.weights = counts/counts.sum()
        return self
class CategoricalAndConcentration(Categorical):
    '''
    Categorical with resampling of the symmetric Dirichlet concentration
    parameter.
        concentration ~ Gamma(a_0,b_0)
    The Dirichlet prior over pi is then
        pi ~ Dir(concentration/K)
    '''
    def __init__(self,a_0,b_0,K,alpha_0=None,weights=None):
        # the concentration is managed by a GammaCompoundDirichlet object;
        # the alpha_0 property below delegates to it
        self.alpha_0_obj = GammaCompoundDirichlet(a_0=a_0,b_0=b_0,K=K,concentration=alpha_0)
        super(CategoricalAndConcentration,self).__init__(alpha_0=self.alpha_0,
                K=K,weights=weights)
    def _get_alpha_0(self):
        return self.alpha_0_obj.concentration
    def _set_alpha_0(self,alpha_0):
        # keep the compound object and the symmetric Dirichlet vector in sync
        self.alpha_0_obj.concentration = alpha_0
        self.alphav_0 = np.repeat(alpha_0/self.K,self.K)
    alpha_0 = property(_get_alpha_0, _set_alpha_0)
    @property
    def params(self):
        return dict(alpha_0=self.alpha_0,weights=self.weights)
    @property
    def hypparams(self):
        # NOTE(review): self.a_0 and self.b_0 are never set on this object
        # (they live on self.alpha_0_obj), so this property likely raises
        # AttributeError -- confirm against callers
        return dict(a_0=self.a_0,b_0=self.b_0,K=self.K)
    def resample(self,data=[]):
        # first resample the concentration given the counts, then the weights
        counts = self._get_statistics(data,self.K)
        self.alpha_0_obj.resample(counts)
        self.alpha_0 = self.alpha_0 # for the effect on alphav_0
        return super(CategoricalAndConcentration,self).resample(data)
    def resample_just_weights(self,data=[]):
        # resample pi only, leaving the concentration fixed
        return super(CategoricalAndConcentration,self).resample(data)
    def meanfieldupdate(self,*args,**kwargs): # TODO
        warn('MeanField not implemented for %s; concentration parameter will stay fixed')
        return super(CategoricalAndConcentration,self).meanfieldupdate(*args,**kwargs)
    def max_likelihood(self,*args,**kwargs):
        raise NotImplementedError
class Multinomial(Categorical):
    '''
    Like Categorical but the data are counts, so _get_statistics is overridden
    (though _get_weighted_statistics can stay the same!). log_likelihood also
    changes since, just like for the binomial special case, we sum over all
    possible orderings.
    For example, if K == 3, then a sample with n=5 might be
        array([2,2,1])
    A Poisson process conditioned on the number of points emitted.
    '''
    def log_likelihood(self,x):
        # x is an (N,K) array of count vectors; np.where guards 0*log(0),
        # and the gammaln terms give the multinomial coefficient
        assert isinstance(x,np.ndarray) and x.ndim == 2 and x.shape[1] == self.K
        return np.where(x,x*np.log(self.weights),0.).sum(1) \
            + special.gammaln(x.sum(1)+1) - special.gammaln(x+1).sum(1)
    def rvs(self,size=None):
        # draw labels from the Categorical base and histogram them into counts
        return np.bincount(super(Multinomial,self).rvs(size=size),minlength=self.K)
    def _get_statistics(self,data,K=None):
        # sufficient statistics are the summed count vectors
        K = K if K else self.K
        if isinstance(data,np.ndarray):
            return np.atleast_2d(data).sum(0)
        else:
            if len(data) == 0:
                return np.zeros(K,dtype=int)
            return np.concatenate(data).sum(0)
    def expected_log_likelihood(self,x=None):
        # only the identity case (x == eye) is supported, i.e. E[log pi_k]
        if x is not None and (not x.ndim == 2 or not np.all(x == np.eye(x.shape[0]))):
            raise NotImplementedError # TODO nontrivial expected log likelihood
        return super(Multinomial,self).expected_log_likelihood()
# Multinomial likelihood (count vectors) combined with Gibbs resampling of the
# symmetric Dirichlet concentration, via cooperative multiple inheritance.
class MultinomialAndConcentration(CategoricalAndConcentration,Multinomial):
    pass
class CRP(GibbsSampling):
    '''
    concentration ~ Gamma(a_0,b_0) [b_0 is inverse scale, inverse of numpy scale arg]
    rvs ~ CRP(concentration)
    This class models CRPs. The parameter is the concentration parameter (proportional
    to probability of starting a new table given some number of customers in the
    restaurant), which has a Gamma prior.
    '''
    def __init__(self,a_0,b_0,concentration=None):
        self.a_0 = a_0
        self.b_0 = b_0
        if concentration is not None:
            self.concentration = concentration
        else:
            # no value given: draw the concentration from its Gamma prior
            self.resample(niter=1)
    @property
    def params(self):
        return dict(concentration=self.concentration)
    @property
    def hypparams(self):
        return dict(a_0=self.a_0,b_0=self.b_0)
    def rvs(self,customer_counts):
        '''Simulate restaurants; customer_counts is an int or a list of ints,
        one per restaurant. Returns table-count lists.'''
        # could replace this with one of the faster C versions I have lying
        # around, but at least the Python version is clearer
        assert isinstance(customer_counts,list) or isinstance(customer_counts,int)
        if isinstance(customer_counts,int):
            customer_counts = [customer_counts]
        restaurants = []
        for num in customer_counts:
            # a CRP with num customers
            tables = []
            for c in range(num):
                # each existing table is chosen ~ its occupancy; a new table
                # is started with weight `concentration`
                newidx = sample_discrete(np.array(tables + [self.concentration]))
                if newidx == len(tables):
                    tables += [1]
                else:
                    tables[newidx] += 1
            restaurants.append(tables)
        return restaurants if len(restaurants) > 1 else restaurants[0]
    def log_likelihood(self,restaurants):
        '''Exchangeable partition probability of one table-count list, or an
        array of probabilities for a list of such lists.'''
        assert isinstance(restaurants,list) and len(restaurants) > 0
        if not isinstance(restaurants[0],list): restaurants=[restaurants]
        likes = []
        for counts in restaurants:
            counts = np.array([c for c in counts if c > 0]) # remove zero counts b/c of gammaln
            K = len(counts) # number of tables
            N = sum(counts) # number of customers
            likes.append(K*np.log(self.concentration) + np.sum(special.gammaln(counts)) +
                            special.gammaln(self.concentration) -
                            special.gammaln(N+self.concentration))
        return np.asarray(likes) if len(likes) > 1 else likes[0]
    def resample(self,data=[],niter=50):
        # Gibbs sweep over the auxiliary variables and the concentration
        for itr in range(niter):
            a_n, b_n = self._posterior_hypparams(*self._get_statistics(data))
            self.concentration = np.random.gamma(a_n,scale=1./b_n)
    def _posterior_hypparams(self,sample_numbers,total_num_distinct):
        # NOTE: this is a stochastic function: it samples auxiliary variables
        # (Escobar & West style w/s auxiliary-variable scheme)
        if total_num_distinct > 0:
            sample_numbers = np.array(sample_numbers)
            sample_numbers = sample_numbers[sample_numbers > 0]
            wvec = np.random.beta(self.concentration+1,sample_numbers)
            svec = np.array(stats.bernoulli.rvs(sample_numbers/(sample_numbers+self.concentration)))
            return self.a_0 + total_num_distinct-svec.sum(), (self.b_0 - np.log(wvec).sum())
        else:
            return self.a_0, self.b_0
        # (an unreachable `return self` used to follow here; removed)
    def _get_statistics(self,data):
        '''Return (customers per restaurant, total number of tables).'''
        assert isinstance(data,list)
        if len(data) == 0:
            sample_numbers = 0
            total_num_distinct = 0
        else:
            if isinstance(data[0],list):
                # list(...) keeps this correct under Python 3, where map
                # returns an iterator that np.array cannot consume directly
                sample_numbers = np.array(list(map(sum,data)))
                total_num_distinct = sum(map(len,data))
            else:
                sample_numbers = np.array(sum(data))
                total_num_distinct = len(data)
        return sample_numbers, total_num_distinct
class GammaCompoundDirichlet(CRP):
    # TODO this class is a bit ugly
    '''
    Implements a Gamma(a_0,b_0) prior over finite dirichlet concentration
    parameter. The concentration is scaled according to the weak-limit sequence.
    For each set of counts i, the model is
        concentration ~ Gamma(a_0,b_0)
        pi_i ~ Dir(concentration/K)
        data_i ~ Multinomial(pi_i)
    K is a free parameter in that with big enough K (relative to the size of the
    sampled data) everything starts to act like a DP; K is just the size of the
    size of the mesh projection.
    '''
    def __init__(self,K,a_0,b_0,concentration=None):
        self.K = K
        super(GammaCompoundDirichlet,self).__init__(a_0=a_0,b_0=b_0,
                concentration=concentration)
    @property
    def params(self):
        return dict(concentration=self.concentration)
    @property
    def hypparams(self):
        return dict(a_0=self.a_0,b_0=self.b_0,K=self.K)
    def rvs(self,sample_counts):
        # draw one multinomial count vector per requested sample size,
        # with pi drawn fresh from the symmetric Dirichlet each time
        if isinstance(sample_counts,int):
            sample_counts = [sample_counts]
        out = np.empty((len(sample_counts),self.K),dtype=int)
        for idx,c in enumerate(sample_counts):
            out[idx] = np.random.multinomial(c,
                np.random.dirichlet(np.repeat(self.concentration/self.K,self.K)))
        return out if out.shape[0] > 1 else out[0]
    def resample(self,data=[],niter=50,weighted_cols=None):
        # weighted_cols rescales the per-column table-count sampling
        if weighted_cols is not None:
            self.weighted_cols = weighted_cols
        else:
            self.weighted_cols = np.ones(self.K)
        # all this is to check if data is empty
        if isinstance(data,np.ndarray):
            size = data.sum()
        elif isinstance(data,list):
            size = sum(d.sum() for d in data)
        else:
            # scalar sentinel for "no data"
            assert data == 0
            size = 0
        if size > 0:
            return super(GammaCompoundDirichlet,self).resample(data,niter=niter)
        else:
            # nothing observed: a single prior draw suffices
            return super(GammaCompoundDirichlet,self).resample(data,niter=1)
    def _get_statistics(self,data):
        # NOTE: this is a stochastic function: it samples auxiliary variables
        counts = np.array(data,ndmin=2,order='C')
        # sample m's, which sample an inverse of the weak limit projection
        if counts.sum() == 0:
            return 0, 0
        else:
            # C-accelerated table-count sampler (falls back to Python version
            # below if the cython import at module top failed)
            m = sample_crp_tablecounts(self.concentration,counts,self.weighted_cols)
            return counts.sum(1), m.sum()
    def _get_statistics_python(self,data):
        # pure-Python reference implementation of _get_statistics
        counts = np.array(data,ndmin=2)
        # sample m's
        if counts.sum() == 0:
            return 0, 0
        else:
            m = 0
            for (i,j), n in np.ndenumerate(counts):
                m += (np.random.rand(n) < self.concentration*self.K*self.weighted_cols[j] \
                        / (np.arange(n)+self.concentration*self.K*self.weighted_cols[j])).sum()
            return counts.sum(1), m
| fivejjs/pybasicbayes | pybasicbayes/distributions/multinomial.py | Python | mit | 17,040 |
u"""
Fixer for (metaclass=X) -> __metaclass__ = X
Some semantics (see PEP 3115) may be altered in the translation."""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, syms, Node, Leaf, Newline, find_root
from lib2to3.pygram import token
from libfuturize.fixer_util import indentation, suitify
# from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
def has_metaclass(parent):
    """Scan the children of a classdef node for a `metaclass=X` keyword
    argument.

    Returns None if no metaclass argument is present; otherwise a sequence
    of the nodes involved (so the caller can remove them): either
    [argument_node, name_leaf, equal_leaf, value_node] for a lone argument,
    or (comma_leaf, name_leaf, equal_leaf, value_node) inside an arglist.
    """
    results = None
    for node in parent.children:
        kids = node.children
        if node.type == syms.argument:
            # class X(metaclass=Y): the metaclass is the only keyword argument
            if kids[0] == Leaf(token.NAME, u"metaclass") and \
               kids[1] == Leaf(token.EQUAL, u"=") and \
               kids[2]:
                #Hack to avoid "class X(=):" with this case.
                results = [node] + kids
                break
        elif node.type == syms.arglist:
            # Argument list... loop through it looking for:
            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
            for child in node.children:
                if results: break
                if child.type == token.COMMA:
                    #Store the last comma, which precedes the metaclass
                    comma = child
                elif type(child) == Node:
                    meta = equal = name = None
                    for arg in child.children:
                        if arg == Leaf(token.NAME, u"metaclass"):
                            #We have the (metaclass) part
                            meta = arg
                        elif meta and arg == Leaf(token.EQUAL, u"="):
                            #We have the (metaclass=) part
                            equal = arg
                        elif meta and equal:
                            #Here we go, we have (metaclass=X)
                            name = arg
                            # NOTE(review): if the metaclass argument comes
                            # before any comma in the arglist, `comma` is
                            # referenced before assignment here -- confirm
                            # the grammar guarantees a preceding comma
                            results = (comma, meta, equal, name)
                            break
    return results
class FixMetaclass(fixer_base.BaseFix):
    # Fixer that rewrites `class X(metaclass=M):` (PEP 3115 syntax) into the
    # Python 2 idiom: a `__metaclass__ = M` assignment as the first statement
    # of the class body. See the module docstring for semantic caveats.
    PATTERN = u"""
    classdef<any*>
    """
    def transform(self, node, results):
        # find and detach the metaclass keyword argument, if any
        meta_results = has_metaclass(node)
        if not meta_results: return
        for meta in meta_results:
            meta.remove()
        target = Leaf(token.NAME, u"__metaclass__")
        equal = Leaf(token.EQUAL, u"=", prefix=u" ")
        # meta is the last item in what was returned by has_metaclass(): name
        name = meta
        name.prefix = u" "
        stmt_node = Node(syms.atom, [target, equal, name])
        # ensure the class body is an indented suite before inserting
        suitify(node)
        for item in node.children:
            if item.type == syms.suite:
                for stmt in item.children:
                    if stmt.type == token.INDENT:
                        # Insert, in reverse order, the statement, a newline,
                        # and an indent right after the first indented line
                        loc = item.children.index(stmt) + 1
                        # Keep consistent indentation form
                        ident = Leaf(token.INDENT, stmt.value)
                        item.insert_child(loc, ident)
                        item.insert_child(loc, Newline())
                        item.insert_child(loc, stmt_node)
                        break
| hughperkins/kgsgo-dataset-preprocessor | thirdparty/future/src/libpasteurize/fixes/fix_metaclass.py | Python | mpl-2.0 | 3,268 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import jsonschema
import six
from rally.common import log as logging
from rally.common import utils
from rally import exceptions
LOG = logging.getLogger(__name__)
def context(name, order, hidden=False):
    """Context class wrapper.
    Each context class has to be wrapped by context() wrapper. It
    sets essential configuration of context classes. Actually this wrapper just
    adds attributes to the class.
    :param name: Name of the class, used in the input task
    :param order: As far as we can use multiple context classes that sometimes
                  depend on each other we have to specify order of execution.
                  Contexts with smaller order are run first
    :param hidden: If it is true you won't be able to specify context via
                   task config
    """
    def decorator(cls):
        # Stamp the metadata onto the class object itself.
        for attr, value in (("_ctx_name", name),
                            ("_ctx_order", order),
                            ("_ctx_hidden", hidden)):
            setattr(cls, attr, value)
        return cls
    return decorator
@six.add_metaclass(abc.ABCMeta)
@context(name="base", order=0, hidden=True)
class Context(object):
    """This class is a factory for context classes.
    Every context class should be a subclass of this class and implement
    2 abstract methods: setup() and cleanup()
    It covers:
        1) proper setting up of context config
        2) Auto discovering & get by name
        3) Validation by CONFIG_SCHEMA
        4) Order of context creation
    """
    # JSON schema used by validate(); subclasses override it
    CONFIG_SCHEMA = {}
    def __init__(self, context):
        # pick this context's own section out of the task-wide config dict
        self.config = context.get("config", {}).get(self.get_name(), {})
        if hasattr(self, "DEFAULT_CONFIG"):
            # fill in defaults without overwriting user-supplied values
            for key, value in self.DEFAULT_CONFIG.items():
                self.config.setdefault(key, value)
        self.context = context
        self.task = context["task"]
    # ordering comparisons delegate to _ctx_order so contexts sort by
    # execution order (see the context() decorator)
    def __lt__(self, other):
        return self.get_order() < other.get_order()
    def __gt__(self, other):
        return self.get_order() > other.get_order()
    def __eq__(self, other):
        return self.get_order() == other.get_order()
    @classmethod
    def validate(cls, config, non_hidden=False):
        # hidden contexts may not be configured directly from a task file
        if non_hidden and cls._ctx_hidden:
            raise exceptions.NoSuchContext(name=cls.get_name())
        jsonschema.validate(config, cls.CONFIG_SCHEMA)
    @classmethod
    def get_name(cls):
        return cls._ctx_name
    @classmethod
    def get_order(cls):
        return cls._ctx_order
    @staticmethod
    def get_by_name(name):
        """Return Context class by name."""
        for context in utils.itersubclasses(Context):
            if name == context.get_name():
                return context
        raise exceptions.NoSuchContext(name=name)
    @abc.abstractmethod
    def setup(self):
        """Set context of benchmark."""
    @abc.abstractmethod
    def cleanup(self):
        """Clean context of benchmark."""
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # cleanup always runs, even when the body raised
        self.cleanup()
class ContextManager(object):
    """Create context environment and run method inside it."""
    def __init__(self, context_obj):
        # contexts whose setup() has actually been started, in order
        self._visited = []
        self.context_obj = context_obj
    @staticmethod
    def validate(context, non_hidden=False):
        # validate each named context's config against its schema
        for name, config in six.iteritems(context):
            Context.get_by_name(name).validate(config, non_hidden=non_hidden)
    def _get_sorted_context_lst(self):
        # instantiate every configured context and sort by execution order
        ctxlst = map(Context.get_by_name, self.context_obj["config"])
        return sorted(map(lambda ctx: ctx(self.context_obj), ctxlst))
    def setup(self):
        """Creates benchmark environment from config."""
        self._visited = []
        for ctx in self._get_sorted_context_lst():
            # record before setup() so a failing context is still cleaned up
            self._visited.append(ctx)
            ctx.setup()
        return self.context_obj
    def cleanup(self):
        """Destroys benchmark environment."""
        ctxlst = self._visited or self._get_sorted_context_lst()
        # tear down in reverse order of creation; keep going on errors so
        # one broken context cannot leak the resources of the others
        for ctx in ctxlst[::-1]:
            try:
                ctx.cleanup()
            except Exception as e:
                LOG.error("Context %s failed during cleanup." % ctx.get_name())
                LOG.exception(e)
    def __enter__(self):
        try:
            self.setup()
        except Exception:
            # partial setup must not leak: clean up what was created
            self.cleanup()
            raise
    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.cleanup()
| pandeyop/rally | rally/benchmark/context/base.py | Python | apache-2.0 | 5,019 |
import os
import sys
from setuptools import setup, find_packages, Command
class RunTests(Command):
    """setuptools command that runs the Django test suite from test_project."""
    description = "Run the django test suite from the test_project dir."
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # run the tests from inside test_project so its settings are found
        this_dir = os.getcwd()
        testproj_dir = os.path.join(this_dir, "test_project")
        os.chdir(testproj_dir)
        sys.path.append(testproj_dir)
        # NOTE(review): django.core.management.execute_manager was deprecated
        # in Django 1.4 and removed in 1.6 -- verify the supported Django range
        from django.core.management import execute_manager
        os.environ["DJANGO_SETTINGS_MODULE"] = os.environ.get(
                    "DJANGO_SETTINGS_MODULE", "settings")
        settings_file = os.environ["DJANGO_SETTINGS_MODULE"]
        settings_mod = __import__(settings_file, {}, {}, [''])
        execute_manager(settings_mod, argv=[
            __file__, "test"])
        # restore the original working directory afterwards
        os.chdir(this_dir)
# Package metadata and build configuration; `python setup.py test` is wired
# to the RunTests command above.
setup(
    name = "django-serverconf",
    version = "0.1",
    packages = find_packages(exclude=['test_project']),
    install_requires = [
        'django>=1.3',
    ],
    include_package_data = True,
    cmdclass = {"test": RunTests},
    author="Stefan Kjartansson",
    author_email="esteban.supreme@gmail.com",
    description="Server Configuration Helpers",
    zip_safe = False,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Framework :: Django",
    ]
)
| StefanKjartansson/Django-Server-Conf | setup.py | Python | bsd-2-clause | 1,597 |
# -*- coding: utf-8 -*-
################################################################
### common functions for data structures, file name manipulation, etc.
################################################################
from __future__ import print_function
import os
import os.path
import re
import sys
import sysconfig
import unicodedata
import inspect
import glob
import cPickle
from ocrolib.exceptions import (BadClassLabel, BadInput, FileNotFound,
OcropusException)
import numpy
from numpy import (amax, amin, array, bitwise_and, clip, dtype, mean, minimum,
nan, sin, sqrt, zeros)
import pylab
from pylab import (clf, cm, ginput, gray, imshow, ion, subplot, where)
from scipy.ndimage import morphology, measurements
import PIL
from default import getlocal
from toplevel import (checks, ABINARY2, AINT2, AINT3, BOOL, DARKSEG, GRAYSCALE,
LIGHTSEG, LINESEG, PAGESEG)
import chars
import codecs
import ligatures
import lstm
import morph
import multiprocessing
import sl
pickle_mode = 2
################################################################
# text normalization
################################################################
def normalize_text(s):
    """Apply standard Unicode normalizations for OCR.
    This eliminates common ambiguities and weird unicode
    characters."""
    # NOTE: Python 2 only (`unicode` builtin and ur'' literals)
    s = unicode(s)
    # canonical composition, then whitespace cleanup
    s = unicodedata.normalize('NFC',s)
    s = re.sub(ur'\s+(?u)',' ',s)
    s = re.sub(ur'\n(?u)','',s)
    s = re.sub(ur'^\s+(?u)','',s)
    s = re.sub(ur'\s+$(?u)','',s)
    # project-specific character replacements from ocrolib.chars
    for m,r in chars.replacements:
        s = re.sub(unicode(m),unicode(r),s)
    return s
def project_text(s,kind="exact"):
    """Project text onto a smaller subset of characters
    for comparison.

    `kind` selects the projection: exact, nospace, spletdig (space, letters,
    digits), letdig, letters, digits, or lnc (uppercased letters only)."""
    s = normalize_text(s)
    s = re.sub(ur'( *[.] *){4,}',u'....',s) # dot rows
    s = re.sub(ur'[~_]',u'',s) # dot rows
    if kind=="exact":
        return s
    if kind=="nospace":
        return re.sub(ur'\s','',s)
    if kind=="spletdig":
        return re.sub(ur'[^A-Za-z0-9 ]','',s)
    if kind=="letdig":
        return re.sub(ur'[^A-Za-z0-9]','',s)
    if kind=="letters":
        return re.sub(ur'[^A-Za-z]','',s)
    if kind=="digits":
        return re.sub(ur'[^0-9]','',s)
    if kind=="lnc":
        s = s.upper()
        return re.sub(ur'[^A-Z]','',s)
    raise BadInput("unknown normalization: "+kind)
################################################################
### Text I/O
################################################################
def read_text(fname,nonl=1,normalize=1):
    """Read a UTF-8 text file and return its contents.

    By default one trailing newline is stripped (`nonl`) and the text is
    normalized for OCR processing with `normalize_text` (`normalize`)."""
    with codecs.open(fname,"r","utf-8") as stream:
        text = stream.read()
    if nonl and text.endswith('\n'):
        text = text[:-1]
    return normalize_text(text) if normalize else text
def write_text(fname,text,nonl=0,normalize=1):
    """Write text. This assumes files are in unicode.
    By default, it appends a trailing newline if one is missing (`nonl`
    disables this) and normalizes the text for OCR processing with
    `normalize_text` (`normalize`)."""
    if normalize:
        text = normalize_text(text)
    with codecs.open(fname,"w","utf-8") as stream:
        stream.write(text)
        # append the newline while the stream is still open; previously this
        # ran after the `with` block had already closed the file, raising
        # ValueError whenever the text did not end in a newline
        if not nonl and (len(text) == 0 or text[-1] != '\n'):
            stream.write('\n')
################################################################
### Image I/O
################################################################
def pil2array(im,alpha=0):
    """Convert a PIL image into a numpy byte array.

    L images become rank-2 arrays, RGB rank-3 with 3 channels, RGBA rank-3
    with 4 channels (alpha dropped unless `alpha` is true); any other mode
    is first converted to grayscale."""
    # NOTE(review): numpy.fromstring is deprecated in modern numpy
    # (use frombuffer) -- verify against the pinned numpy version
    if im.mode=="L":
        a = numpy.fromstring(im.tobytes(),'B')
        a.shape = im.size[1],im.size[0]
        return a
    if im.mode=="RGB":
        a = numpy.fromstring(im.tobytes(),'B')
        a.shape = im.size[1],im.size[0],3
        return a
    if im.mode=="RGBA":
        a = numpy.fromstring(im.tobytes(),'B')
        a.shape = im.size[1],im.size[0],4
        if not alpha: a = a[:,:,:3]
        return a
    # fall back: let PIL convert exotic modes to grayscale first
    return pil2array(im.convert("L"))
def array2pil(a):
    """Convert a numpy array back into a PIL image.

    Byte arrays become L (rank 2) or RGB (rank 3) images; float32 arrays
    become mode-F images. Other dtypes are rejected."""
    if a.dtype==dtype("B"):
        if a.ndim==2:
            return PIL.Image.frombytes("L",(a.shape[1],a.shape[0]),a.tostring())
        elif a.ndim==3:
            return PIL.Image.frombytes("RGB",(a.shape[1],a.shape[0]),a.tostring())
        else:
            raise OcropusException("bad image rank")
    elif a.dtype==dtype('float32'):
        # NOTE(review): this branch uses the removed-in-Pillow `fromstring`
        # while the byte branches use `frombytes` -- confirm PIL version
        return PIL.Image.fromstring("F",(a.shape[1],a.shape[0]),a.tostring())
    else:
        raise OcropusException("unknown image type")
def isbytearray(a):
    """True if `a` is an unsigned-byte (uint8) array."""
    return a.dtype == dtype('uint8')
def isfloatarray(a):
    """True if `a` is a single- or double-precision float array."""
    return a.dtype in (dtype('f'), dtype('float32'), dtype('float64'))
def isintarray(a):
    """True if `a` has any of the integer dtypes used by ocrolib
    (unsigned byte plus 16/32/64-bit signed and unsigned ints)."""
    accepted = ('B','int16','int32','int64','uint16','uint32','uint64')
    return any(a.dtype == dtype(t) for t in accepted)
def isintegerarray(a):
    """True if `a` is a 32- or 64-bit (signed or unsigned) integer array.
    Note that byte and 16-bit arrays do NOT qualify here."""
    accepted = ('int32','int64','uint32','uint64')
    return any(a.dtype == dtype(t) for t in accepted)
@checks(str,pageno=int,_=GRAYSCALE)
def read_image_gray(fname,pageno=0):
    """Read an image and returns it as a floating point array.
    The optional page number allows images from files containing multiple
    images to be addressed. Byte and short arrays are rescaled to
    the range 0...1 (unsigned) or -1...1 (signed)."""
    if type(fname)==tuple: fname,pageno = fname
    assert pageno==0
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    # rescale integer types to a unit range; once divided, the array is
    # float64, so only one of these branches can apply per call
    if a.dtype==dtype('uint8'):
        a = a/255.0
    if a.dtype==dtype('int8'):
        a = a/127.0
    elif a.dtype==dtype('uint16'):
        a = a/65536.0
    elif a.dtype==dtype('int16'):
        a = a/32767.0
    elif isfloatarray(a):
        pass
    else:
        # NOTE(review): str + numpy.dtype concatenation raises TypeError,
        # so this error path itself likely fails -- confirm
        raise OcropusException("unknown image type: "+a.dtype)
    if a.ndim==3:
        # collapse color images to grayscale by channel mean
        a = mean(a,2)
    return a
def write_image_gray(fname,image,normalize=0,verbose=0):
    """Write an image to disk. If the image is of floating point
    type, its values are clipped to the range [0,1],
    multiplied by 255 and converted to unsigned bytes. Otherwise,
    the image must be of type unsigned byte.

    NOTE(review): the `normalize` parameter is accepted but never used --
    confirm whether callers rely on it."""
    if verbose: print("# writing", fname)
    if isfloatarray(image):
        image = array(255*clip(image,0.0,1.0),'B')
    assert image.dtype==dtype('B'),"array has wrong dtype: %s"%image.dtype
    im = array2pil(image)
    im.save(fname)
@checks(str,_=ABINARY2)
def read_image_binary(fname,dtype='i',pageno=0):
    """Read an image from disk and return it as a binary image
    of the given dtype.

    Color images are collapsed with a per-pixel channel max, then
    thresholded at the midpoint between the image's min and max values."""
    if type(fname)==tuple: fname,pageno = fname
    assert pageno==0
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    if a.ndim==3: a = amax(a,axis=2)
    # threshold at the midrange of the observed values
    return array(a>0.5*(amin(a)+amax(a)),dtype)
@checks(str,ABINARY2)
def write_image_binary(fname,image,verbose=0):
    """Write a binary image to disk. This verifies first that the given image
    is, in fact, binary. The image may be of any type, but must consist of only
    two values."""
    if verbose: print("# writing", fname)
    assert image.ndim==2
    # `midrange` is presumably defined elsewhere in this module -- it
    # thresholds at the midpoint of the value range (TODO confirm)
    image = array(255*(image>midrange(image)),'B')
    im = array2pil(image)
    im.save(fname)
@checks(AINT3,_=AINT2)
def rgb2int(a):
    """Pack a rank-3 byte RGB image into a rank-2 int32 image whose pixel
    values are 0xRRGGBB."""
    assert a.ndim==3
    assert a.dtype==dtype('B')
    red = 0x10000*a[:,:,0]
    green = 0x100*a[:,:,1]
    blue = a[:,:,2]
    return array(0xffffff&(red|green|blue),'i')
@checks(AINT2,_=AINT3)
def int2rgb(image):
    """Unpack a rank-2 integer image of 0xRRGGBB pixel values into a
    rank-3 byte array with one channel per color."""
    assert image.ndim==2
    assert isintarray(image)
    out = zeros(list(image.shape)+[3],'B')
    # assigning into a byte array truncates each shifted value to 8 bits
    for channel, shift in enumerate((16, 8, 0)):
        out[:,:,channel] = image>>shift
    return out
@checks(LIGHTSEG,_=DARKSEG)
def make_seg_black(image):
    """Return a copy of the segmentation with white (0xffffff) background
    pixels remapped to 0."""
    assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
    seg = image.copy()
    seg[seg==0xffffff] = 0
    return seg
@checks(DARKSEG,_=LIGHTSEG)
def make_seg_white(image):
    """Return a copy of the segmentation with black (0) background pixels
    remapped to white (0xffffff)."""
    assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
    seg = image.copy()
    seg[seg==0] = 0xffffff
    return seg
@checks(str,_=LINESEG)
def read_line_segmentation(fname):
    """Reads a line segmentation, that is an RGB image whose values
    encode the segmentation of a text line. Returns an int array."""
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    assert a.dtype==dtype('B')
    assert a.ndim==3
    # pack the RGB channels into one int per pixel, then map the white
    # background to 0 for internal (dark) representation
    image = rgb2int(a)
    result = make_seg_black(image)
    return result
@checks(str,LINESEG)
def write_line_segmentation(fname,image):
    """Writes a line segmentation, that is an RGB image whose values
    encode the segmentation of a text line."""
    # convert the internal dark (0-background) form back to the on-disk
    # white (0xffffff-background) RGB form
    a = int2rgb(make_seg_white(image))
    im = array2pil(a)
    im.save(fname)
@checks(str,_=PAGESEG)
def read_page_segmentation(fname):
    """Reads a page segmentation, that is an RGB image whose values
    encode the segmentation of a page. Returns an int array."""
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    assert a.dtype==dtype('B')
    assert a.ndim==3
    # same packing convention as line segmentations: 0xRRGGBB ints with a
    # 0 (black) background internally
    segmentation = rgb2int(a)
    segmentation = make_seg_black(segmentation)
    return segmentation
def write_page_segmentation(fname,image):
    """Writes a page segmentation, that is an RGB image whose values
    encode the segmentation of a page."""
    assert image.ndim==2
    assert image.dtype in [dtype('int32'),dtype('int64')]
    # restore the white background expected in the on-disk RGB encoding
    a = int2rgb(make_seg_white(image))
    im = array2pil(a)
    im.save(fname)
def iulib_page_iterator(files):
    """Yield (grayscale image, filename) pairs for a sequence of image files."""
    for path in files:
        yield read_image_gray(path), path
def norm_max(a):
    """Scale the array so that its maximum value becomes 1."""
    peak = amax(a)
    return a/peak
def pad_by(image,r,dtype=None):
    """Symmetrically pad `image` with `r` pixels of zeros on every side.

    The result has dtype `dtype` if given, otherwise the input's dtype.
    FIXME: replace by scipy version."""
    if dtype is None: dtype = image.dtype
    w,h = image.shape
    # allocate the padded canvas in the requested dtype; previously the
    # `dtype` argument was computed but never passed to zeros(), so the
    # result was always float64
    result = zeros((w+2*r,h+2*r),dtype=dtype)
    result[r:(w+r),r:(h+r)] = image
    return result
class RegionExtractor:
"""A class facilitating iterating over the parts of a segmentation."""
def __init__(self):
self.cache = {}
def clear(self):
del self.cache
self.cache = {}
def setImage(self,image):
return self.setImageMasked(image)
def setImageMasked(self,image,mask=None,lo=None,hi=None):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This picks a subset of the segmentation to iterate
over, using a mask and lo and hi values.."""
assert image.dtype==dtype('B') or image.dtype==dtype('i'),"image must be type B or i"
if image.ndim==3: image = rgb2int(image)
assert image.ndim==2,"wrong number of dimensions"
self.image = image
labels = image
if lo is not None: labels[labels<lo] = 0
if hi is not None: labels[labels>hi] = 0
if mask is not None: labels = bitwise_and(labels,mask)
labels,correspondence = morph.renumber_labels_ordered(labels,correspondence=1)
self.labels = labels
self.correspondence = correspondence
self.objects = [None]+morph.find_objects(labels)
def setPageColumns(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the columns."""
self.setImageMasked(image,0xff0000,hi=0x800000)
def setPageParagraphs(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the paragraphs (if present
in the segmentation)."""
self.setImageMasked(image,0xffff00,hi=0x800000)
def setPageLines(self,image):
"""Set the image to be iterated over. This should be an RGB image,
ndim==3, dtype=='B'. This iterates over the lines."""
self.setImageMasked(image,0xffffff,hi=0x800000)
def id(self,i):
"""Return the RGB pixel value for this segment."""
return self.correspondence[i]
def x0(self,i):
"""Return x0 (column) for the start of the box."""
return self.bbox(i)[1]
def x1(self,i):
"""Return x0 (column) for the end of the box."""
return self.bbox(i)[3]
def y0(self,i):
"""Return y0 (row) for the start of the box."""
h = self.image.shape[0]
return h-self.bbox(i)[2]-1
def y1(self,i):
"""Return y0 (row) for the end of the box."""
h = self.image.shape[0]
return h-self.bbox(i)[0]-1
def bbox(self,i):
"""Return the bounding box in raster coordinates
(row0,col0,row1,col1)."""
r = self.objects[i]
# print("@@@bbox", i, r)
return (r[0].start,r[1].start,r[0].stop,r[1].stop)
def bboxMath(self,i):
"""Return the bounding box in math coordinates
(row0,col0,row1,col1)."""
h = self.image.shape[0]
(y0,x0,y1,x1) = self.bbox(i)
return (h-y1-1,x0,h-y0-1,x1)
def length(self):
"""Return the number of components."""
return len(self.objects)
def mask(self,index,margin=0):
"""Return the mask for component index."""
b = self.objects[index]
# print("@@@mask", index, b)
m = self.labels[b]
m[m!=index] = 0
if margin>0: m = pad_by(m,margin)
return array(m!=0,'B')
def extract(self,image,index,margin=0):
"""Return the subimage for component index."""
h,w = image.shape[:2]
(r0,c0,r1,c1) = self.bbox(index)
# mask = self.mask(index,margin=margin)
return image[max(0,r0-margin):min(h,r1+margin),max(0,c0-margin):min(w,c1+margin),...]
    def extractMasked(self,image,index,grow=0,bg=None,margin=0,dtype=None):
        """Return the masked subimage for component index, elsewhere the bg value.

        The component mask may be dilated by `grow` pixels and padded by
        `margin`; `bg` defaults to the image maximum.  NOTE(review): the
        `dtype` parameter is accepted but never used."""
        if bg is None: bg = amax(image)
        h,w = image.shape[:2]
        mask = self.mask(index,margin=margin)
        # FIXME ... not circular
        if grow>0: mask = morphology.binary_dilation(mask,iterations=grow)
        mh,mw = mask.shape
        box = self.bbox(index)
        r0,c0,r1,c1 = box
        # cut a window out of the image matching the (padded) mask shape,
        # filling out-of-bounds areas with bg
        subimage = sl.cut(image,(r0,c0,r0+mh-2*margin,c0+mw-2*margin),margin,bg=bg)
        return where(mask,subimage,bg)
################################################################
### Object reading and writing
### This handles reading and writing zipped files directly,
### and it also contains workarounds for changed module/class names.
################################################################
def save_object(fname,obj,zip=0):
    """Pickle `obj` to `fname` using protocol 2.  If zip>0, or the file
    name ends in ".gz", the output is piped through an external gzip
    process instead of being written directly.

    NOTE(review): the file name is interpolated into a shell command
    protected only by single quotes, so names containing a single quote
    will break; confirm os.popen mode "wb" on all supported platforms."""
    if zip==0 and fname.endswith(".gz"):
        zip = 1
    if zip>0:
        # with gzip.GzipFile(fname,"wb") as stream:
        with os.popen("gzip -9 > '%s'"%fname,"wb") as stream:
            cPickle.dump(obj,stream,2)
    else:
        with open(fname,"wb") as stream:
            cPickle.dump(obj,stream,2)
def unpickle_find_global(mname,cname):
    """Resolve a global `cname` in module `mname` for unpickling,
    importing the module on demand.  The legacy module name "lstm.lstm"
    is mapped onto the current `lstm` module so old pickles keep loading."""
    if mname=="lstm.lstm":
        return getattr(lstm,cname)
    if mname not in sys.modules:
        # __import__ registers the module in sys.modules; this replaces
        # the Python-2-only `exec "import ..."` statement form.
        __import__(mname)
    return getattr(sys.modules[mname],cname)
def load_object(fname,zip=0,nofind=0,verbose=0):
    """Loads an object from disk. By default, this handles zipped files
    and searches in the usual places for OCRopus. It also handles some
    class names that have changed.

    With nofind, `fname` is used verbatim instead of being resolved via
    ocropus_find_file.  Gzipped files (zip>0 or a ".gz" suffix) are piped
    through an external gunzip process.  Legacy pickles are handled by
    installing unpickle_find_global as the unpickler's find_global hook
    (Python 2 cPickle API)."""
    if not nofind:
        fname = ocropus_find_file(fname)
    if verbose:
        print("# loading object", fname)
    if zip==0 and fname.endswith(".gz"):
        zip = 1
    if zip>0:
        # with gzip.GzipFile(fname,"rb") as stream:
        with os.popen("gunzip < '%s'"%fname,"rb") as stream:
            unpickler = cPickle.Unpickler(stream)
            unpickler.find_global = unpickle_find_global
            return unpickler.load()
    else:
        with open(fname,"rb") as stream:
            unpickler = cPickle.Unpickler(stream)
            unpickler.find_global = unpickle_find_global
            return unpickler.load()
################################################################
### Simple record object.
################################################################
class Record:
    """A minimal record type: Record(x=3,y=9) creates an object whose
    attributes x and y are set from the keyword arguments."""
    def __init__(self,**kw):
        """Initialize instance attributes from the keyword arguments."""
        for key,value in kw.items():
            setattr(self,key,value)
    def like(self,obj):
        """Copy all instance attributes of `obj` onto self; returns self
        so calls can be chained."""
        for key,value in obj.__dict__.items():
            setattr(self,key,value)
        return self
################################################################
### Histograms
################################################################
def chist(l):
    """Counting histogram: return a list of (count,item) tuples for the
    items of `l`, most frequent first (ties broken by the item value,
    descending)."""
    counts = {}
    for item in l:
        counts[item] = counts.get(item,0)+1
    pairs = [(n,item) for item,n in counts.items()]
    pairs.sort(reverse=1)
    return pairs
################################################################
### multiprocessing
################################################################
def number_of_processors():
    """Return the number of CPUs, as reported by multiprocessing."""
    return multiprocessing.cpu_count()
def parallel_map(fun,jobs,parallel=0,chunksize=1):
    """Map `fun` over `jobs`, yielding results as they become available.

    With parallel<2 this is a plain sequential generator (results in
    input order).  Otherwise a multiprocessing pool with `parallel`
    workers is used via imap_unordered, so results may arrive out of
    order; `chunksize` is passed through to imap_unordered."""
    if parallel<2:
        for e in jobs:
            yield fun(e)
    else:
        # Create the pool *outside* the try block: if Pool() itself
        # raises, the finally clause must not fail on an unbound `pool`
        # name and mask the real exception.
        pool = multiprocessing.Pool(parallel)
        try:
            for e in pool.imap_unordered(fun,jobs,chunksize):
                yield e
        finally:
            pool.close()
            pool.join()
            del pool
def check_valid_class_label(s):
    """Determines whether the given character is a valid class label.
    Control characters and spaces are not permitted.

    Python 2 semantics: `unicode` labels may contain anything above the
    control/space range (\\x00-\\x20 rejected); byte (`str`) labels are
    restricted to printable ASCII (\\x21-\\x7e).  Anything that is not a
    string raises as well.  Raises BadClassLabel rather than returning a
    flag."""
    if type(s)==unicode:
        if re.search(r'[\0-\x20]',s):
            raise BadClassLabel(s)
    elif type(s)==str:
        if re.search(r'[^\x21-\x7e]',s):
            raise BadClassLabel(s)
    else:
        raise BadClassLabel(s)
################################################################
### file name manipulation
################################################################
@checks(str,_=str)
def findfile(name,error=1):
    """Find an OCRopus resource file via ocropus_find_file.

    NOTE(review): the `error` parameter is accepted but ignored;
    ocropus_find_file always raises FileNotFound on failure."""
    result = ocropus_find_file(name)
    return result
@checks(str)
def finddir(name):
    """Find an OCRopus-related directory by checking a few standard
    locations: the name itself, the local data directory, and the same
    two again with only the final path component.  (This needs to be
    integrated better with setup.py and the build system.)"""
    local = getlocal()
    _,tail = os.path.split(name)
    for path in [name, local+name, tail, local+tail]:
        if os.path.exists(path) and os.path.isdir(path):
            return path
    raise FileNotFound("file '"+path+"' not found in . or /usr/local/share/ocropus/")
@checks(str)
def allsplitext(path):
    """Split all the pathname extensions, so that "a/b.c.d" -> "a/b", ".c.d"

    Unlike os.path.splitext this strips *all* extensions of the last
    path component; dots in directory names are left alone (the pattern
    anchors directories on '/')."""
    match = re.search(r'((.*/)*[^.]*)([^/]*)',path)
    if not match:
        # every part of the pattern is optional, so this branch is
        # effectively unreachable; kept for safety
        return path,""
    else:
        return match.group(1),match.group(3)
@checks(str)
def base(path):
    """Return `path` with all extensions of its last component stripped
    (the first element of allsplitext)."""
    return allsplitext(path)[0]
@checks(str,{str,unicode})
def write_text_simple(file,s):
    """Write the given string s to the output file.

    Python 2 semantics: unicode strings are encoded to UTF-8 bytes
    before being written to the text-mode file handle."""
    with open(file,"w") as stream:
        if type(s)==unicode: s = s.encode("utf-8")
        stream.write(s)
@checks([str])
def glob_all(args):
    """Expand every command line argument with glob.  An argument of the
    form @listfile is instead read as a newline-separated list of file
    names.  Raises FileNotFound for any argument that expands to nothing."""
    result = []
    for arg in args:
        if arg[0]=="@":
            # indirection file: one file name per line, blanks ignored
            with open(arg[1:],"r") as stream:
                expanded = [name for name in stream.read().split("\n") if name!=""]
        else:
            expanded = sorted(glob.glob(arg))
        if len(expanded)<1:
            raise FileNotFound("%s: expansion did not yield any files"%arg)
        result += expanded
    return result
@checks([str])
def expand_args(args):
    """If given a single directory argument, expand it into the standard
    book layout (dir/????/??????.png); otherwise return the argument
    list unchanged."""
    if len(args)!=1 or not os.path.isdir(args[0]):
        return args
    return sorted(glob.glob(args[0]+"/????/??????.png"))
def ocropus_find_file(fname, gz=True):
    """Search for `fname` in one of the OCRopus data directories, as well as
    the current directory.  If `gz` is True, search also for gzipped files.

    Result of searching $fname is the first existing of:

        * $base/$fname
        * $base/$fname.gz        # if gz
        * $base/models/$fname
        * $base/models/$fname.gz # if gz
        * $base/data/$fname
        * $base/data/$fname.gz   # if gz
        * $base/gui/$fname
        * $base/gui/$fname.gz    # if gz

    $base is each of these base paths in turn:

        * `$OCROPUS_DATA` environment variable
        * current working directory
        * ../../../../share/ocropus from this file's install location
        * `/usr/local/share/ocropus`
        * `$PREFIX/share/ocropus` ($PREFIX being the Python installation
          prefix, usually `/usr`)

    Raises FileNotFound if no candidate exists."""
    possible_prefixes = []
    if os.getenv("OCROPUS_DATA"):
        possible_prefixes.append(os.getenv("OCROPUS_DATA"))
    possible_prefixes.append(os.curdir)
    possible_prefixes.append(os.path.normpath(os.path.join(
        os.path.dirname(inspect.getfile(inspect.currentframe())),
        os.pardir, os.pardir, os.pardir, os.pardir, "share", "ocropus")))
    possible_prefixes.append("/usr/local/share/ocropus")
    # datarootdir is None in windows so don't add it to search list
    if sysconfig.get_config_var("datarootdir") is not None:
        possible_prefixes.append(os.path.join(
            sysconfig.get_config_var("datarootdir"), "ocropus"))
    # Deduplicate while preserving order; a simple seen-set does exactly
    # what the previous numpy.unique(return_index=True) dance did.
    seen = set()
    unique_prefixes = []
    for prefix in possible_prefixes:
        if prefix not in seen:
            seen.add(prefix)
            unique_prefixes.append(prefix)
    for prefix in unique_prefixes:
        if not os.path.isdir(prefix):
            continue
        for basename in [".", "models", "data", "gui"]:
            if not os.path.isdir(os.path.join(prefix, basename)):
                continue
            full = os.path.join(prefix, basename, fname)
            if os.path.exists(full):
                return full
            if gz and os.path.exists(full + ".gz"):
                return full + ".gz"
    raise FileNotFound(fname)
def fvariant(fname,kind,gt=""):
    """Find the file variant corresponding to the given file name.
    Possible file variants are line (or png/bin), nrm, rseg, cseg,
    lattice, aligned, costs, and txt.
    Ground truth files have an extra suffix (usually something like "gt",
    as in 010001.gt.txt or 010001.rseg.gt.png). By default, the variant
    with the same ground truth suffix is produced. The non-ground-truth
    version can be produced with gt="", the ground truth version can
    be produced with gt="gt" (or some other desired suffix).

    NOTE(review): the assert after the "txt" branch forces gt=="", so
    the `gt` in the "lattice" branch below is always the empty string."""
    if gt!="": gt = "."+gt
    base,ext = allsplitext(fname)
    # text output: the only kind that may carry a gt suffix
    if kind=="txt":
        return base+gt+".txt"
    assert gt=="","gt suffix may only be supplied for .txt files (%s,%s,%s)"%(fname,kind,gt)
    # a text line image (binarized)
    if kind=="line" or kind=="png" or kind=="bin":
        return base+".bin.png"
    if kind=="nrm":
        return base+".nrm.png"
    # a recognition lattice
    if kind=="lattice":
        return base+gt+".lattice"
    # raw segmentation
    if kind=="rseg":
        return base+".rseg.png"
    # character segmentation
    if kind=="cseg":
        return base+".cseg.png"
    # text specifically aligned with cseg (this may be different from gt or txt)
    if kind=="aligned":
        return base+".aligned"
    # per character costs
    if kind=="costs":
        return base+".costs"
    raise BadInput("unknown kind: %s"%kind)
################################################################
### Utility for setting "parameters" on an object: a list of keywords for
### changing instance variables.
################################################################
def set_params(object,kw,warn=1):
    """Given an object and a dictionary of keyword arguments,
    set only those object properties that are already instance
    variables of the given object.  Returns a new dictionary
    without the key,value pairs that have been used.  If
    all keywords have been used, afterwards, len(kw)==0.

    The `warn` flag is currently unused."""
    kw = kw.copy()
    # Iterate over a snapshot of the items: deleting from a dict while
    # iterating its live items view is an error on Python 3 and fragile
    # in general.
    for k,v in list(kw.items()):
        if hasattr(object,k):
            setattr(object,k,v)
            del kw[k]
    return kw
################################################################
### warning and logging
################################################################
def caller():
    """Just returns info about the caller in string form (for error messages).

    Uses sys._getframe(2): frame 0 is caller() itself, frame 1 is the
    function that invoked it (e.g. warn/die), frame 2 is the frame being
    reported on.  The frame reference is deleted explicitly to avoid
    keeping a reference cycle alive."""
    frame = sys._getframe(2)
    info = inspect.getframeinfo(frame)
    result = "%s:%d (%s)"%(info.filename,info.lineno,info.function)
    del frame
    return result
def die(message,*args):
    """Write a FATAL message (prefixed with the caller's location) to
    stderr, then terminate the process with exit status 1."""
    text = message%args
    sys.stderr.write(caller()+" FATAL "+text+"\n")
    sys.exit(1)
def warn(message,*args):
    """Write a WARNING message (prefixed with the caller's location)
    to stderr."""
    text = message%args
    sys.stderr.write(caller()+" WARNING "+text+"\n")
already_warned = {}   # call sites that have already emitted a warning
def warn_once(message,*args):
    """Write a WARNING message to stderr, but only the first time it is
    triggered from any given call site."""
    site = caller()
    if site in already_warned: return
    already_warned[site] = 1
    sys.stderr.write(site+" WARNING "+(message%args)+"\n")
def quick_check_page_components(page_bin,dpi):
    """Quickly check whether the components of page_bin are
    reasonable. Returns a value between 0 and 1; <0.5 means that
    there is probably something wrong.

    Currently a stub: always reports 1.0 (everything fine)."""
    return 1.0
def quick_check_line_components(line_bin,dpi):
    """Quickly check whether the components of line_bin are
    reasonable. Returns a value between 0 and 1; <0.5 means that
    there is probably something wrong.

    Currently a stub: always reports 1.0 (everything fine)."""
    return 1.0
################################################################
### conversion functions
################################################################
def ustrg2unicode(u,lig=ligatures.lig):
    """Convert an iulib ustrg to a Python unicode string; the
    C++ version iulib.ustrg2unicode does weird things for special
    symbols like -3.  Codes the ligature table cannot decode are
    rendered as "<code>"; negative codes are skipped."""
    out = ""
    for i in range(u.length()):
        code = u.at(i)
        if code < 0:
            continue
        decoded = lig.chr(code)
        if decoded is None:
            out += "<%d>"%code
        else:
            out += decoded
    return out
################################################################
### loading and saving components
################################################################
# This code has to deal with a lot of special cases for all the
# different formats we have accrued.
def obinfo(ob):
    """A bit of information about the given object: its str
    representation, followed by its shape when it has one."""
    desc = str(ob)
    if hasattr(ob,"shape"):
        desc = desc + " " + str(ob.shape)
    return desc
def binarize_range(image,dtype='B',threshold=0.5):
    """Binarize an image at a fraction of its value range; for the
    default byte dtype the result is scaled to 0/255, otherwise 0/1."""
    cut = (amax(image)+amin(image))*threshold
    scale = 255 if dtype=='B' else 1
    return array(scale*(image>cut),dtype=dtype)
def plotgrid(data,d=10,shape=(30,30)):
    """Plot a list of images on a d x d grid (pylab interactive mode).
    Each row of `data` is reshaped to `shape` unless shape is None.
    ginput blocks briefly so the window gets a chance to draw."""
    ion()
    gray()
    clf()
    for i in range(min(d*d,len(data))):
        subplot(d,d,i+1)
        row = data[i]
        if shape is not None: row = row.reshape(shape)
        imshow(row)
    ginput(1,timeout=0.1)
def showrgb(r,g=None,b=None):
    """Display three 2D arrays as the R,G,B channels of a single image;
    missing channels default to `r` (i.e. grayscale)."""
    if g is None: g = r
    if b is None: b = r
    imshow(array([r,g,b]).transpose([1,2,0]))
def showgrid(l,cols=None,n=400,titles=None,xlabels=None,ylabels=None,**kw):
    """Display up to n images from the list l on a grid of subplots.
    Extra keyword arguments are passed to imshow; a gray colormap and
    nearest-neighbor interpolation are used unless overridden.  Optional
    per-image titles/xlabels/ylabels are applied when given."""
    if "cmap" not in kw: kw["cmap"] = cm.gray
    if "interpolation" not in kw: kw["interpolation"] = "nearest"
    n = minimum(n,len(l))
    if cols is None: cols = int(sqrt(n))
    rows = (n+cols-1)//cols
    for i in range(n):
        # select the subplot *before* configuring its ticks; previously
        # the ticks were cleared on whatever axes happened to be current,
        # leaving the last subplot with its ticks intact
        pylab.subplot(rows,cols,i+1)
        pylab.xticks([]) ;pylab.yticks([])
        pylab.imshow(l[i],**kw)
        if titles is not None: pylab.title(str(titles[i]))
        if xlabels is not None: pylab.xlabel(str(xlabels[i]))
        if ylabels is not None: pylab.ylabel(str(ylabels[i]))
def gt_explode(s):
    """Split a ground-truth transcription into per-character tokens.
    Multi-character tokens are encoded as _xxxx_ (1-4 characters); the
    escapes \\001 and \\002 decode to "_" and a backslash respectively."""
    pieces = re.split(r'_(.{1,4})_',s)
    tokens = []
    for i,piece in enumerate(pieces):
        if i%2:
            # odd indices are the captured _..._ groups: keep whole
            tokens.append(piece)
        else:
            # even indices are plain runs: one token per character
            tokens.extend(piece)
    tokens = [re.sub("\001","_",t) for t in tokens]
    return [re.sub("\002","\\\\",t) for t in tokens]
def gt_implode(l):
    """Inverse of gt_explode: join a token list into a transcription,
    wrapping multi-character tokens (2-4 chars) in _..._ and encoding
    the literal "_" token as "___".  Longer tokens raise BadInput."""
    parts = []
    for token in l:
        if token=="_":
            parts.append("___")
        elif len(token)<=1:
            parts.append(token)
        elif len(token)<=4:
            parts.append("_"+token+"_")
        else:
            raise BadInput("cannot create ground truth transcription for: %s"%l)
    return "".join(parts)
@checks(int,sequence=int,frac=int,_=BOOL)
def testset(index,sequence=0,frac=10):
    """Pseudo-randomly assign `index` to one of `frac` folds via a
    sin-based hash and report whether it lands in fold `sequence`
    (used for carving out test sets deterministically)."""
    # this doesn't have to be good, just a fast, somewhat random function
    return sequence==int(abs(sin(index))*1.23456789e6)%frac
def midrange(image,frac=0.5):
    """Return frac times the sum of the image's minimum and maximum
    values (for quick thresholding; frac=0.5 gives the middle of the
    value range)."""
    lo = amin(image)
    hi = amax(image)
    return frac*(lo+hi)
def remove_noise(line,minsize=8):
    """Remove connected components smaller than minsize pixels from an image.

    The input is thresholded at half its maximum, labeled, and pixels
    belonging to components with fewer than minsize pixels are dropped.
    minsize==0 disables filtering and returns the input unchanged."""
    if minsize==0: return line
    bin = (line>0.5*amax(line))
    labels,n = morph.label(bin)
    # per-label pixel counts, broadcast back onto the label image
    sums = measurements.sum(bin,labels,range(n+1))
    sums = sums[labels]
    # keep a pixel unless its component is nonzero but below minsize
    good = minimum(bin,1-(sums>0)*(sums<minsize))
    return good
class MovingStats:
    """Running mean over a sliding window of the most recent n samples."""
    def __init__(self,n=100):
        self.data = []     # most recent samples, oldest first
        self.n = n         # window size
        self.count = 0     # total number of samples ever added
    def add(self,x):
        """Record a new sample, discarding samples older than the window."""
        self.data.append(x)
        if len(self.data) > self.n:
            self.data = self.data[-self.n:]
        self.count += 1
    def mean(self):
        """Mean of the windowed samples; nan when no samples were added."""
        if not self.data: return nan
        return mean(self.data)
| tmbdev/ocropy | ocrolib/common.py | Python | apache-2.0 | 30,368 |
"""Add verification table
Revision ID: 90ac01a2df
Revises: df61cfff356e
Create Date: 2016-04-16 17:28:20.778467
"""
# revision identifiers, used by Alembic.
revision = '90ac01a2df'           # this migration's id
down_revision = 'df61cfff356e'    # migration this one applies on top of
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the verification table (id, ip4, expiry timestamp, JSON
    payload) with indexes on expires and ip4 for efficient lookup and
    cleanup."""
    op.create_table('verification',
    sa.Column('verification_id', sa.String(length=32), nullable=False),
    sa.Column('ip4', sa.BigInteger(), nullable=False),
    sa.Column('expires', sa.BigInteger(), nullable=False),
    sa.Column('data', postgresql.JSON(), nullable=False),
    sa.PrimaryKeyConstraint('verification_id')
    )
    op.create_index(op.f('ix_verification_expires'), 'verification', ['expires'], unique=False)
    op.create_index(op.f('ix_verification_ip4'), 'verification', ['ip4'], unique=False)
op.create_index(op.f('ix_verification_expires'), 'verification', ['expires'], unique=False)
op.create_index(op.f('ix_verification_ip4'), 'verification', ['ip4'], unique=False)
def downgrade():
    """Reverse of upgrade: drop the verification indexes, then the table."""
    op.drop_index(op.f('ix_verification_ip4'), table_name='verification')
    op.drop_index(op.f('ix_verification_expires'), table_name='verification')
    op.drop_table('verification')
| Floens/uchan | migrations/versions/90ac01a2df_add_verification_table.py | Python | mit | 1,083 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.