repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
flossmanualsfr/peril-en-daiza | refs/heads/master | game/mouse_set.py | 1 | import bge
def active():
bge.render.showMouse(True)
def inactive():
bge.render.showMouse(False) |
ASCrookes/django | refs/heads/master | tests/file_uploads/models.py | 691 | from django.db import models
class FileModel(models.Model):
testfile = models.FileField(upload_to='test_upload')
|
BlueLens/bl-magi | refs/heads/master | stylelens/dataset/bluehack/text.py | 1 | from stylelens_dataset.texts import Texts
TEXT_DATASET_FILE = './text_data.txt'
text_api = Texts()
def add_text(class_code, keyword):
text = {}
text['class_code'] = class_code
text['text'] = keyword
try:
res = text_api.add_text(text)
except Exception as e:
print(e)
if __name__ == '__main__':
try:
text_dataset = open(TEXT_DATASET_FILE, 'r')
texts = []
for pair in text_dataset.readlines():
map = pair.strip().split(' ', 1)
tmp = map[1].strip().split(',')
keywords = list(set(tmp))
class_code = str(map[0])
for keyword in keywords:
print('' + class_code + ":" + keyword)
add_text(class_code, keyword)
except Exception as e:
print(e)
|
sinkovit/ImmunoUtils | refs/heads/master | parse_mhcii.py | 1 | # Program parse_mhcii.py
#
# Description: Parse output from MHC Class II peptide binding
# prediction program (mhc_II_binding.py) and sort peptides with
# affinities below a specified threshold into T cell receptor
# classes. Note that this does not work with the consensus method
# since different algorithms may predict different core nonamers for a
# given 15-mer.
#
# Author: Robert Sinkovits, San Diego Supercomputer Center
import argparse
import mhcii
parser = argparse.ArgumentParser(description='Parse MHC II prediction data and sort peptides into T cell receptor classes')
parser.add_argument(dest='infile',
help='Input file in FASTA format')
parser.add_argument('-n', dest='nn_cutoff', default=0, type=int,
help='Cutoff affinity for NN algorithm affinity prediction')
parser.add_argument('-s', dest='smm_cutoff', default=0, type=int,
help='Cutoff affinity for SMM algorithm affinity prediction')
args = parser.parse_args()
infile = args.infile
nn_cutoff = args.nn_cutoff
smm_cutoff = args.smm_cutoff
mhcii.mhcii_to_TCR_classes(infile, nn_cutoff, smm_cutoff)
|
jlopp/statoshi | refs/heads/master | test/functional/mempool_unbroadcast.py | 35 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the mempool ensures transaction delivery by periodically sending
to peers until a GETDATA is received."""
import time
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
create_confirmed_utxos,
)
MAX_INITIAL_BROADCAST_DELAY = 15 * 60 # 15 minutes in seconds
class MempoolUnbroadcastTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.test_broadcast()
self.test_txn_removal()
def test_broadcast(self):
self.log.info("Test that mempool reattempts delivery of locally submitted transaction")
node = self.nodes[0]
min_relay_fee = node.getnetworkinfo()["relayfee"]
utxos = create_confirmed_utxos(min_relay_fee, node, 10)
self.disconnect_nodes(0, 1)
self.log.info("Generate transactions that only node 0 knows about")
# generate a wallet txn
addr = node.getnewaddress()
wallet_tx_hsh = node.sendtoaddress(addr, 0.0001)
# generate a txn using sendrawtransaction
us0 = utxos.pop()
inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
outputs = {addr: 0.0001}
tx = node.createrawtransaction(inputs, outputs)
node.settxfee(min_relay_fee)
txF = node.fundrawtransaction(tx)
txFS = node.signrawtransactionwithwallet(txF["hex"])
rpc_tx_hsh = node.sendrawtransaction(txFS["hex"])
# check transactions are in unbroadcast using rpc
mempoolinfo = self.nodes[0].getmempoolinfo()
assert_equal(mempoolinfo['unbroadcastcount'], 2)
mempool = self.nodes[0].getrawmempool(True)
for tx in mempool:
assert_equal(mempool[tx]['unbroadcast'], True)
# check that second node doesn't have these two txns
mempool = self.nodes[1].getrawmempool()
assert rpc_tx_hsh not in mempool
assert wallet_tx_hsh not in mempool
# ensure that unbroadcast txs are persisted to mempool.dat
self.restart_node(0)
self.log.info("Reconnect nodes & check if they are sent to node 1")
self.connect_nodes(0, 1)
# fast forward into the future & ensure that the second node has the txns
node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
self.sync_mempools(timeout=30)
mempool = self.nodes[1].getrawmempool()
assert rpc_tx_hsh in mempool
assert wallet_tx_hsh in mempool
# check that transactions are no longer in first node's unbroadcast set
mempool = self.nodes[0].getrawmempool(True)
for tx in mempool:
assert_equal(mempool[tx]['unbroadcast'], False)
self.log.info("Add another connection & ensure transactions aren't broadcast again")
conn = node.add_p2p_connection(P2PTxInvStore())
node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY)
time.sleep(2) # allow sufficient time for possibility of broadcast
assert_equal(len(conn.get_invs()), 0)
self.disconnect_nodes(0, 1)
node.disconnect_p2ps()
def test_txn_removal(self):
self.log.info("Test that transactions removed from mempool are removed from unbroadcast set")
node = self.nodes[0]
# since the node doesn't have any connections, it will not receive
# any GETDATAs & thus the transaction will remain in the unbroadcast set.
addr = node.getnewaddress()
txhsh = node.sendtoaddress(addr, 0.0001)
# check transaction was removed from unbroadcast set due to presence in
# a block
removal_reason = "Removed {} from set of unbroadcast txns before confirmation that txn was sent out".format(txhsh)
with node.assert_debug_log([removal_reason]):
node.generate(1)
if __name__ == "__main__":
MempoolUnbroadcastTest().main()
|
castlecms/castle.cms | refs/heads/master | castle/cms/cron/_link_report.py | 1 | from AccessControl.SecurityManagement import newSecurityManager
from castle.cms.linkreporter import Reporter
from Products.CMFPlone.interfaces.siteroot import IPloneSiteRoot
from tendo import singleton
def run_link_report(site):
reporter = Reporter(site)
if not reporter.valid:
return
try:
reporter()
except KeyboardInterrupt:
reporter.join()
def run(app):
singleton.SingleInstance('linkreport')
user = app.acl_users.getUser('admin')
newSecurityManager(None, user.__of__(app.acl_users))
for oid in app.objectIds():
obj = app[oid]
if IPloneSiteRoot.providedBy(obj):
run_link_report(obj)
if __name__ == '__main__':
run(app) # noqa
|
petebachant/scipy | refs/heads/master | scipy/stats/mstats.py | 37 | """
===================================================================
Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
===================================================================
.. currentmodule:: scipy.stats.mstats
This module contains a large number of statistical functions that can
be used with masked arrays.
Most of these functions are similar to those in scipy.stats but might
have small differences in the API or in the algorithm used. Since this
is a relatively new package, some API changes are still possible.
.. autosummary::
:toctree: generated/
argstoarray
betai
chisquare
count_tied_groups
describe
f_oneway
f_value_wilks_lambda
find_repeats
friedmanchisquare
kendalltau
kendalltau_seasonal
kruskalwallis
ks_twosamp
kurtosis
kurtosistest
linregress
mannwhitneyu
plotting_positions
mode
moment
mquantiles
msign
normaltest
obrientransform
pearsonr
plotting_positions
pointbiserialr
rankdata
scoreatpercentile
sem
signaltonoise
skew
skewtest
spearmanr
theilslopes
threshold
tmax
tmean
tmin
trim
trima
trimboth
trimmed_stde
trimr
trimtail
tsem
ttest_onesamp
ttest_ind
ttest_onesamp
ttest_rel
tvar
variation
winsorize
zmap
zscore
"""
from __future__ import division, print_function, absolute_import
from .mstats_basic import *
from .mstats_extras import *
from scipy.stats import gmean, hmean
|
coxmediagroup/googleads-python-lib | refs/heads/master | examples/adxbuyer/v201506/basic_operations/add_placements.py | 3 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds ad group criteria to an ad group.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupCriterionService.mutate
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201506')
# Construct keyword ad group criterion object.
placement1 = {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Placement',
'url': 'http://mars.google.com'
},
# These fields are optional.
'userStatus': 'PAUSED',
'destinationUrl': 'http://example.com/mars'
}
placement2 = {
'xsi_type': 'NegativeAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Placement',
'url': 'http://example.com/pluto'
},
}
# Construct operations and add ad group criteria.
operations = [
{
'operator': 'ADD',
'operand': placement1
},
{
'operator': 'ADD',
'operand': placement2
}
]
ad_group_criteria = ad_group_criterion_service.mutate(
operations)['value']
# Display results.
for criterion in ad_group_criteria:
print ('Placement ad group criterion with ad group id \'%s\', criterion '
'id \'%s\', and url \'%s\' was added.'
% (criterion['adGroupId'], criterion['criterion']['id'],
criterion['criterion']['url']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
MoKee/android_kernel_xiaomi_msm8226-common | refs/heads/kk_mkt | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
kkochubey1/docker-sikuli-novnc | refs/heads/master | noVNC/utils/websockify/websockify.py | 20 | run |
louispotok/pandas | refs/heads/master | pandas/tests/indexing/test_indexing_slow.py | 5 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from pandas.core.api import Series, DataFrame, MultiIndex
import pandas.util.testing as tm
import pytest
class TestIndexingSlow(object):
@pytest.mark.slow
def test_multiindex_get_loc(self): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
from numpy.random import randint, choice, randn
cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
def validate(mi, df, key):
mask = np.ones(len(df)).astype('bool')
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[:i + 1] not in mi.index
continue
assert key[:i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
right.drop(cols[:i + 1], axis=1, inplace=True)
right.set_index(cols[i + 1:-1], inplace=True)
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
else: # full key
right.set_index(cols[:-1], inplace=True)
if len(right) == 1: # single hit
right = Series(right['jolia'].values,
name=right.index[0],
index=['jolia'])
tm.assert_series_equal(mi.loc[key[:i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
def loop(mi, df, keys):
for key in keys:
validate(mi, df, key)
n, m = 1000, 50
vals = [randint(0, 10, n), choice(
list('abcdefghij'), n), choice(
pd.date_range('20141009', periods=10).tolist(), n), choice(
list('ZYXWVUTSRQ'), n), randn(n)]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [randint(0, 11, m), choice(
list('abcdefghijk'), m), choice(
pd.date_range('20141009', periods=11).tolist(), m), choice(
list('ZYXWVUTSRQP'), m)]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[::n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
for frame in a, b:
for i in range(5): # lexsort depth
df = frame.copy() if i == 0 else frame.sort_values(
by=cols[:i])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
@pytest.mark.slow
def test_large_dataframe_indexing(self):
# GH10692
result = DataFrame({'x': range(10 ** 6)}, dtype='int64')
result.loc[len(result)] = len(result) + 1
expected = DataFrame({'x': range(10 ** 6 + 1)}, dtype='int64')
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_large_mi_dataframe_indexing(self):
# GH10645
result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
assert (not (10 ** 6, 0) in result)
|
erjohnso/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py | 21 | #!/usr/bin/python
#
# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_loadbalancer
version_added: "2.4"
short_description: Manage Azure load balancers.
description:
- Create, update and delete Azure load balancers
options:
resource_group:
description:
- Name of a resource group where the load balancer exists or will be created.
required: true
name:
description:
- Name of the load balancer.
required: true
state:
description:
- Assert the state of the load balancer. Use 'present' to create or update a load balancer and
'absent' to delete a load balancer.
default: present
choices:
- absent
- present
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
public_ip_address_name:
description:
- Name of an existing public IP address object to associate with the security group.
aliases:
- public_ip_address
- public_ip_name
- public_ip
required: false
probe_port:
description:
- The port that the health probe will use.
required: false
probe_protocol:
description:
- The protocol to use for the health probe.
required: false
choices:
- Tcp
- Http
probe_interval:
description:
- How much time (in seconds) to probe the endpoint for health.
default: 15
required: false
probe_fail_count:
description:
- The amount of probe failures for the load balancer to make a health determination.
default: 3
required: false
probe_request_path:
description:
- The URL that an HTTP probe will use (only relevant if probe_protocol is set to Http).
required: false
protocol:
description:
- The protocol (TCP or UDP) that the load balancer will use.
required: false
choices:
- Tcp
- Udp
load_distribution:
description:
- The type of load distribution that the load balancer will employ.
required: false
choices:
- Default
- SourceIP
- SourceIPProtocol
frontend_port:
description:
- Frontend port that will be exposed for the load balancer.
required: false
backend_port:
description:
- Backend port that will be exposed for the load balancer.
required: false
idle_timeout:
description:
- Timeout for TCP idle connection in minutes.
default: 4
required: false
natpool_frontend_port_start:
description:
- Start of the port range for a NAT pool.
required: false
natpool_frontend_port_end:
description:
- End of the port range for a NAT pool.
required: false
natpool_backend_port:
description:
- Backend port used by the NAT pool.
required: false
natpool_protocol:
description:
- The protocol for the NAT pool.
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Thomas Stringer (@tr_stringer)"
'''
EXAMPLES = '''
- name: Create a load balancer
azure_rm_loadbalancer:
name: myloadbalancer
location: eastus
resource_group: my-rg
public_ip: mypublicip
probe_protocol: Tcp
probe_port: 80
probe_interval: 10
probe_fail_count: 3
protocol: Tcp
load_distribution: Default
frontend_port: 80
backend_port: 8080
idle_timeout: 4
natpool_frontend_port_start: 1030
natpool_frontend_port_end: 1040
natpool_backend_port: 80
natpool_protocol: Tcp
'''
RETURN = '''
state:
description: Current state of the load balancer
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
import random
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import (
LoadBalancer,
FrontendIPConfiguration,
BackendAddressPool,
Probe,
LoadBalancingRule,
SubResource,
InboundNatPool,
Subnet
)
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMLoadBalancer(AzureRMModuleBase):
"""Configuration class for an Azure RM load balancer resource"""
def __init__(self):
self.module_args = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
required=False,
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
required=False
),
public_ip_address_name=dict(
type='str',
required=False,
aliases=['public_ip_address', 'public_ip_name', 'public_ip']
),
probe_port=dict(
type='int',
required=False
),
probe_protocol=dict(
type='str',
required=False,
choices=['Tcp', 'Http']
),
probe_interval=dict(
type='int',
default=15
),
probe_fail_count=dict(
type='int',
default=3
),
probe_request_path=dict(
type='str',
required=False
),
protocol=dict(
type='str',
required=False,
choices=['Tcp', 'Udp']
),
load_distribution=dict(
type='str',
required=False,
choices=['Default', 'SourceIP', 'SourceIPProtocol']
),
frontend_port=dict(
type='int',
required=False
),
backend_port=dict(
type='int',
required=False
),
idle_timeout=dict(
type='int',
default=4
),
natpool_frontend_port_start=dict(
type='int'
),
natpool_frontend_port_end=dict(
type='int'
),
natpool_backend_port=dict(
type='int'
),
natpool_protocol=dict(
type='str'
)
)
self.resource_group = None
self.name = None
self.location = None
self.public_ip_address_name = None
self.state = None
self.probe_port = None
self.probe_protocol = None
self.probe_interval = None
self.probe_fail_count = None
self.probe_request_path = None
self.protocol = None
self.load_distribution = None
self.frontend_port = None
self.backend_port = None
self.idle_timeout = None
self.natpool_frontend_port_start = None
self.natpool_frontend_port_end = None
self.natpool_backend_port = None
self.natpool_protocol = None
self.results = dict(changed=False, state=dict())
required_if = [('state', 'present', ['public_ip_address_name'])]
super(AzureRMLoadBalancer, self).__init__(
derived_arg_spec=self.module_args,
supports_check_mode=True,
required_if=required_if
)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in self.module_args.keys():
setattr(self, key, kwargs[key])
results = dict()
changed = False
pip = None
load_balancer_props = dict()
try:
resource_group = self.get_resource_group(self.resource_group)
except CloudError:
self.fail('resource group {} not found'.format(self.resource_group))
if not self.location:
self.location = resource_group.location
load_balancer_props['location'] = self.location
if self.state == 'present':
# handle present status
frontend_ip_config_name = random_name('feipconfig')
frontend_ip_config_id = frontend_ip_configuration_id(
subscription_id=self.subscription_id,
resource_group_name=self.resource_group,
load_balancer_name=self.name,
name=frontend_ip_config_name
)
if self.public_ip_address_name:
pip = self.get_public_ip_address(self.public_ip_address_name)
load_balancer_props['frontend_ip_configurations'] = [
FrontendIPConfiguration(
name=frontend_ip_config_name,
public_ip_address=pip
)
]
elif self.state == 'absent':
try:
self.network_client.load_balancers.delete(
resource_group_name=self.resource_group,
load_balancer_name=self.name
).wait()
changed = True
except CloudError:
changed = False
self.results['changed'] = changed
return self.results
try:
# before we do anything, we need to attempt to retrieve the load balancer
# knowing whether or not it exists will tell us what to do in the future
self.log('Fetching load balancer {}'.format(self.name))
load_balancer = self.network_client.load_balancers.get(self.resource_group, self.name)
self.log('Load balancer {} exists'.format(self.name))
self.check_provisioning_state(load_balancer, self.state)
results = load_balancer_to_dict(load_balancer)
self.log(results, pretty_print=True)
if self.state == 'present':
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
except CloudError:
self.log('Load balancer {} does not exist'.format(self.name))
if self.state == 'present':
self.log(
'CHANGED: load balancer {} does not exist but requested status \'present\''
.format(self.name)
)
changed = True
backend_address_pool_name = random_name('beap')
backend_addr_pool_id = backend_address_pool_id(
subscription_id=self.subscription_id,
resource_group_name=self.resource_group,
load_balancer_name=self.name,
name=backend_address_pool_name
)
load_balancer_props['backend_address_pools'] = [BackendAddressPool(name=backend_address_pool_name)]
probe_name = random_name('probe')
prb_id = probe_id(
subscription_id=self.subscription_id,
resource_group_name=self.resource_group,
load_balancer_name=self.name,
name=probe_name
)
if self.probe_protocol:
load_balancer_props['probes'] = [
Probe(
name=probe_name,
protocol=self.probe_protocol,
port=self.probe_port,
interval_in_seconds=self.probe_interval,
number_of_probes=self.probe_fail_count,
request_path=self.probe_request_path
)
]
load_balancing_rule_name = random_name('lbr')
if self.protocol:
load_balancer_props['load_balancing_rules'] = [
LoadBalancingRule(
name=load_balancing_rule_name,
frontend_ip_configuration=SubResource(id=frontend_ip_config_id),
backend_address_pool=SubResource(id=backend_addr_pool_id),
probe=SubResource(id=prb_id),
protocol=self.protocol,
load_distribution=self.load_distribution,
frontend_port=self.frontend_port,
backend_port=self.backend_port,
idle_timeout_in_minutes=self.idle_timeout,
enable_floating_ip=False
)
]
inbound_nat_pool_name = random_name('inp')
if frontend_ip_config_id and self.natpool_protocol:
load_balancer_props['inbound_nat_pools'] = [
InboundNatPool(
name=inbound_nat_pool_name,
frontend_ip_configuration=Subnet(id=frontend_ip_config_id),
protocol=self.natpool_protocol,
frontend_port_range_start=self.natpool_frontend_port_start,
frontend_port_range_end=self.natpool_frontend_port_end,
backend_port=self.natpool_backend_port
)
]
self.results['changed'] = changed
self.results['state'] = (
results if results
else load_balancer_to_dict(LoadBalancer(**load_balancer_props))
)
if self.check_mode:
return self.results
try:
self.network_client.load_balancers.create_or_update(
resource_group_name=self.resource_group,
load_balancer_name=self.name,
parameters=LoadBalancer(**load_balancer_props)
).wait()
except CloudError as err:
self.fail('Error creating load balancer {}'.format(err))
return self.results
def get_public_ip_address(self, name):
"""Get a reference to the public ip address resource"""
self.log('Fetching public ip address {}'.format(name))
try:
public_ip = self.network_client.public_ip_addresses.get(self.resource_group, name)
except CloudError as err:
self.fail('Error fetching public ip address {} - {}'.format(name, str(err)))
return public_ip
def load_balancer_to_dict(load_balancer):
"""Seralialize a LoadBalancer object to a dict"""
result = dict(
id=load_balancer.id,
name=load_balancer.name,
location=load_balancer.location,
tags=load_balancer.tags,
provisioning_state=load_balancer.provisioning_state,
etag=load_balancer.etag,
frontend_ip_configurations=[],
backend_address_pools=[],
load_balancing_rules=[],
probes=[],
inbound_nat_rules=[],
inbound_nat_pools=[],
outbound_nat_rules=[]
)
if load_balancer.frontend_ip_configurations:
result['frontend_ip_configurations'] = [dict(
id=_.id,
name=_.name,
etag=_.etag,
provisioning_state=_.provisioning_state,
private_ip_address=_.private_ip_address,
private_ip_allocation_method=_.private_ip_allocation_method,
subnet=dict(
id=_.subnet.id,
name=_.subnet.name,
address_prefix=_.subnet.address_prefix
) if _.subnet else None,
public_ip_address=dict(
id=_.public_ip_address.id,
location=_.public_ip_address.location,
public_ip_allocation_method=_.public_ip_address.public_ip_allocation_method,
ip_address=_.public_ip_address.ip_address
) if _.public_ip_address else None
) for _ in load_balancer.frontend_ip_configurations]
if load_balancer.backend_address_pools:
result['backend_address_pools'] = [dict(
id=_.id,
name=_.name,
provisioning_state=_.provisioning_state,
etag=_.etag
) for _ in load_balancer.backend_address_pools]
if load_balancer.load_balancing_rules:
result['load_balancing_rules'] = [dict(
id=_.id,
name=_.name,
protocol=_.protocol,
frontend_ip_configuration_id=_.frontend_ip_configuration.id,
backend_address_pool_id=_.backend_address_pool.id,
probe_id=_.probe.id,
load_distribution=_.load_distribution,
frontend_port=_.frontend_port,
backend_port=_.backend_port,
idle_timeout_in_minutes=_.idle_timeout_in_minutes,
enable_floating_ip=_.enable_floating_ip,
provisioning_state=_.provisioning_state,
etag=_.etag
) for _ in load_balancer.load_balancing_rules]
if load_balancer.probes:
result['probes'] = [dict(
id=_.id,
name=_.name,
protocol=_.protocol,
port=_.port,
interval_in_seconds=_.interval_in_seconds,
number_of_probes=_.number_of_probes,
request_path=_.request_path,
provisioning_state=_.provisioning_state
) for _ in load_balancer.probes]
if load_balancer.inbound_nat_rules:
result['inbound_nat_rules'] = [dict(
id=_.id,
name=_.name,
frontend_ip_configuration_id=_.frontend_ip_configuration.id,
protocol=_.protocol,
frontend_port=_.frontend_port,
backend_port=_.backend_port,
idle_timeout_in_minutes=_.idle_timeout_in_minutes,
enable_floating_point_ip=_.enable_floating_point_ip,
provisioning_state=_.provisioning_state,
etag=_.etag
) for _ in load_balancer.inbound_nat_rules]
if load_balancer.inbound_nat_pools:
result['inbound_nat_pools'] = [dict(
id=_.id,
name=_.name,
frontend_ip_configuration_id=_.frontend_ip_configuration.id,
protocol=_.protocol,
frontend_port_range_start=_.frontend_port_range_start,
frontend_port_range_end=_.frontend_port_range_end,
backend_port=_.backend_port,
provisioning_state=_.provisioning_state,
etag=_.etag
) for _ in load_balancer.inbound_nat_pools]
if load_balancer.outbound_nat_rules:
result['outbound_nat_rules'] = [dict(
id=_.id,
name=_.name,
allocated_outbound_ports=_.allocated_outbound_ports,
frontend_ip_configuration_id=_.frontend_ip_configuration.id,
backend_address_pool=_.backend_address_pool.id,
provisioning_state=_.provisioning_state,
etag=_.etag
) for _ in load_balancer.outbound_nat_rules]
return result
def random_name(prefix):
"""Generate a random name with a specific prefix"""
return '{}{}'.format(prefix, random.randint(10000, 99999))
def frontend_ip_configuration_id(subscription_id, resource_group_name, load_balancer_name, name):
"""Generate the id for a frontend ip configuration"""
return '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(
subscription_id,
resource_group_name,
load_balancer_name,
name
)
def backend_address_pool_id(subscription_id, resource_group_name, load_balancer_name, name):
"""Generate the id for a backend address pool"""
return '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.format(
subscription_id,
resource_group_name,
load_balancer_name,
name
)
def probe_id(subscription_id, resource_group_name, load_balancer_name, name):
"""Generate the id for a probe"""
return '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(
subscription_id,
resource_group_name,
load_balancer_name,
name
)
def main():
"""Main execution"""
AzureRMLoadBalancer()
if __name__ == '__main__':
main()
|
ogenstad/ansible | refs/heads/devel | lib/ansible/module_utils/exoscale.py | 96 | # -*- coding: utf-8 -*-
# Copyright (c) 2016, René Moser <mail@renemoser.net>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
import os
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.six import integer_types, string_types
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1"
def exo_dns_argument_spec():
    """Return the shared AnsibleModule argument spec for exo_dns_* modules."""
    env = os.environ.get
    return {
        'api_key': {'default': env('CLOUDSTACK_KEY'), 'no_log': True},
        'api_secret': {'default': env('CLOUDSTACK_SECRET'), 'no_log': True},
        'api_timeout': {'type': 'int', 'default': env('CLOUDSTACK_TIMEOUT') or 10},
        'api_region': {'default': env('CLOUDSTACK_REGION') or 'cloudstack'},
        'validate_certs': {'default': True, 'type': 'bool'},
    }
def exo_dns_required_together():
    """Return parameter groups that must be supplied together."""
    credentials = ['api_key', 'api_secret']
    return [credentials]
class ExoDns(object):
    """Minimal client for the Exoscale DNS REST API.

    Credentials come from the module parameters and, when absent, fall back
    to the CLOUDSTACK_* environment variables or a cloudstack.ini file.
    """

    def __init__(self, module):
        self.module = module
        self.api_key = self.module.params.get('api_key')
        self.api_secret = self.module.params.get('api_secret')
        if not (self.api_key and self.api_secret):
            # Fall back to env/ini configuration when explicit params are missing.
            try:
                region = self.module.params.get('api_region')
                config = self.read_config(ini_group=region)
                self.api_key = config['key']
                self.api_secret = config['secret']
            except Exception as e:
                self.module.fail_json(msg="Error while processing config: %s" % to_native(e))

        # Exoscale DNS authenticates with a single "key:secret" token header.
        self.headers = {
            'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret),
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        # Result skeleton shared by all exo_dns_* modules (supports --diff).
        self.result = {
            'changed': False,
            'diff': {
                'before': {},
                'after': {},
            }
        }

    def read_config(self, ini_group=None):
        """Return credentials as a dict from env vars or cloudstack.ini.

        Environment variables win only when *all* required keys are present;
        otherwise the ini section *ini_group* is read. Calls fail_json when
        no config file can be found.
        """
        if not ini_group:
            ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack')

        keys = ['key', 'secret']
        env_conf = {}
        for key in keys:
            if 'CLOUDSTACK_%s' % key.upper() not in os.environ:
                break
            else:
                env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()]
        else:
            # for/else: reached only without a break, i.e. every required
            # variable was found in the environment.
            return env_conf

        # Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini
        # Last read wins in configparser
        paths = (
            os.path.join(os.path.expanduser('~'), '.cloudstack.ini'),
            os.path.join(os.getcwd(), 'cloudstack.ini'),
        )
        # Look at CLOUDSTACK_CONFIG first if present
        if 'CLOUDSTACK_CONFIG' in os.environ:
            paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),)
        if not any([os.path.exists(c) for c in paths]):
            self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths))

        conf = configparser.ConfigParser()
        conf.read(paths)
        return dict(conf.items(ini_group))

    def api_query(self, resource="/domains", method="GET", data=None):
        """Issue a request to the DNS API and return the decoded JSON body.

        fail_json (which exits the module) is called on non-2xx statuses or
        when the response body cannot be decoded as JSON.
        """
        url = EXO_DNS_BASEURL + resource
        if data:
            data = self.module.jsonify(data)

        response, info = fetch_url(
            module=self.module,
            url=url,
            data=data,
            method=method,
            headers=self.headers,
            timeout=self.module.params.get('api_timeout'),
        )

        if info['status'] not in (200, 201, 204):
            self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))

        try:
            return self.module.from_json(to_text(response.read()))
        except Exception as e:
            self.module.fail_json(msg="Could not process response into json: %s" % to_native(e))

    def has_changed(self, want_dict, current_dict, only_keys=None):
        """Return True if desired state differs from current state.

        Only int and string values are compared (strings case-insensitively);
        differences are recorded in self.result['diff']. None values and keys
        outside *only_keys* (when given) are skipped.
        """
        changed = False
        for key, value in want_dict.items():

            # Optionally limit by a list of keys
            if only_keys and key not in only_keys:
                continue

            # Skip None values
            if value is None:
                continue

            if key in current_dict:
                if isinstance(current_dict[key], integer_types):
                    if value != current_dict[key]:
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                elif isinstance(current_dict[key], string_types):
                    if value.lower() != current_dict[key].lower():
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                else:
                    self.module.fail_json(msg="Unable to determine comparison for key %s" % key)
            else:
                # Key absent from the current state counts as a change.
                self.result['diff']['after'][key] = value
                changed = True
        return changed
|
kubeflow/pipelines | refs/heads/master | samples/contrib/azure-samples/kfp-azure-databricks/setup.py | 3 | from setuptools import setup
import databricks
# Standard setuptools packaging. The version is sourced from
# databricks.__version__ so it lives in exactly one place.
setup(
    name='kfp-azure-databricks',
    version=databricks.__version__,
    description='Python package to manage Azure Databricks on Kubeflow Pipelines using Azure Databricks operator for Kubernetes',
    url='https://github.com/kubeflow/pipelines/tree/master/samples/contrib/azure-samples/kfp-azure-databricks',
    packages=['databricks']
)
|
kesre/slask | refs/heads/master | plugins/image.py | 3 | """!image <search term> return a random result from the google image search result for <search term>"""
from urllib import quote
import re
import requests
from random import shuffle, randint, choice
def image(searchterm, unsafe=True):
    """Return the URL of a random Google image search result for *searchterm*.

    When *unsafe* is true, SafeSearch is left off. Returns "" when no image
    URLs are found. NOTE(review): scrapes Google's HTML, so the imgurl regex
    below is fragile and may break when the result markup changes — confirm
    it still matches before relying on this.
    """
    searchterm = quote(searchterm)
    # There's a chance of pandas today: ~10% of searches get an "egg" word
    # prepended to the query.
    eggs = ['panda']
    if randint(0, 100) < 10:
        searchterm = '{} {}'.format(choice(eggs), searchterm)
    safe = "&safe=" if unsafe else "&safe=active"
    searchurl = "https://www.google.com/search?tbm=isch&q={0}{1}".format(searchterm, safe)
    # this is an old iphone user agent. Seems to make google return good results.
    useragent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"
    result = requests.get(searchurl, headers={"User-agent": useragent}).text
    # Image URLs appear as imgurl=... parameters in the result page source.
    images = re.findall(r'imgurl.*?(http.*?)\\', result)
    shuffle(images)
    return images[0] if images else ""
def on_message(msg, server):
    """Reply to "!image <term>" chat messages with a random image URL."""
    hits = re.findall(r"!image (.*)", msg.get("text", ""))
    if not hits:
        return
    return image(hits[0])
|
mferenca/HMS-ecommerce | refs/heads/HMSecommerce | ecommerce/extensions/order/tests/test_processing.py | 1 | from oscar.core.loading import get_model
from oscar.test import factories
from ecommerce.extensions.fulfillment.signals import SHIPPING_EVENT_NAME
from ecommerce.extensions.fulfillment.status import LINE
from ecommerce.extensions.order.processing import EventHandler
from ecommerce.tests.testcases import TestCase
ShippingEventType = get_model('order', 'ShippingEventType')
ShippingEvent = get_model('order', 'ShippingEvent')
class EventHandlerTests(TestCase):
    """Tests for EventHandler.create_shipping_event line-item filtering."""

    def setUp(self):
        """Create the shipping event type and a single-line order fixture."""
        super(EventHandlerTests, self).setUp()
        self.shipping_event_type, __ = ShippingEventType.objects.get_or_create(name=SHIPPING_EVENT_NAME)
        self.order = factories.create_order()

    def test_create_shipping_event_all_lines_complete(self):
        """
        ShippingEvents should only be created if at least one line item in an order has been successfully fulfilled. The
        created ShippingEvent should only contain the fulfilled line items. If no line items have been fulfilled, no
        ShippingEvent should be created.
        """
        order = self.order
        self.assertEqual(order.lines.count(), 1)

        # Mark the single line as fulfilled.
        line = order.lines.first()
        line.status = LINE.COMPLETE
        line.save()

        self.assertEqual(order.shipping_events.count(), 0)
        EventHandler().create_shipping_event(order, self.shipping_event_type, order.lines.all(), [1])

        shipping_event = order.shipping_events.first()
        self.assertEqual(shipping_event.order.id, order.id)
        self.assertEqual(shipping_event.lines.count(), 1)
        self.assertEqual(shipping_event.lines.first().id, line.id)

    def test_create_shipping_event_all_lines_failed(self):
        """ If no line items have been fulfilled, no ShippingEvent should be created. """
        order = self.order
        self.assertEqual(order.lines.count(), 1)

        # Put the only line into a failed fulfillment state.
        line = order.lines.first()
        line.status = LINE.FULFILLMENT_CONFIGURATION_ERROR
        line.save()

        self.assertEqual(order.shipping_events.count(), 0)
        EventHandler().create_shipping_event(order, self.shipping_event_type, order.lines.all(), [1])
        self.assertEqual(order.shipping_events.count(), 0,
                         'No ShippingEvent should have been created for an order with no fulfilled line items.')

    def test_create_shipping_event_mixed_line_status(self):
        """ The created ShippingEvent should only contain the fulfilled line items. """
        # Create a basket with multiple items
        basket = factories.create_basket()
        product = factories.create_product()
        factories.create_stockrecord(product, num_in_stock=2)
        basket.add_product(product)

        # Create an order from the basket and verify a line item exists for each item in the basket
        order = factories.create_order(basket=basket)
        self.assertEqual(order.lines.count(), 2)

        # One fulfilled line, one failed line.
        statuses = (LINE.COMPLETE, LINE.FULFILLMENT_CONFIGURATION_ERROR,)
        lines = order.lines.all()
        for index, line in enumerate(lines):
            line.status = statuses[index]
            line.save()

        self.assertEqual(order.shipping_events.count(), 0)
        EventHandler().create_shipping_event(order, self.shipping_event_type, lines, [1, 1])

        # Verify a single shipping event was created and that the event only contains the complete line item
        self.assertEqual(order.shipping_events.count(), 1)
        shipping_event = order.shipping_events.first()
        self.assertEqual(shipping_event.order.id, order.id)
        self.assertEqual(shipping_event.lines.count(), 1)
        self.assertEqual(shipping_event.lines.first().id, lines[0].id)

        # Fulfill all line items and create a new shipping event
        lines.update(status=LINE.COMPLETE)
        EventHandler().create_shipping_event(order, self.shipping_event_type, lines, [1, 1])

        # Verify a second shipping event was created for the newly-fulfilled line item
        self.assertEqual(order.shipping_events.count(), 2)
        shipping_event = order.shipping_events.all()[0]
        self.assertEqual(shipping_event.order.id, order.id)
        self.assertEqual(shipping_event.lines.count(), 1)
        self.assertEqual(shipping_event.lines.first().id, lines[1].id)
|
SUSE/azure-sdk-for-python | refs/heads/master | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/export_jobs_operation_result_info.py | 4 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .operation_result_info_base import OperationResultInfoBase
class ExportJobsOperationResultInfo(OperationResultInfoBase):
    """This class is used to send blob details after exporting jobs.

    :param object_type: Polymorphic Discriminator
    :type object_type: str
    :param blob_url: URL of the blob into which the serialized string of list
     of jobs is exported.
    :type blob_url: str
    :param blob_sas_key: SAS key to access the blob. It expires in 15 mins.
    :type blob_sas_key: str
    """

    # The discriminator is mandatory so the service can resolve the subtype.
    _validation = {
        'object_type': {'required': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'object_type': {'key': 'objectType', 'type': 'str'},
        'blob_url': {'key': 'blobUrl', 'type': 'str'},
        'blob_sas_key': {'key': 'blobSasKey', 'type': 'str'},
    }

    def __init__(self, blob_url=None, blob_sas_key=None):
        super(ExportJobsOperationResultInfo, self).__init__()
        self.blob_url = blob_url
        self.blob_sas_key = blob_sas_key
        # Fixed discriminator value identifying this subtype on the wire.
        self.object_type = 'ExportJobsOperationResultInfo'
|
garnaat/placebo | refs/heads/develop | tests/unit/test_utils.py | 1 | # Copyright (c) 2015-2019 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import shutil
import boto3
try:
import mock
except ImportError:
import unittest.mock as mock
from placebo.utils import placebo_session
class TestUtils(unittest.TestCase):
    """Tests for the placebo_session decorator."""

    def setUp(self):
        """Point boto3 at fixture credentials and configure placebo recording."""
        self.environ = {}
        self.environ_patch = mock.patch('os.environ', self.environ)
        self.environ_patch.start()
        # Bug fix: the patch was started but never stopped, leaking the
        # patched (emptied) os.environ into every test that runs after this
        # class. addCleanup guarantees stop() runs even if setUp fails later.
        self.addCleanup(self.environ_patch.stop)
        credential_path = os.path.join(os.path.dirname(__file__), 'cfg',
                                       'aws_credentials')
        self.environ['AWS_SHARED_CREDENTIALS_FILE'] = credential_path
        self.environ['PLACEBO_MODE'] = 'record'
        self.environ['PLACEBO_DIR'] = 'placebo_test_runs'
        self.session = boto3.Session(profile_name='foobar',
                                     region_name='us-west-2')

    @placebo_session
    def test_decorator(self, session):
        """placebo_session should create a per-test recording directory."""
        # Tear it up..
        PLACEBO_TEST_DIR = os.path.join(os.getcwd(), 'placebo_test_runs')
        prefix = 'TestUtils.test_decorator'
        record_dir = os.path.join(PLACEBO_TEST_DIR, prefix)
        self.assertTrue(os.path.exists(record_dir))
        # Tear it down..
        shutil.rmtree(PLACEBO_TEST_DIR)
|
EliteTK/qutebrowser | refs/heads/master | tests/unit/mainwindow/test_messageview.py | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import pytest
from qutebrowser.mainwindow import messageview
from qutebrowser.utils import usertypes
@pytest.fixture
def view(qtbot, config_stub):
    """Return a MessageView backed by a stubbed config.

    The stub provides the colors/fonts the widget reads, plus a short
    100 ms message timeout so the hiding tests run quickly.
    """
    config_stub.data = {
        'colors': {
            'messages.fg.error': 'white',
            'messages.bg.error': 'red',
            'messages.border.error': '#bb0000',
            'messages.fg.warning': 'white',
            'messages.bg.warning': 'darkorange',
            'messages.border.warning': '#d47300',
            'messages.fg.info': 'white',
            'messages.bg.info': 'black',
            'messages.border.info': '#333333',
        },
        'fonts': {
            'messages.error': '8pt Monospace',
            'messages.warning': '8pt Monospace',
            'messages.info': '8pt Monospace',
        },
        'ui': {
            'message-timeout': 100,
        }
    }
    mv = messageview.MessageView()
    qtbot.add_widget(mv)
    return mv
@pytest.mark.parametrize('level', [usertypes.MessageLevel.info,
                                   usertypes.MessageLevel.warning,
                                   usertypes.MessageLevel.error])
def test_single_message(qtbot, view, level):
    """A message of any level should be visible after showing the view."""
    view.show_message(level, 'test')
    qtbot.waitForWindowShown(view)
    assert view._messages[0].isVisible()
def test_message_hiding(qtbot, view):
    """Messages should be hidden after the timer times out."""
    # waitSignal blocks until the view's clear timer fires.
    with qtbot.waitSignal(view._clear_timer.timeout):
        view.show_message(usertypes.MessageLevel.info, 'test')
    assert not view._messages
def test_size_hint(view):
    """The message height should increase with more messages."""
    view.show_message(usertypes.MessageLevel.info, 'test1')
    height1 = view.sizeHint().height()
    assert height1 > 0
    view.show_message(usertypes.MessageLevel.info, 'test2')
    height2 = view.sizeHint().height()
    # Two stacked messages take exactly twice the height of one.
    assert height2 == height1 * 2
def test_show_message_twice(view):
    """Show the same message twice -> only one should be shown."""
    # Identical text is deduplicated while the first message is still visible.
    view.show_message(usertypes.MessageLevel.info, 'test')
    view.show_message(usertypes.MessageLevel.info, 'test')
    assert len(view._messages) == 1
def test_show_message_twice_after_first_disappears(qtbot, view):
    """Show the same message twice after the first is gone."""
    # Wait for the first message to be cleared by the timer.
    with qtbot.waitSignal(view._clear_timer.timeout):
        view.show_message(usertypes.MessageLevel.info, 'test')
    # Just a sanity check
    assert not view._messages

    # Once the first copy is gone, the same text may be shown again.
    view.show_message(usertypes.MessageLevel.info, 'test')
    assert len(view._messages) == 1
def test_changing_timer_with_messages_shown(qtbot, view, config_stub):
    """When we change ui -> message-timeout, the timer should be restarted."""
    # Start with a very long timeout so the message would normally stay.
    config_stub['ui']['message-timeout'] = 900000  # 15s
    view.show_message(usertypes.MessageLevel.info, 'test')
    # Lowering the timeout while a message is shown must restart the timer.
    with qtbot.waitSignal(view._clear_timer.timeout):
        config_stub.set('ui', 'message-timeout', 100)
|
LukasSukenik/faunus | refs/heads/master | src/playground/vacha/sc-moviepdb.py | 2 | #!/usr/bin/env python
#this program convert movie data to atom data = for each spherocylinder make residue
#consiting of two atoms at begining and end then in vmd use cpk to draw cylinders
import os
import sys
import math
import optparse
import commands
import string
import random
import usefulmath
def write_pdb(outfilename, box, newdata):
    """Write frames to *outfilename* in PDB format.

    Each frame gets a CRYST1 record with its box dimensions, followed by one
    ATOM record per spherocylinder endpoint; consecutive endpoint pairs share
    a residue number so vmd can draw a cylinder between them (see write_psf).

    box     -- list of [bx, by, bz] per frame
    newdata -- list of frames, each a list of [x, y, z] endpoints
    Returns 0 on success.
    """
    parts = []  # collect and join once instead of quadratic string +=
    for frame_idx, frame in enumerate(newdata):
        bx, by, bz = box[frame_idx]
        parts.append("CRYST1 %8.3f %8.3f %8.3f 90.00 90.00 90.00 P 1 1\n" % (bx, by, bz))
        for line, atom in enumerate(frame):
            # Atoms alternate N1/N2; each consecutive pair forms one residue.
            # '//' keeps the residue index an int on both Python 2 and 3.
            parts.append("ATOM %5d N%1d PSC F%4d % 8.3f% 8.3f% 8.3f\n"
                         % (line + 1, line % 2 + 1, (2 + line - (line % 2)) // 2,
                            atom[0], atom[1], atom[2]))
        parts.append("END\n")
    # 'with' guarantees the file is closed even on write errors.
    with open(outfilename, 'w') as f:
        f.write("".join(parts))
    return 0
def write_psf(outfilename, box, newdata):
    """Write a PSF connectivity file matching write_pdb's atom numbering.

    Only the first frame determines the atom count; each consecutive atom
    pair (2i+1, 2i+2) is bonded so vmd draws the spherocylinders.
    *box* is unused but kept for a uniform writer signature.
    Returns 0 on success.
    """
    frame0 = newdata[0]
    natoms = len(frame0)
    parts = ["PSF\n", "%8d !NATOM\n" % natoms]
    for line in range(natoms):
        parts.append("%8d N%03d %4d PSC N%1d N%1d \n"
                     % (line + 1, line % 2 + 1, (2 + line - (line % 2)) // 2,
                        line % 2 + 1, line % 2 + 1))
    # '//' keeps integer semantics on Python 2 and fixes a py3 TypeError
    # (range() rejects floats from true division).
    nbonds = natoms // 2
    parts.append("%8d !NBOND\n" % nbonds)
    pending = ""
    for i in range(nbonds):
        pending += " %7d %7d" % (2 * i + 1, 2 * i + 2)
        if i % 2 == 1:
            # Two bond pairs per output line.
            parts.append(pending + "\n")
            pending = ""
    # Flush the remaining (possibly empty) bond line, padded to 64 chars.
    parts.append("%-64s" % pending)
    with open(outfilename, 'w') as f:
        f.write("".join(parts))
    return 0
def read_input(infilename):
    """Parse a movie file into per-frame box sizes and particle records.

    Expected file format, repeated per frame:
      <atom count>                                  (1 token)
      <sweep> <num> box <bx> <by> <bz>              (6 tokens)
      <x y z vx vy vz px py pz> * atom-count lines  (9 tokens each)

    Returns [box, data]: box is a list of [bx, by, bz] per frame, data is a
    list of frames, each a list of 9-float particle records.
    """
    data = []
    box = []
    frame = []
    i = 0
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(infilename) as inp:
        for line in inp:
            linesplit = line.split()
            if len(linesplit) == 1:
                # A lone token starts a new frame: it is the atom count.
                atomnum = int(linesplit[0])
                i = 0
            elif len(linesplit) == 6:
                # Frame header carries the box dimensions.
                [sweep, num, boxstr, bx, by, bz] = linesplit[:]
                box.append([float(bx), float(by), float(bz)])
            else:
                # Particle record: position, direction and patch vectors.
                [x, y, z, vx, vy, vz, px, py, pz] = linesplit[:]
                frame.append([float(x), float(y), float(z),
                              float(vx), float(vy), float(vz),
                              float(px), float(py), float(pz)])
                i = i + 1
                if i == atomnum:
                    # Frame complete; store it and start collecting the next.
                    data.append(frame)
                    frame = []
    return [box, data]
def datatransform(data, leng, patch):
    """Convert particle records (position + direction) into endpoint pairs.

    For each particle, emit the two spherocylinder endpoints located at
    +/- leng/2 along the normalized direction vector. *patch* is unused
    here; it is kept for call-site compatibility.
    """
    transformed = []
    half = leng / 2
    for frame in data:
        endpoints = []
        for record in frame:
            x, y, z, vx, vy, vz, px, py, pz = record[:]
            direction = usefulmath.vec_normalize([vx, vy, vz])
            endpoints.append([x + half * direction[0],
                              y + half * direction[1],
                              z + half * direction[2]])
            endpoints.append([x - half * direction[0],
                              y - half * direction[1],
                              z - half * direction[2]])
        transformed.append(endpoints)
    return transformed
def write_vmd(outfilename, outfilename2, patch):
    """Write a vmd startup script ("vmd.script") that loads the psf/pdb pair.

    outfilename  -- pdb trajectory file referenced by the script
    outfilename2 -- psf connectivity file referenced by the script
    *patch* is currently unused (kept for call-site compatibility).
    Returns 0 on success.
    """
    # Assemble once and join instead of repeated string concatenation;
    # the string content is byte-identical to the original output.
    lines = [
        "proc setlook {} {\n",
        "rotate stop\n",
        "color Display Background white\n",
        "display projection orthographic\n",
        "mol delrep 0 0\n",
        "mol selection \"name N1 N2\"\n",
        "mol addrep 0\n",
        "mol selection \"name N3 N4\"\n",
        "mol addrep 0\n",
        "mol modstyle 0 0 CPK 10.0 14 20 20\n",
        "mol modcolor 0 0 ColorID 0\n",
        "mol modmaterial 0 0 Edgy\n",
        "axes location off\n",
        "}\n",
        "mol load psf %s \n" % (outfilename2),
        "mol addfile %s 0\n" % (outfilename),
        "setlook\n",
    ]
    # 'with' guarantees the script file is closed even on write errors.
    with open("vmd.script", 'w') as f:
        f.write("".join(lines))
    return 0
def make(infilename, outfilename, outfilename2, leng, patch):
    """Run the full conversion: read movie data, transform each particle into
    spherocylinder endpoints, then write the pdb, psf and vmd output files."""
    [box, data] = read_input(infilename)
    newdata = datatransform(data, leng, patch)
    write_pdb(outfilename, box, newdata)
    write_psf(outfilename2, box, newdata)
    write_vmd(outfilename, outfilename2, patch)
    return 0
# ---- command-line interface (script entry) ----
parser = optparse.OptionParser()
help = """Usage:
%prog [options]
"""
parser.set_usage(help)
parser.add_option(
    "-i",
    "--input",
    help="Set file from which you want to load data",
    dest="infilename",
    default="movie"
)
parser.add_option(
    "-o",
    "--output",
    help="Set to which file you want to save data",
    dest="outfilename",
    default="movie.pdb"
)
parser.add_option(
    "--psf",
    help="Set to which file you want to save connectivity - psf",
    dest="outfilename2",
    default="movie.psf"
)
parser.add_option(
    "-l",
    "--length",
    help="Set length/diameter size of spherocylinder",
    dest="leng",
    default="40"
)
parser.add_option(
    "-p",
    "--patch",
    help="Set size of patch in degrees",
    dest="patch",
    default="90"
)

(options, arguments) = parser.parse_args()
# Numeric options arrive as strings; convert them once at the call site.
make(options.infilename, options.outfilename, options.outfilename2, float(options.leng), float(options.patch))
|
sarvex/django | refs/heads/master | tests/template_tests/filter_tests/test_iriencode.py | 388 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
    """
    Ensure iriencode keeps safe strings.
    """

    @setup({'iriencode01': '{{ url|iriencode }}'})
    def test_iriencode01(self):
        # Plain string, default autoescaping.
        output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
        self.assertEqual(output, '?test=1&me=2')

    @setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode02(self):
        # Plain string with autoescaping disabled.
        output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
        self.assertEqual(output, '?test=1&me=2')

    @setup({'iriencode03': '{{ url|iriencode }}'})
    def test_iriencode03(self):
        # mark_safe input, default autoescaping.
        output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
        self.assertEqual(output, '?test=1&me=2')

    @setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode04(self):
        # mark_safe input with autoescaping disabled.
        output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
        self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
    """Direct tests of the iriencode filter function."""

    def test_unicode(self):
        """Non-ASCII characters are percent-encoded as UTF-8."""
        self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')

    def test_urlencoded(self):
        """iriencode applied on top of urlencode output."""
        self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
|
UrusTeam/android_ndk_toolchain_cross | refs/heads/master | lib/python2.7/json/decoder.py | 51 | """Implementation of JSONDecoder
"""
import re
import sys
import struct
from json import scanner
try:
from _json import scanstring as c_scanstring
except ImportError:
c_scanstring = None
# Public API of this module.
__all__ = ['JSONDecoder']

# Shared flags for all scanner regexes in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Return the 1-based (line, column) of character index *pos* in *doc*."""
    newlines_before = doc.count('\n', 0, pos)
    if newlines_before:
        # Column is the distance past the most recent newline.
        column = pos - doc.rindex('\n', 0, pos)
    else:
        # First line: column is the 0-based offset plus one.
        column = pos + 1
    return newlines_before + 1, column
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column info for *pos*.

    When *end* is given, the message describes the span pos..end.
    Note that this function is called from _json.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '{0}: line {1} column {2} (char {3})'.format(msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('{0}: line {1} column {2} - line {3} column {4} '
            '(char {5} - {6})').format(msg, lineno, colno,
                                       endlineno, endcolno, pos, end)
# Literal tokens outside strict JSON that map to float constants.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of plain characters followed by a quote, backslash or
# control character (the only bytes that need special handling).
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character escape sequences and their replacements.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                #msg = "Invalid control character %r at" % (terminator,)
                msg = "Invalid control character {0!r} at".format(terminator)
                raise ValueError(errmsg(msg, s, end))
            else:
                # Non-strict mode: keep the literal control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                # Combine the high/low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available: the C implementation from _json, falling back
# to the pure-Python scanner above.
scanstring = c_scanstring or py_scanstring

# JSON insignificant whitespace, as a regex and as a membership string.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(s_and_end, encoding, strict, scan_once, object_hook,
        object_pairs_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object; s_and_end is (document, index just past '{').

    Returns (obj, end) where obj is the decoded dict (or the result of
    object_hook / object_pairs_hook) and end is the index one past the
    closing '}'.
    """
    s, end = s_and_end
    pairs = []
    pairs_append = pairs.append
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg(
                "Expecting property name enclosed in double quotes", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)

        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting ':' delimiter", s, end))
        end += 1

        try:
            # Skip whitespace after ':' cheaply before falling back to regex.
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs_append((key, value))

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting ',' delimiter", s, end - 1))

        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''

        end += 1
        if nextchar != '"':
            raise ValueError(errmsg(
                "Expecting property name enclosed in double quotes", s, end - 1))

    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array; s_and_end is (document, index just past '[').

    Returns (values, end): the decoded list and the index one past ']'.
    """
    s, end = s_and_end
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting ',' delimiter", s, end))

        try:
            # Skip whitespace after ',' cheaply before falling back to regex.
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.

    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default).  It has no
        effect when decoding ``unicode`` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.

        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``object_pairs_hook``, if specified will be called with the result of
        every JSON object decoded with an ordered list of pairs.  The return
        value of ``object_pairs_hook`` will be used instead of the ``dict``.
        This feature can be used to implement custom decoders that rely on the
        order that the key and value pairs are decoded (for example,
        collections.OrderedDict will remember the order of insertion). If
        ``object_hook`` is also defined, the ``object_pairs_hook`` takes
        priority.

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded. By default this is equivalent to
        float(num_str). This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded. By default this is equivalent to
        int(num_str). This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.

        If ``strict`` is false (true is the default), then control
        characters will be allowed inside strings.  Control characters in
        this context are those with character codes in the 0-31 range,
        including ``'\\t'`` (tab), ``'\\n'``, ``'\\r'`` and ``'\\0'``.

        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Build the scan_once callable from this decoder's settings.
        self.scan_once = scanner.make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        # Skip trailing whitespace, then require end-of-input.
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.

        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
|
peiyuwang/pants | refs/heads/master | src/python/pants/backend/jvm/targets/unpacked_jars.py | 10 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.jvm.targets.import_jars_mixin import ImportJarsMixin
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.build_graph.target import Target
logger = logging.getLogger(__name__)
class UnpackedJars(ImportJarsMixin, Target):
  """A set of sources extracted from JAR files.

  :API: public
  """

  class ExpectedLibrariesError(Exception):
    """Thrown when the target has no libraries defined."""
    pass

  def __init__(self, payload=None, libraries=None, include_patterns=None, exclude_patterns=None,
               **kwargs):
    """
    :param payload: The configuration encapsulated by this target.
    :param list libraries: addresses of `jar_library <#jar_library>`_ targets that specify
      the jars you want to unpack
    :param list include_patterns: fileset patterns to include from the archive
    :param list exclude_patterns: fileset patterns to exclude from the archive. Exclude patterns
      are processed before include_patterns.
    """
    payload = payload or Payload()
    payload.add_fields({
      'library_specs': PrimitiveField(libraries or ()),
      'include_patterns' : PrimitiveField(include_patterns or ()),
      'exclude_patterns' : PrimitiveField(exclude_patterns or ()),
    })
    super(UnpackedJars, self).__init__(payload=payload, **kwargs)
    # Lazily-populated cache of extracted files (not filled in here).
    self._files = None
    # libraries is mandatory in practice: fail fast with a precise spec.
    if not libraries:
      raise self.ExpectedLibrariesError('Expected non-empty libraries attribute for {spec}'
                                        .format(spec=self.address.spec))

  @property
  def imported_jar_library_specs(self):
    """List of JarLibrary specs to import.

    Required to implement the ImportJarsMixin.
    """
    return self.payload.library_specs
|
samantp/gensimPy3 | refs/heads/develop | gensim/parsing/porter.py | 1 | #!/usr/bin/env python
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It may be be regarded
as canonical, in that it follows the algorithm presented in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
only differing from it at the points maked --DEPARTURE-- below.
See also http://www.tartarus.org/~martin/PorterStemmer
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
because (a) the points of DEPARTURE are definitely improvements, and
(b) no encoding of the Porter stemmer I have seen is anything like
as exact as this version, even with the points of DEPARTURE!
Vivake Gupta (v@nano.com)
Release 1: January 2001
Further adjustments by Santiago Bruno (bananabruno@gmail.com)
to allow word input not restricted to one word per line, leading
to:
Release 2: July 2008
Optimizations and cleanup of the code by Lars Buitinck, July 2012.
"""
class PorterStemmer(object):
    """Stemmer implementing Porter's 1980 suffix-stripping algorithm.

    Usage: ``PorterStemmer().stem(word)`` with *word* treated
    case-insensitively (stem() lower-cases its input).
    """

    def __init__(self):
        """The main part of the stemming algorithm starts here.
        b is a buffer holding a word to be stemmed. The letters are in b[0],
        b[1] ... ending at b[k]. k is readjusted downwards as the stemming
        progresses.

        Note that only lower case sequences are stemmed. Forcing to lower case
        should be done before stem(...) is called.
        """
        self.b = ""  # buffer for word to be stemmed
        self.k = 0   # index of the last live character of b
        self.j = 0   # j is a general offset into the string (set by _ends)

    def _cons(self, i):
        """True <=> b[i] is a consonant."""
        ch = self.b[i]
        if ch in "aeiou":
            return False
        if ch == 'y':
            # 'y' counts as a consonant at position 0 or after a vowel;
            # after a consonant it acts as a vowel.
            return i == 0 or not self._cons(i - 1)
        return True

    def _m(self):
        """Returns the number of consonant sequences between 0 and j.

        If c is a consonant sequence and v a vowel sequence, and <..>
        indicates arbitrary presence,

           <c><v>       gives 0
           <c>vc<v>     gives 1
           <c>vcvc<v>   gives 2
           <c>vcvcvc<v> gives 3
           ....
        """
        i = 0
        # Skip any initial consonant sequence.
        while True:
            if i > self.j:
                return 0
            if not self._cons(i):
                break
            i += 1
        i += 1
        n = 0
        # Each outer iteration consumes one v...c... pair (one unit of measure).
        while True:
            while True:
                if i > self.j:
                    return n
                if self._cons(i):
                    break
                i += 1
            i += 1
            n += 1
            while 1:
                if i > self.j:
                    return n
                if not self._cons(i):
                    break
                i += 1
            i += 1

    def _vowelinstem(self):
        """True <=> 0,...j contains a vowel"""
        return not all(self._cons(i) for i in range(self.j + 1))

    def _doublec(self, j):
        """True <=> j,(j-1) contain a double consonant."""
        return j > 0 and self.b[j] == self.b[j-1] and self._cons(j)

    def _cvc(self, i):
        """True <=> i-2,i-1,i has the form consonant - vowel - consonant
        and also if the second c is not w,x or y. This is used when trying to
        restore an e at the end of a short word, e.g.

           cav(e), lov(e), hop(e), crim(e), but
           snow, box, tray.
        """
        if i < 2 or not self._cons(i) or self._cons(i-1) or not self._cons(i-2):
            return False
        return self.b[i] not in "wxy"

    def _ends(self, s):
        """True <=> 0,...k ends with the string s.

        Side effect: on success, self.j is set to the index just before
        the matched suffix (the later _setto/_r calls rely on this).
        """
        if s[-1] != self.b[self.k]: # tiny speed-up
            return 0
        length = len(s)
        if length > (self.k + 1):
            return 0
        if self.b[self.k-length+1:self.k+1] != s:
            return 0
        self.j = self.k - length
        return 1

    def _setto(self, s):
        """Set (j+1),...k to the characters in the string s, adjusting k."""
        self.b = self.b[:self.j+1] + s
        self.k = len(self.b) - 1

    def _r(self, s):
        # Replace the matched suffix with s, but only if the stem's
        # measure is positive.
        if self._m() > 0:
            self._setto(s)

    def _step1ab(self):
        """Get rid of plurals and -ed or -ing. E.g.,

           caresses  ->  caress
           ponies    ->  poni
           ties      ->  ti
           caress    ->  caress
           cats      ->  cat

           feed      ->  feed
           agreed    ->  agree
           disabled  ->  disable

           matting   ->  mat
           mating    ->  mate
           meeting   ->  meet
           milling   ->  mill
           messing   ->  mess

           meetings  ->  meet
        """
        if self.b[self.k] == 's':
            if self._ends("sses"):
                self.k -= 2
            elif self._ends("ies"):
                self._setto("i")
            elif self.b[self.k - 1] != 's':
                self.k -= 1
        if self._ends("eed"):
            if self._m() > 0:
                self.k -= 1
        elif (self._ends("ed") or self._ends("ing")) and self._vowelinstem():
            self.k = self.j
            if self._ends("at"): self._setto("ate")
            elif self._ends("bl"): self._setto("ble")
            elif self._ends("iz"): self._setto("ize")
            elif self._doublec(self.k):
                # Undouble a final double consonant, except l, s, z.
                if self.b[self.k - 1] not in "lsz":
                    self.k -= 1
            elif self._m() == 1 and self._cvc(self.k):
                self._setto("e")

    def _step1c(self):
        """Turn terminal y to i when there is another vowel in the stem."""
        if self._ends("y") and self._vowelinstem():
            self.b = self.b[:self.k] + 'i'

    def _step2(self):
        """Map double suffices to single ones.

        So, -ization ( = -ize plus -ation) maps to -ize etc. Note that the
        string before the suffix must give _m() > 0.
        """
        # Dispatch on the second-to-last character to cut down comparisons.
        ch = self.b[self.k - 1]

        if ch == 'a':
            if self._ends("ational"): self._r("ate")
            elif self._ends("tional"): self._r("tion")
        elif ch == 'c':
            if self._ends("enci"): self._r("ence")
            elif self._ends("anci"): self._r("ance")
        elif ch == 'e':
            if self._ends("izer"): self._r("ize")
        elif ch == 'l':
            if self._ends("bli"): self._r("ble") # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            #   if self._ends("abli"): self._r("able")
            elif self._ends("alli"): self._r("al")
            elif self._ends("entli"): self._r("ent")
            elif self._ends("eli"): self._r("e")
            elif self._ends("ousli"): self._r("ous")
        elif ch == 'o':
            if self._ends("ization"): self._r("ize")
            elif self._ends("ation"): self._r("ate")
            elif self._ends("ator"): self._r("ate")
        elif ch == 's':
            if self._ends("alism"): self._r("al")
            elif self._ends("iveness"): self._r("ive")
            elif self._ends("fulness"): self._r("ful")
            elif self._ends("ousness"): self._r("ous")
        elif ch == 't':
            if self._ends("aliti"): self._r("al")
            elif self._ends("iviti"): self._r("ive")
            elif self._ends("biliti"): self._r("ble")
        elif ch == 'g': # --DEPARTURE--
            if self._ends("logi"): self._r("log")
            # To match the published algorithm, delete this phrase

    def _step3(self):
        """Deal with -ic-, -full, -ness etc. Similar strategy to _step2."""
        ch = self.b[self.k]

        if ch == 'e':
            if self._ends("icate"): self._r("ic")
            elif self._ends("ative"): self._r("")
            elif self._ends("alize"): self._r("al")
        elif ch == 'i':
            if self._ends("iciti"): self._r("ic")
        elif ch == 'l':
            if self._ends("ical"): self._r("ic")
            elif self._ends("ful"): self._r("")
        elif ch == 's':
            if self._ends("ness"): self._r("")

    def _step4(self):
        """_step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
        ch = self.b[self.k - 1]

        if ch == 'a':
            if not self._ends("al"): return
        elif ch == 'c':
            if not self._ends("ance") and not self._ends("ence"): return
        elif ch == 'e':
            if not self._ends("er"): return
        elif ch == 'i':
            if not self._ends("ic"): return
        elif ch == 'l':
            if not self._ends("able") and not self._ends("ible"): return
        elif ch == 'n':
            if self._ends("ant"): pass
            elif self._ends("ement"): pass
            elif self._ends("ment"): pass
            elif self._ends("ent"): pass
            else: return
        elif ch == 'o':
            if self._ends("ion") and self.b[self.j] in "st": pass
            elif self._ends("ou"): pass
            # takes care of -ous
            else: return
        elif ch == 's':
            if not self._ends("ism"): return
        elif ch == 't':
            if not self._ends("ate") and not self._ends("iti"): return
        elif ch == 'u':
            if not self._ends("ous"): return
        elif ch == 'v':
            if not self._ends("ive"): return
        elif ch == 'z':
            if not self._ends("ize"): return
        else:
            return
        # Strip the suffix only when the remaining stem has measure > 1.
        if self._m() > 1:
            self.k = self.j

    def _step5(self):
        """Remove a final -e if _m() > 1, and change -ll to -l if m() > 1.
        """
        k = self.j = self.k
        if self.b[k] == 'e':
            a = self._m()
            if a > 1 or (a == 1 and not self._cvc(k - 1)):
                self.k -= 1
        if self.b[self.k] == 'l' and self._doublec(self.k) and self._m() > 1:
            self.k -= 1

    def stem(self, w):
        """Stem the word w, return the stemmed form."""
        w = w.lower()
        k = len(w) - 1
        if k <= 1:
            return w # --DEPARTURE--

        # With this line, strings of length 1 or 2 don't go through the
        # stemming process, although no mention is made of this in the
        # published algorithm. Remove the line to match the published
        # algorithm.

        self.b = w
        self.k = k

        # Apply the five steps of the algorithm in order; each step
        # mutates self.b / self.k in place.
        self._step1ab()
        self._step1c()
        self._step2()
        self._step3()
        self._step4()
        self._step5()
        return self.b[:self.k+1]

    def stem_sentence(self, txt):
        # Stem each whitespace-separated token and rejoin with single spaces.
        return " ".join(map(self.stem, txt.split()))

    def stem_documents(self, docs):
        # Apply stem_sentence to every document in the (finite) sequence.
        return list(map(self.stem_sentence, docs))
if __name__ == '__main__':
    # CLI: stem every file named on the command line, writing the
    # stemmed form of each input line to stdout.
    import sys
    p = PorterStemmer()
    for f in sys.argv[1:]:
        with open(f) as infile:
            for line in infile:
                print((p.stem_sentence(line)))
|
fhorinek/SkyBean | refs/heads/master | vario/convert/main.py | 1 | #!/usr/bin/python
import sys
import serial
from intelhex import IntelHex
import time
import datetime
import base64
def add8(a, b):
    """Return the sum of *a* and *b* wrapped to an unsigned 8-bit value."""
    total = a + b
    return total & 0xFF
page_size = 255
class Hex2BinConv():
    """Convert an Intel-HEX firmware image to a base64-encoded binary blob.

    NOTE(review): Python 2 code (print statements) -- runs under Python 2 only.
    """

    def __init__(self, out):
        # ``out`` is the path of the output (.ebin) file to produce.
        self.hex = IntelHex()
        self.out = out

    def load(self, filename):
        """Load the application image from an Intel-HEX file and report its size."""
        print
        print "Loading application from hex"
        self.hex.loadfile(filename, "hex")
        size = self.hex.maxaddr() - self.hex.minaddr()
        # NOTE(review): size/1024 is integer division under Python 2, so the
        # KiB figure is truncated -- confirm whether that is intended.
        print " size: %0.2f KiB (%d B)" % (size/1024, size)

    def conv(self, label):
        """Dump the loaded image byte-by-byte into tmp.bin, then stream a
        base64 encoding of it into self.out.

        NOTE(review): ``label`` is accepted but unused in this method.
        """
        done = False  # NOTE(review): unused local
        adr = self.hex.minaddr()
        max_adr = self.hex.maxaddr()
        tmp_file = open("tmp.bin", "wb")
        out_file = open(self.out, "w")
        print "Converting HEX 2 BIN ...",
        while(adr <= max_adr):
            tmp_file.write(chr(self.hex[adr]))
            adr += 1
        tmp_file.close()
        # Re-open the finished dump for reading; base64.encode streams it
        # into the output file.  NOTE(review): this handle is never closed.
        tmp_file = open("tmp.bin", "r")
        base64.encode(tmp_file, out_file)
        out_file.close()
        print "Done"

    def batch(self, filename, label):
        """Convenience wrapper: load ``filename``, convert it, and time the run."""
        start = time.clock()
        self.load(filename)
        self.conv(label)
        end = time.clock()
        print
        print "That's all folks! (%.2f seconds)" % (end - start)
# --- command-line entry point (Python 2) ---
if (len(sys.argv) < 3 or len(sys.argv) > 4):
    print "Usage %s hex_file output_file [label]" % __file__
    sys.exit(-1)
hex = sys.argv[1]  # NOTE(review): shadows the builtin hex()
label = ""
if (len(sys.argv) == 4):
    label = sys.argv[3]
# Empty or "auto" label -> timestamp label (YYYYmmddHHMM).
if (label == "" or label == "auto"):
    label = datetime.datetime.now().strftime("%Y%m%d%H%M")
# Prefix the label with the build number read from the source tree.
# NOTE(review): open() raises IOError when the file is missing, so the
# else branch below is effectively unreachable -- verify intent.
f = open("../src/build_number.txt", "r")
if (f):
    number = f.readline() + "_"
    f.close()
else:
    number = ""
# sys.argv[2] is used as an output directory/prefix, not a full filename.
out = sys.argv[2] + "skybean_" + number + label + ".ebin"
a = Hex2BinConv(out)
a.batch(hex, label)
|
adamjmcgrath/glancydesign | refs/heads/master | src/django-nonrel/django/contrib/gis/shortcuts.py | 317 | import cStringIO, zipfile
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def compress_kml(kml):
    "Returns compressed KMZ from the given KML string."
    kmz = cStringIO.StringIO()
    # A KMZ is a ZIP archive; 'doc.kml' is the conventional name of the
    # root KML document inside it.
    zf = zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED)
    zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
    zf.close()
    # Rewind so read() below returns the complete archive bytes.
    kmz.seek(0)
    return kmz.read()
def render_to_kml(*args, **kwargs):
    "Renders the response as KML (using the correct MIME type)."
    # Arguments are forwarded verbatim to django.template.loader.render_to_string.
    return HttpResponse(loader.render_to_string(*args, **kwargs),
                        mimetype='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
    """
    Compresses the KML content and returns as KMZ (using the correct
    MIME type).

    Arguments are forwarded verbatim to
    django.template.loader.render_to_string.
    """
    return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)),
                        mimetype='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
    "Renders the response using the MIME type for plain text."
    content = loader.render_to_string(*args, **kwargs)
    return HttpResponse(content, mimetype='text/plain')
|
lordmuffin/aws-cfn-plex | refs/heads/master | functions/credstash/pip/_vendor/html5lib/__init__.py | 336 | """
HTML parsing library based on the WHATWG "HTML5"
specification. The parser is designed to be compatible with existing
HTML found in the wild and implements well-defined error recovery that
is largely compatible with modern desktop web browsers.
Example usage:
import html5lib
f = open("my_document.html")
tree = html5lib.parse(f)
"""
from __future__ import absolute_import, division, unicode_literals
from .html5parser import HTMLParser, parse, parseFragment
from .treebuilders import getTreeBuilder
from .treewalkers import getTreeWalker
from .serializer import serialize
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
"getTreeWalker", "serialize"]
# this has to be at the top level, see how setup.py parses this
__version__ = "1.0b10"
|
g-k/servo | refs/heads/master | components/script/dom/bindings/codegen/BindingGen.py | 20 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
sys.path.append("./parser/")
sys.path.append("./ply/")
import os
import cPickle
from Configuration import Configuration
from CodegenRust import CGBindingRoot, replaceFileIfChanged
def generate_binding_rs(config, outputprefix, webidlfile):
    """
    |config| Is the configuration object.
    |outputprefix| is a prefix to use for the header guards and filename.
    |webidlfile| is the path of the WebIDL source file to generate from.
    """
    filename = outputprefix + ".rs"
    root = CGBindingRoot(config, outputprefix, webidlfile)
    # replaceFileIfChanged only rewrites (and returns True) when the
    # generated code differs, which keeps incremental builds quiet.
    if replaceFileIfChanged(filename, root.define()):
        print "Generating binding implementation: %s" % (filename)
def main():
    """CLI driver: parse arguments, load the pickled WebIDL parser output,
    and generate the Rust binding for one WebIDL file.

    Usage: BindingGen.py configFile outputPrefix webIDLFile
    """
    # Parse arguments.
    from optparse import OptionParser
    usagestring = "usage: %prog configFile outputPrefix webIDLFile"
    o = OptionParser(usage=usagestring)
    o.add_option("--verbose-errors", action='store_true', default=False,
                 help="When an error happens, display the Python traceback.")
    (options, args) = o.parse_args()
    if len(args) != 3:
        o.error(usagestring)
    configFile = os.path.normpath(args[0])
    outputPrefix = args[1]
    webIDLFile = os.path.normpath(args[2])

    # Load the parsing results (produced by an earlier parser invocation).
    f = open('ParserResults.pkl', 'rb')
    parserData = cPickle.load(f)
    f.close()

    # Create the configuration data.
    config = Configuration(configFile, parserData)

    # Generate the prototype classes.
    generate_binding_rs(config, outputPrefix, webIDLFile)

if __name__ == '__main__':
    main()
|
watchdogpolska/feder | refs/heads/master | feder/institutions/migrations/0015_auto_20170830_1408.py | 1 | # Generated by Django 1.11.4 on 2017-08-30 14:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``Institution.parents`` to the
    ManyToMany definition below (blank allowed, hidden reverse relation,
    English verbose name).
    """

    dependencies = [("institutions", "0014_auto_20170822_1403")]

    operations = [
        migrations.AlterField(
            model_name="institution",
            name="parents",
            field=models.ManyToManyField(
                blank=True,
                # A '+'-suffixed related_name disables the reverse accessor.
                related_name="_institution_parents_+",
                to="institutions.Institution",
                verbose_name="Parent institutions",
            ),
        )
    ]
|
kylon/pacman-fakeroot | refs/heads/upstream | test/pacman/tests/fileconflict006.py | 19 | self.description = "dir->symlink change during package upgrade (conflict)"
p1 = pmpkg("pkg1", "1.0-1")
p1.files = ["test/",
"test/file1",
"test/dir/file1",
"test/dir/file2"]
self.addpkg2db("local", p1)
p2 = pmpkg("pkg2")
p2.files = ["test/dir/file3"]
self.addpkg2db("local", p2)
p3 = pmpkg("pkg1", "2.0-1")
p3.files = ["test2/",
"test2/file3",
"test -> test2"]
self.addpkg2db("sync", p3)
self.args = "-S pkg1"
self.addrule("PACMAN_RETCODE=1")
self.addrule("PKG_EXIST=pkg1")
self.addrule("PKG_VERSION=pkg1|1.0-1")
|
r2k0/flask-apps | refs/heads/master | test/app.py | 1 | from flask import Flask
""" learning and experimenting with Flask """
# create an instance of the Flask class and assign
# it to the variable 'app'
app = Flask(__name__)
# decorators to link the function to a url
@app.route("/")
@app.route("/hello")
# dynamic routes
@app.route("/test")
@app.route("/test/<query>")
def search(query):
return query
#Flask converters
# * <value> is treated as unicode
# * <int:value> is treated as an integer
# * <float:value> is treated as a floating point
# * <path/of/some/sort> is treated as a path
@app.route("/integer/<int:value>")
def int_type(value):
print value + 1
return "correct"
@app.route("/float/<float:value>")
def float_type(value):
print value + 1
return "correct"
# dynamic route that accepts slashes
@app.route("/path/<path:value>")
def path_type(value):
print value
return "correct"
# Response Object - response, status, headers
# if not explicitly define these, Flask will automatically assign
# a Status Code of 200 and a header where the Content-Type: "text/html"
#
@app.route("/name/<name>")
def index(name):
return "Hello, {}".format(name), 200
def test():
    """Return a fixed greeting string (scratch function, no route attached)."""
    message = "testing Flask!"
    return message
# run() starts Flask's built-in development server (not for production use).
if __name__ == "__main__":
    app.run()
|
teriyakichild/ansible-modules-extras | refs/heads/devel | network/dnsimple.py | 96 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: dnsimple
version_added: "1.6"
short_description: Interface with dnsimple.com (a DNS hosting service).
description:
- "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)"
options:
account_email:
description:
- "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
required: false
default: null
account_api_token:
description:
- Account API token. See I(account_email) for info.
required: false
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned.
- If domain is present but the domain doesn't exist, it will be created.
required: false
default: null
record:
description:
- Record to add, if blank a record for the domain will be created, supports the wildcard (*)
required: false
default: null
record_ids:
description:
- List of records to ensure they either exist or don't exist
required: false
default: null
type:
description:
- The type of DNS record to create
required: false
choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
default: null
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
value:
description:
- Record value
- "Must be specified when trying to ensure a record exists"
required: false
default: null
priority:
description:
- Record priority
required: false
default: null
state:
description:
- whether the record should exist or not
required: false
choices: [ 'present', 'absent' ]
default: null
solo:
description:
- Whether the record should be the only one for that record type and record name. Only use with state=present on a record
required: false
default: null
requirements: [ dnsimple ]
author: "Alex Coomans (@drcapulet)"
'''
EXAMPLES = '''
# authenticate using email and API token
- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken
# fetch all domains
- local_action: dnsimple
register: domains
# fetch my.com domain records
- local_action: dnsimple domain=my.com state=present
register: records
# delete a domain
- local_action: dnsimple domain=my.com state=absent
# create a test.my.com A record to point to 127.0.0.01
- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
register: record
# and then delete it
- local_action: dnsimple domain=my.com record_ids={{ record['id'] }}
# create a my.com CNAME record to example.com
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
# change its ttl
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
# and delete the record
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
'''
import os
try:
from dnsimple import DNSimple
from dnsimple.dnsimple import DNSimpleException
HAS_DNSIMPLE = True
except ImportError:
HAS_DNSIMPLE = False
def main():
    """Entry point for the dnsimple Ansible module.

    Dispatches on the supplied parameters:
      * no domain             -> return the list of all domains
      * domain only           -> create/delete/fetch that domain
      * domain + record       -> ensure a single record is present/absent
      * domain + record_ids   -> ensure the listed record ids exist / are gone

    Every branch terminates via module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec = dict(
            account_email     = dict(required=False),
            account_api_token = dict(required=False, no_log=True),
            domain            = dict(required=False),
            record            = dict(required=False),
            record_ids        = dict(required=False, type='list'),
            type              = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
            ttl               = dict(required=False, default=3600, type='int'),
            value             = dict(required=False),
            priority          = dict(required=False, type='int'),
            state             = dict(required=False, choices=['present', 'absent']),
            solo              = dict(required=False, type='bool'),
        ),
        # required_together must be a list of lists.  The previous bare
        # (['record', 'value']) made AnsibleModule iterate each string's
        # *characters* as parameter names, so the constraint never worked.
        required_together = [
            ['record', 'value'],
        ],
        supports_check_mode = True,
    )

    if not HAS_DNSIMPLE:
        # fail_json only accepts keyword arguments; the message must be msg=.
        module.fail_json(msg="dnsimple required for this module")

    account_email = module.params.get('account_email')
    account_api_token = module.params.get('account_api_token')
    domain = module.params.get('domain')
    record = module.params.get('record')
    record_ids = module.params.get('record_ids')
    record_type = module.params.get('type')
    ttl = module.params.get('ttl')
    value = module.params.get('value')
    priority = module.params.get('priority')
    state = module.params.get('state')
    is_solo = module.params.get('solo')

    # Credential resolution: explicit params > environment variables >
    # the dnsimple library's own ~/.dnsimple file fallback.
    if account_email and account_api_token:
        client = DNSimple(email=account_email, api_token=account_api_token)
    elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
        client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
    else:
        client = DNSimple()

    try:
        # Let's figure out what operation we want to do

        # No domain, return a list
        if not domain:
            domains = client.domains()
            module.exit_json(changed=False, result=[d['domain'] for d in domains])

        # Domain & No record
        if domain and record is None and not record_ids:
            domains = [d['domain'] for d in client.domains()]
            # The domain may be given as a numeric DNSimple id or as a name.
            if domain.isdigit():
                dr = next((d for d in domains if d['id'] == int(domain)), None)
            else:
                dr = next((d for d in domains if d['name'] == domain), None)
            if state == 'present':
                if dr:
                    module.exit_json(changed=False, result=dr)
                else:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
            elif state == 'absent':
                if dr:
                    if not module.check_mode:
                        client.delete(domain)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # need the not none check since record could be an empty string
        if domain and record is not None:
            records = [r['record'] for r in client.records(str(domain))]

            if not record_type:
                module.fail_json(msg="Missing the record type")

            if not value:
                module.fail_json(msg="Missing the record value")

            # An existing record matches only on name AND type AND content.
            rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)

            if state == 'present':
                changed = False
                if is_solo:
                    # delete any records that have the same name and record type
                    same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
                    if rr:
                        same_type = [rid for rid in same_type if rid != rr['id']]
                    if same_type:
                        if not module.check_mode:
                            for rid in same_type:
                                client.delete_record(str(domain), rid)
                        changed = True
                if rr:
                    # check if we need to update
                    if rr['ttl'] != ttl or rr['prio'] != priority:
                        data = {}
                        if ttl: data['ttl'] = ttl
                        if priority: data['prio'] = priority
                        if module.check_mode:
                            module.exit_json(changed=True)
                        else:
                            module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
                    else:
                        module.exit_json(changed=changed, result=rr)
                else:
                    # create it
                    data = {
                        'name': record,
                        'record_type': record_type,
                        'content': value,
                    }
                    if ttl: data['ttl'] = ttl
                    if priority: data['prio'] = priority
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
            elif state == 'absent':
                if rr:
                    if not module.check_mode:
                        client.delete_record(str(domain), rr['id'])
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

        # Make sure these record_ids either all exist or none
        if domain and record_ids:
            current_records = [str(r['record']['id']) for r in client.records(str(domain))]
            wanted_records = [str(r) for r in record_ids]
            if state == 'present':
                difference = list(set(wanted_records) - set(current_records))
                if difference:
                    module.fail_json(msg="Missing the following records: %s" % difference)
                else:
                    module.exit_json(changed=False)
            elif state == 'absent':
                difference = list(set(wanted_records) & set(current_records))
                if difference:
                    if not module.check_mode:
                        for rid in difference:
                            client.delete_record(str(domain), rid)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)
            else:
                module.fail_json(msg="'%s' is an unknown value for the state argument" % state)

    # 'except X as e' is valid on Python 2.6+ (the old 'except X, e'
    # comma form is a syntax error under Python 3).
    except DNSimpleException as e:
        module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)

    module.fail_json(msg="Unknown what you wanted me to do")
module.fail_json(msg="Unknown what you wanted me to do")
# import module snippets
from ansible.module_utils.basic import *
main()
|
RO-ny9/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/re.py | 45 | #
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode digits.
\D Matches any non-digit character; equivalent to [^\d].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
in bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the
range of Unicode alphanumeric characters (letters plus digits
plus underscore).
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
match the corresponding ASCII character categories
(rather than the whole Unicode categories, which is the
default).
For bytes patterns, this flag is the only available
behaviour and needn't be specified.
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE For compatibility only. Ignored for string patterns (it
is the default), and forbidden for bytes patterns.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
import functools
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
    "compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X",
    "U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE", "error" ]
__version__ = "2.2.1"
# flags
# Each flag is exposed under a one-letter alias and a long name; the
# numeric values come straight from sre_compile and may be OR'ed together.
A = ASCII = sre_compile.SRE_FLAG_ASCII # assume ascii "locale"
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale"
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
# error is raised for malformed patterns; re-exported from sre_compile.
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
    """Apply the pattern at the start of *string*.

    Returns a match object on success, or None when the string does
    not begin with a match."""
    compiled = _compile(pattern, flags)
    return compiled.match(string)
def search(pattern, string, flags=0):
    """Scan *string* for the first location where the pattern matches.

    Returns the match object, or None when no position matches."""
    compiled = _compile(pattern, flags)
    return compiled.search(string)
def sub(pattern, repl, string, count=0, flags=0):
    """Replace the leftmost non-overlapping occurrences of the pattern
    in *string* with *repl* and return the resulting string.

    *repl* may be a string (backslash escapes in it are processed) or
    a callable that receives the match object and returns the
    replacement text."""
    compiled = _compile(pattern, flags)
    return compiled.sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
    """Like sub(), but return the 2-tuple (new_string, number).

    *new_string* is the result of replacing the leftmost
    non-overlapping occurrences of the pattern, and *number* counts
    how many substitutions were made.  *repl* may be a string
    (backslash escapes processed) or a callable receiving the match
    object and returning the replacement text."""
    compiled = _compile(pattern, flags)
    return compiled.subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
    """Split *string* by occurrences of the pattern and return the
    list of resulting substrings."""
    compiled = _compile(pattern, flags)
    return compiled.split(string, maxsplit)
def findall(pattern, string, flags=0):
    """Return a list of every non-overlapping match in *string*.

    With one capture group the list holds that group's text; with
    several groups it holds tuples of group texts.  Empty matches are
    included in the result."""
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
# Version guard kept for historical reasons: finditer() relies on the
# scanner API added in Python 2.2, so on any modern interpreter this
# condition is always true.
if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.
        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
    """Compile a regular expression pattern into a pattern object."""
    compiled = _compile(pattern, flags)
    return compiled
def purge():
    """Clear the module's cached compiled patterns and templates."""
    for cached in (_compile_typed, _compile_repl):
        cached.cache_clear()
def template(pattern, flags=0):
    """Compile *pattern* as a template pattern and return the
    resulting pattern object (the TEMPLATE flag is forced on)."""
    templ_flags = flags | T
    return _compile(pattern, templ_flags)
_alphanum_str = frozenset(
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
_alphanum_bytes = frozenset(
    b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")

def escape(pattern):
    """Escape all non-alphanumeric characters in *pattern*.

    Accepts either str or a bytes-like object and returns the same
    kind; every character outside [a-zA-Z0-9] is backslash-escaped,
    with NUL spelled out as the octal escape \\000."""
    if isinstance(pattern, str):
        pieces = []
        for ch in pattern:
            if ch in _alphanum_str:
                pieces.append(ch)
            elif ch == "\000":
                # NUL cannot be escaped as "\" + chr(0); use octal form
                pieces.append("\\000")
            else:
                pieces.append("\\" + ch)
        return "".join(pieces)
    # bytes-like input: iterating yields integer byte values
    backslash = ord(b"\\")
    out = bytearray()
    for value in pattern:
        if value in _alphanum_bytes:
            out.append(value)
        elif value == 0:
            out.extend(b"\\000")
        else:
            out.append(backslash)
            out.append(value)
    return bytes(out)
# --------------------------------------------------------------------
# internals

# Concrete type of a compiled pattern object; used below for isinstance
# checks and for pickle registration.
_pattern_type = type(sre_compile.compile("", 0))

def _compile(pattern, flags):
    # internal: route through the per-type LRU cache
    pattern_type = type(pattern)
    return _compile_typed(pattern_type, pattern, flags)

@functools.lru_cache(maxsize=500)
def _compile_typed(text_bytes_type, pattern, flags):
    """Internal: compile *pattern*, memoized on (type, pattern, flags)."""
    if not isinstance(pattern, _pattern_type):
        if not sre_compile.isstring(pattern):
            raise TypeError("first argument must be string or compiled pattern")
        return sre_compile.compile(pattern, flags)
    # already compiled: flags cannot be re-applied
    if flags:
        raise ValueError(
            "Cannot process flags argument with a compiled pattern")
    return pattern
@functools.lru_cache(maxsize=500)
def _compile_repl(repl, pattern):
    # internal: parse a replacement template, memoized per (repl, pattern)
    parsed = sre_parse.parse_template(repl, pattern)
    return parsed
def _expand(pattern, match, template):
    # internal: implementation hook behind match.expand()
    parsed = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(parsed, match)
def _subx(pattern, template):
    # internal: build the replacement callable used by pattern.sub/subn
    parsed = _compile_repl(template, pattern)
    if not parsed[0] and len(parsed[1]) == 1:
        # no group references and a single literal piece: the
        # replacement is just that plain string
        return parsed[1][0]
    def filter(match, template=parsed):
        return sre_parse.expand_template(template, match)
    return filter
# register myself for pickling
import copyreg
def _pickle(p):
    # Reduce a compiled pattern to (_compile, (source, flags)) so that
    # unpickling simply recompiles it.
    return _compile, (p.pattern, p.flags)
copyreg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
    # Simple lexer: the lexicon is a sequence of (phrase, action)
    # pairs.  All phrases are combined into one alternation so a single
    # match tells us (via lastindex) which lexicon entry fired.
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            # wrap each phrase in its own numbered group so m.lastindex
            # identifies the matching lexicon entry
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        # Tokenize *string*: returns (tokens, remainder) where
        # remainder is the unmatched tail of the input.
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # zero-width match: stop to avoid looping forever
                break
            action = self.lexicon[m.lastindex-1][1]
            if hasattr(action, "__call__"):
                # callable actions may inspect self.match
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
|
UAlbanyArchives/ua395 | refs/heads/master | ua395.py | 1 | # coding: utf-8
import requests
import json
from lxml import etree as ET
import time
import urllib
import os
import uuid
import shortuuid
import hashlib
import simplejson
import shutil
import traceback
import datetime
from PIL import Image
import sys
from subprocess import Popen, PIPE
import smtplib
# Record when the crawl started and append a banner to the running log.
startTime = time.time()
startTimeReadable = str(time.strftime("%Y-%m-%d %H:%M:%S"))
print startTimeReadable
print(sys.getfilesystemencoding())
#start log
startLog = open("log.txt", "a")
logText = "\n****************************************************************************************************************\n"
logText = logText + "Crawl started " + startTimeReadable
startLog.write(logText)
startLog.close()
#from http://stackoverflow.com/questions/14996453/python-libraries-to-calculate-human-readable-filesize-from-bytes
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

def humansize(nbytes):
    """Format a byte count as a human-readable string, e.g. '1.5 KB'.

    Repeatedly divides by 1024, then strips trailing zeros (and a
    trailing dot) from the two-decimal representation.
    """
    if nbytes == 0:
        return '0 B'
    size = float(nbytes)
    idx = 0
    while size >= 1024 and idx < len(suffixes) - 1:
        size /= 1024.0
        idx += 1
    text = ('%.2f' % size).rstrip('0').rstrip('.')
    return '%s %s' % (text, suffixes[idx])
# Platform-specific working paths.
#   basePath    - local processing directory (XML records, logs)
#   stagingPath - where downloaded images are staged
#   hashDir     - master copy of the de-duplication hash index
if os.name == "nt":
    basePath = "\\Processing\\ua395"
    stagingPath = "\\Processing\\ua395\\stagingUA395"
    # Fixed: this was bound to "hashdir" (lowercase d), so every later
    # reference to hashDir raised NameError when running on Windows.
    hashDir = "\\LINCOLN\\Masters\\Special Collections\\accessions\\hashDir\\ua395Hash"
else:
    basePath = "/home/bcadmin/Desktop/Processing/ua395"
    stagingPath = "/media/bcadmin/SPE/Electronic_Records_Library/ua395/fromSmugMug"
    hashDir = "/media/bcadmin/Lincoln/Special Collections/accessions/hashDir/ua395Hash"
try:
    def readField(JSON, parent, fieldString):
        # Copy one field from an API JSON object into a new child
        # element of *parent* (tag is the lowercased field name),
        # normalizing curly quotes to ASCII apostrophes.
        try:
            newElement = ET.SubElement(parent, fieldString.lower())
            newElement.text = JSON[fieldString].replace(u"\u2018", "'").replace(u"\u2019", "'").strip()
        except:
            print "Could not read " + fieldString + " from " + JSON["Uri"]
    def md5(fname):
        # Stream an MD5 checksum of a file, reading in 4 KB chunks.
        hash_md5 = hashlib.md5()
        with open(fname, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()
keyFile = blockedFile = open("key.txt", "r")
line1 = keyFile.readline().split("\n")[0]
keyString = line1.split("Key: ")[1].strip()
keyFile.close()
#url = "http://www.smugmug.com/api/v2/user/ualbanyphotos"
#url = "http://www.smugmug.com/api/v2/user/ualbanyphotos?APIKey=keyString"
#url = "http://www.smugmug.com/api/v2/user/ualbanyphotos!albums?start=1&count=99999999?APIKey=keyString"
url = "http://www.smugmug.com/api/v2/user/ualbanyphotos!albums?start=1&count=99999&APIKey=keyString"
parser = ET.XMLParser(remove_blank_text=True)
headers = {
'Accept': 'application/json',
}
r = requests.get(url, headers=headers)
    print r.status_code
    #print r.json()
    # Tally albums we cannot fetch (password-protected or downloads off)
    # and merge the rest into the persistent albums.xml record set.
    blockedCount = 0
    passwordCount = 0
    passwordText = ""
    blockedText = ""
    if os.path.isfile(os.path.join(basePath, "albums.xml")):
        albumsInput = ET.parse(os.path.join(basePath, "albums.xml"), parser)
        albumsXML = albumsInput.getroot()
    else:
        # No prior record set: log the error but continue with a fresh root.
        print "Error: existing Album records not found."
        finalTime = time.time() - startTime
        print "Total Time: " + str(finalTime) + " seconds, " + str(finalTime/60) + " minutes, " + str(finalTime/3600) + " hours"
        errorLog = open("errorLog.txt", "a")
        errorText = "***********************************************************************************\n" + str(time.strftime("%Y-%m-%d %H:%M:%S")) + "\n" + str(finalTime) + " seconds\n" + str(finalTime/60) + " minutes\n" + str(finalTime/3600) + " hours" + "\nTraceback:\n" + "Error: existing Album records not found."
        errorLog.write(errorText)
        errorLog.close()
        albumsXML = ET.Element("albums")
    for album in r.json()["Response"]["Album"]:
        if album["ResponseLevel"] != "Public":
            passwordCount = passwordCount + 1
            passwordText = passwordText + "\n" + album["WebUri"]# + " (" + album["Name"] + ")"
        elif album["AllowDownloads"] != True:
            blockedCount = blockedCount + 1
            blockedText = blockedText + "\n" + album["WebUri"]# + " (" + album["Name"] + ")"
        else:
            #no permissions issues
            #check if album has already been downloaded and all content is the same
            try:
                # Existing record: refresh changed fields, or attach a
                # <conflict> child when identifying fields disagree.
                albumURI = albumsXML.xpath("//uri[text()='" + album["Uri"] + "']")[0]
                albumXML = albumURI.getparent()
                if albumXML.find("urlpath").text == album["UrlPath"] and albumXML.find("name").text == album["Name"] and albumXML.find("nicename").text == album["NiceName"] and albumXML.find("imagesURI").text == album["Uris"]["AlbumImages"]["Uri"]:
                    #updated album record
                    ImagesLastUpdatedMatch = 0
                    DescriptionMatch = 0
                    KeywordsMatch = 0
                    for albumField in albumXML:
                        if albumField.tag == "imageslastupdated" and albumField.text == album["ImagesLastUpdated"]:
                            ImagesLastUpdatedMatch = ImagesLastUpdatedMatch + 1
                        if albumField.tag == "description" and albumField.text == album["Description"]:
                            DescriptionMatch = DescriptionMatch + 1
                        if albumField.tag == "keywords" and albumField.text == album["Keywords"]:
                            KeywordsMatch = KeywordsMatch + 1
                    if ImagesLastUpdatedMatch == 0:
                        readField(album, albumXML, "ImagesLastUpdated")
                    if DescriptionMatch == 0:
                        readField(album, albumXML, "Description")
                    if KeywordsMatch == 0:
                        readField(album, albumXML, "Keywords")
                else:
                    #conflict with old record
                    conflictXML = ET.SubElement(albumXML, "conflict")
                    conflictXML.set("conflictDiscovered",str(time.time()))
                    newAlbumXML = ET.SubElement(conflictXML, "album")
                    readField(album, newAlbumXML, "Uri")
                    readField(album, newAlbumXML, "UrlPath")
                    readField(album, newAlbumXML, "Name")
                    readField(album, newAlbumXML, "NiceName")
                    readField(album, newAlbumXML, "ImagesLastUpdated")
                    readField(album, newAlbumXML, "Description")
                    if album["Title"] != album["Name"]:
                        print "Title is different for " + album["Name"]
                        readField(album, newAlbumXML, "Title")
                    readField(album, newAlbumXML, "Keywords")
                    imagesURI = ET.SubElement(newAlbumXML, "imagesURI")
                    imagesURI.text = album["Uris"]["AlbumImages"]["Uri"]
            except:
                # Not found in albums.xml (xpath raised IndexError):
                # create a brand-new album record.
                albumXML = ET.SubElement(albumsXML, "album")
                readField(album, albumXML, "Uri")
                readField(album, albumXML, "UrlPath")
                readField(album, albumXML, "Name")
                readField(album, albumXML, "NiceName")
                readField(album, albumXML, "ImagesLastUpdated")
                readField(album, albumXML, "Description")
                if album["Title"] != album["Name"]:
                    print "Title is different for " + album["Name"]
                    readField(album, albumXML, "Title")
                readField(album, albumXML, "Keywords")
                imagesURI = ET.SubElement(albumXML, "imagesURI")
                imagesURI.text = album["Uris"]["AlbumImages"]["Uri"]
    # Persist the skipped-album lists and the merged album records.
    passwordFile = open("password.txt", "w")
    passwordFile.write(passwordText)
    passwordFile.close()
    blockedFile = open("blocked.txt", "w")
    blockedFile.write(blockedText)
    blockedFile.close()
    print str(passwordCount) + " albums require a password"
    print str(blockedCount) + " albums cannot be completely downloaded"
    albumString = ET.tostring(albumsXML, pretty_print=True, xml_declaration=True, encoding="utf-8")
    albumFile = open("albums.xml", "w")
    albumFile.write(albumString)
    albumFile.close()
    # Work from a local copy of the master hash index for this crawl.
    shutil.copy(os.path.join(hashDir,"hashIndex.json"), os.path.join(basePath, "hashIndexWorking.json"))
    with open(os.path.join(basePath, 'hashIndexWorking.json'), 'r') as fp:
        hashIndex = simplejson.loads(fp.read())
    # NOTE(review): both branches build a fresh root, so any existing
    # images.xml is never actually re-read here.
    if os.path.isfile(os.path.join(basePath, "images.xml")):
        #imageInput = ET.parse(os.path.join(basePath, "images.xml"), parser)
        imagesXML = ET.Element("albums")
    else:
        imagesXML = ET.Element("albums")
    # For each known album, list its images via the API and record any
    # image whose MD5 is not already in the hash index.
    runningFileSize = 0
    for folder in albumsXML:
        try:
            folderXML = imagesXML.xpath("//album[@uri='" + folder.find("uri").text + "']")[0]
        except:
            folderXML = ET.SubElement(imagesXML, "album")
            folderXML.set("name", folder.find("name").text)
            folderXML.set("uri", folder.find("uri").text)
        #url for api request for each album to list images
        # NOTE(review): key.txt is re-read on every iteration, and the
        # "blockedFile" alias here looks like a copy-paste slip.
        keyFile = blockedFile = open("key.txt", "r")
        line1 = keyFile.readline().split("\n")[0]
        keyString = line1.split("Key: ")[1].strip()
        keyFile.close()
        url = "http://www.smugmug.com" + folder.find("imagesURI").text + "?APIKey=" + keyString
        r = requests.get(url, headers=headers)
        #print status code if not successful
        if str(r.status_code) != "200":
            print r.status_code
        for image in r.json()["Response"]["AlbumImage"]:
            #check against json hash index
            try:
                metaHash = image["ArchivedMD5"]
                if len(metaHash) < 1:
                    raise ValueError("no hash in API data")
            except:
                # placeholder meaning "API supplied no usable hash"
                metaHash = "'''''"
            if metaHash in hashIndex.values():
                #image already downloaded
                pass
            else:
                imageXML = ET.SubElement(folderXML, "image")
                readField(image, imageXML, "Uri")
                readField(image, imageXML, "FileName")
                readField(image, imageXML, "Date")
                readField(image, imageXML, "WebUri")
                readField(image, imageXML, "LastUpdated")
                readField(image, imageXML, "ArchivedMD5")
                readField(image, imageXML, "ThumbnailUrl")
                readField(image, imageXML, "Caption")
                readField(image, imageXML, "Keywords")
                imageURL = ET.SubElement(imageXML, "ArchivedUri".lower())
                try:
                    imageURL.text = image["ArchivedUri"].replace(u"\u2018", "'").replace(u"\u2019", "'").strip()
                except:
                    # no archived original: fall back to the original-size web URL
                    imageURL.text = image["WebUri"] + "/0/O/" + image["FileName"].replace(" ", "%20")
                try:
                    runningFileSize = runningFileSize + int(image["ArchivedSize"])
                except:
                    pass
    # Persist the download worklist (imagesTest.xml is a debug copy).
    imageString = ET.tostring(imagesXML, pretty_print=True, xml_declaration=True, encoding="utf-8")
    imagesFile = open("images.xml", "w")
    imagesFile.write(imageString)
    imagesFile.close()
    #for debugging
    imagesFile2 = open("imagesTest.xml", "w")
    imagesFile2.write(imageString)
    imagesFile2.close()
    metaTime = time.time() - startTime
    print "Total File Size: " + str(runningFileSize)
    print "Total Time to get metadata: " + str(metaTime)
    # Download every image on the worklist into the staging tree,
    # retrying with requests over https then http, with back-off sleeps.
    # NOTE(review): "input" and "file" shadow Python builtins here.
    input = ET.parse(os.path.join(basePath, "images.xml"), parser)
    imagesXML = input.getroot()
    if not os.path.isdir(os.path.join(stagingPath, "ualbanyphotos")):
        os.makedirs(os.path.join(stagingPath, "ualbanyphotos"))
    for group in imagesXML:
        if group.find("image") is None:
            pass
        else:
            #print "examining " + group.attrib["uri"]
            for file in group:
                # Re-read the working hash index so progress survives crashes.
                with open(os.path.join(basePath,'hashIndexWorking.json'), 'r') as fp:
                    hashIndex = simplejson.loads(fp.read())
                if file.find("archivedmd5").text is None:
                    metaHash = "'''''"
                else:
                    metaHash = file.find("archivedmd5").text
                if metaHash in hashIndex.values():
                    #print "hash found for " + makeFile
                    #print "removing " + file.find("uri").text + " from " + group.attrib["uri"]
                    group.remove(file)
                else:
                    #print "downloading " + file.find("uri").text
                    # Derive a local directory path from the image's web URI.
                    filename = file.find("filename").text
                    path = file.find("weburi").text
                    path = path.split("//")[1]
                    path = path.replace("-/", "/")
                    path = os.path.dirname(path)
                    path = path.replace("www.ualbanyphotos.com/", "").replace("photos.smugmug.com/", "")
                    #path = path.replace("-", " ")
                    #Only for Windows paths:
                    if os.name =="nt":
                        path = path.replace("/", "\\")
                    destination = os.path.join(os.path.join(stagingPath, "ualbanyphotos"), path)
                    #print destination
                    if not os.path.isdir(destination):
                        os.makedirs(destination)
                    makeFile = os.path.join(destination, filename)
                    href = file.find("archiveduri").text
                    thumb = file.find("thumbnailurl").text
                    #print href
                    #download file
                    print "downloading " + file.find("filename").text
                    try:
                        urllib.urlretrieve(href, makeFile)
                    except:
                        # attempt 2: streamed requests download
                        try:
                            print "failed first attempt to retrieve " + file.find("uri").text
                            with open(makeFile, 'wb') as handle:
                                response = requests.get(href, stream=True)
                                for block in response.iter_content(1024):
                                    handle.write(block)
                        except:
                            # attempt 3: wait 15s, drop the partial file, try plain http
                            try:
                                time.sleep(15)
                                if os.path.isfile(makeFile):
                                    os.remove(makeFile)
                                print "failed second attempt to retrieve " + file.find("uri").text
                                href = href.replace("https://", "http://")
                                with open(makeFile, 'wb') as handle:
                                    response = requests.get(href, stream=True)
                                    for block in response.iter_content(1024):
                                        handle.write(block)
                            except:
                                # attempt 4: wait 5 minutes and try once more
                                try:
                                    time.sleep(300)
                                    if os.path.isfile(makeFile):
                                        os.remove(makeFile)
                                    print "failed third attempt to retrieve " + file.find("uri").text
                                    href = href.replace("https://", "http://")
                                    with open(makeFile, 'wb') as handle:
                                        response = requests.get(href, stream=True)
                                        for block in response.iter_content(1024):
                                            handle.write(block)
                                except:
                                    # give up: record the failure in errorLog.txt
                                    print "failed final attempt to retrieve " + file.find("uri").text
                                    print "tried to download image from " + href
                                    exceptMsg = str(traceback.format_exc())
                                    finalTime = time.time() - startTime
                                    print "Total Time: " + str(finalTime) + " seconds, " + str(finalTime/60) + " minutes, " + str(finalTime/3600) + " hours"
                                    print exceptMsg
                                    exceptMsg = exceptMsg + "\ntried to download image " + file.find("uri").text + " from " + href
                                    errorLog = open("errorLog.txt", "a")
                                    errorText = "***********************************************************************************\n" + str(time.strftime("%Y-%m-%d %H:%M:%S")) + "\n" + str(finalTime) + " seconds\n" + str(finalTime/60) + " minutes\n" + str(finalTime/3600) + " hours" + "\nTraceback:\n" + exceptMsg
                                    errorLog.write(errorText)
                                    errorLog.close()
                    # Download the thumbnail with the same retry cascade,
                    # then verify/record hashes for the full-size file.
                    thumbDir = os.path.join(destination, "thumbs")
                    if not os.path.isdir(thumbDir):
                        os.makedirs(thumbDir)
                    thumbName = os.path.basename(thumb)
                    thumbFile = os.path.join(thumbDir, thumbName)
                    try:
                        urllib.urlretrieve(thumb, thumbFile)
                    except:
                        try:
                            print "failed first attempt to retrieve thumbnail for " + file.find("uri").text
                            with open(thumbFile, 'wb') as handle:
                                response = requests.get(thumb, stream=True)
                                for block in response.iter_content(1024):
                                    handle.write(block)
                        except:
                            try:
                                time.sleep(15)
                                if os.path.isfile(thumbFile):
                                    os.remove(thumbFile)
                                print "failed second attempt to retrieve thumbnail for " + file.find("uri").text
                                thumb = thumb.replace("https://", "http://")
                                with open(thumbFile, 'wb') as handle:
                                    response = requests.get(thumb, stream=True)
                                    for block in response.iter_content(1024):
                                        handle.write(block)
                            except:
                                try:
                                    time.sleep(300)
                                    if os.path.isfile(thumbFile):
                                        os.remove(thumbFile)
                                    print "failed third attempt to retrieve thumbnail for " + file.find("uri").text
                                    thumb = thumb.replace("https://", "http://")
                                    with open(thumbFile, 'wb') as handle:
                                        response = requests.get(thumb, stream=True)
                                        for block in response.iter_content(1024):
                                            handle.write(block)
                                except:
                                    print "failed final attempt to retrieve thumbnail for " + file.find("uri").text
                                    print "tried to download thumbnail from " + thumb
                                    exceptMsg = str(traceback.format_exc())
                                    finalTime = time.time() - startTime
                                    print "Total Time: " + str(finalTime) + " seconds, " + str(finalTime/60) + " minutes, " + str(finalTime/3600) + " hours"
                                    print exceptMsg
                                    exceptMsg = exceptMsg + "\ntried to download thumbnail for " + file.find("uri").text + " from " + thumb
                                    errorLog = open("errorLog.txt", "a")
                                    errorText = "***********************************************************************************\n" + str(time.strftime("%Y-%m-%d %H:%M:%S")) + "\n" + str(finalTime) + " seconds\n" + str(finalTime/60) + " minutes\n" + str(finalTime/3600) + " hours" + "\nTraceback:\n" + exceptMsg
                                    errorLog.write(errorText)
                                    errorLog.close()
                    # Stamp (or re-stamp) the record with the download time.
                    if file.find("downloadTime") is None:
                        downloadXML = ET.SubElement(file, "downloadTime")
                        downloadXML.set("type", "posix")
                        downloadXML.text = str(time.time())
                    else:
                        file.find("downloadTime").set("type", "posix")
                        file.find("downloadTime").text = str(time.time())
                    # Compare the local MD5 against the API's; with no API
                    # hash ("'''''"), de-duplicate on the local hash alone.
                    fileHash = str(md5(makeFile))
                    if metaHash == "'''''":
                        if fileHash in hashIndex.values():
                            #issue here
                            #print "hash found for " + makeFile
                            #print "removing " + str(file.find("uri").text) + " from " + str(group.attrib["uri"])
                            group.remove(file)
                            os.remove(thumbFile)
                            os.remove(makeFile)
                        else:
                            print makeFile + " is new"
                            hashIndex.update({file.find("uri").text: fileHash})
                    else:
                        if file.find("archivedmd5").text == fileHash:
                            hashXML = ET.SubElement(file, "hash")
                            hashXML.set("type", "md5")
                            hashXML.text = "success"
                        else:
                            hashXML = ET.SubElement(file, "hash")
                            hashXML.set("type", "md5")
                            hashXML.text = "failed"
                        hashIndex[file.find("uri").text] = fileHash
                    # Checkpoint the working hash index after every file.
                    with open(os.path.join(basePath,'hashIndexWorking.json'), 'w') as fp:
                        simplejson.dump(hashIndex, fp)
    #count new albums and remove empty albums from images.xml
    # NOTE(review): this removes elements from imagesXML while iterating
    # over it, which can skip siblings; verify against lxml semantics.
    newAlbumCount = 0
    for albumElement in imagesXML:
        #print "looking at " + str(albumElement.attrib["uri"])
        #print "count is " + str(len(albumElement.findall("image")))
        if len(albumElement.findall("image")) == 0:
            imagesXML.remove(albumElement)
        else:
            newAlbumCount = newAlbumCount + 1
    print str(newAlbumCount) + " new albums found"
    imageString = ET.tostring(imagesXML, pretty_print=True, xml_declaration=True, encoding="utf-8")
    imagesFile = open("images.xml", "w")
    imagesFile.write(imageString)
    imagesFile.close()
    #count files and data
    fileCount = 0
    totalSize = 0
    for root, dirs, files in os.walk(os.path.join(stagingPath, "ualbanyphotos")):
        fileCount += len(files)
        for f in files:
            fp = os.path.join(root, f)
            totalSize += os.path.getsize(fp)
    readableSize = humansize(totalSize)
    #remove empty directories
    # Two passes: bottom-up removes leaves first, then a top-down sweep
    # catches directories emptied by the first pass.
    for root, dirs, files in os.walk(os.path.join(stagingPath, "ualbanyphotos"), topdown=False):
        for folder in dirs:
            if len(os.listdir(os.path.join(root, folder))) == 0:
                os.rmdir(os.path.join(root, folder))
    for root, dirs, files in os.walk(os.path.join(stagingPath, "ualbanyphotos"), topdown=True):
        for folder in reversed(dirs):
            if len(os.listdir(os.path.join(root, folder))) == 0:
                os.rmdir(os.path.join(root, folder))
    #log albums and images files for crawl
    # Archive timestamped copies of this crawl's record files.
    startTimeFilename = startTimeReadable.replace(":", "-").replace(" ", "_")
    shutil.copy2("images.xml", os.path.join(basePath, "arrangement"))
    #print os.path.join(basePath, "arrangement", "images.xml")
    os.rename(os.path.join(basePath, "arrangement", "images.xml"), os.path.join(basePath, "arrangement", startTimeFilename + "images.xml"))
    shutil.copy2("albums.xml", os.path.join(basePath, "arrangement"))
    os.rename(os.path.join(basePath, "arrangement", "albums.xml"), os.path.join(basePath, "arrangement", startTimeFilename + "albums.xml"))
    #make SIP metadata file
    # Only build a Submission Information Package when something new was
    # actually downloaded this crawl.
    if newAlbumCount > 0:
        collectionID = "ua395"
        accessionNumber = collectionID + "-" + str(shortuuid.uuid())
        sipRoot = ET.Element("accession")
        sipRoot.set("version", "0.1")
        sipRoot.set("number", accessionNumber)
        submitTime = time.time()
        submitTimeReadable = str(time.strftime("%Y-%m-%d %H:%M:%S"))
        sipRoot.set("submitted", submitTimeReadable)
        sipRoot.set("submittedPosix", str(submitTime))
        #create profile
        # Static donor/creator contact details for the accession record.
        profileXML = ET.SubElement(sipRoot, "profile")
        notesXML = ET.SubElement(profileXML, "notes")
        notesXML.text = ""
        creatorXML = ET.SubElement(profileXML, "creator")
        creatorXML.text = "Digital Media Unit"
        creatorIdXML = ET.SubElement(profileXML, "creatorId")
        creatorIdXML.text = collectionID
        donorXML = ET.SubElement(profileXML, "donor")
        donorXML.text = "Mark Schmidt"
        roleXML = ET.SubElement(profileXML, "role")
        roleXML.text = "Campus Photographer"
        emailXML = ET.SubElement(profileXML, "email")
        emailXML.text = "pmiller2@albany.edu"
        officeXML = ET.SubElement(profileXML, "office")
        officeXML.text = "University Hall 202"
        address1XML = ET.SubElement(profileXML, "address1")
        address1XML.text = "1400 Washington Ave"
        address2XML = ET.SubElement(profileXML, "address2")
        address2XML.text = "Albany, NY 12222"
        address3XML = ET.SubElement(profileXML, "address3")
        address3XML.text = ""
        methodXML = ET.SubElement(profileXML, "method")
        methodXML.text = "Crawled from Smug Mug API using ua395.py (https://github.com/UAlbanyArchives/ua395/blob/master/ua395.py)"
        locationXML = ET.SubElement(profileXML, "location")
        locationXML.text = basePath
        extentXML = ET.SubElement(profileXML, "extent")
        extentXML.set("unit", "bytes")
        extentXML.text = str(totalSize)
        extentXML.set("humanReadable", str(readableSize))
        # Reload the crawl's record files for descriptive mapping below.
        inputImages = ET.parse(os.path.join(basePath, "images.xml"), parser)
        imagesXML = inputImages.getroot()
        inputAlbums = ET.parse(os.path.join(basePath, "albums.xml"), parser)
        albumsXML = inputAlbums.getroot()
        def makeRecord(path):
            # Build a skeleton <folder>/<file> record for one filesystem
            # entry: id, path, description, access and event containers.
            if os.path.isdir(path):
                record = ET.Element("folder")
            else:
                record = ET.Element("file")
            try:
                record.set("name", os.path.basename(path))
            except:
                # NOTE(review): on failure this dumps the partial SIP to
                # disk from inside the helper and then continues — looks
                # like debugging aid left in place; confirm intent.
                print str(traceback.format_exc())
                metadataString = ET.tostring(sipRoot, pretty_print=True, xml_declaration=True, encoding="utf-8")
                metadataFile = open(os.path.join(stagingPath, accessionNumber + ".xml"), "w")
                metadataFile.write(metadataString)
                metadataFile.close()
            idXML = ET.SubElement(record, "id")
            idXML.text = str(uuid.uuid4())
            pathXML = ET.SubElement(record, "path")
            descriptionXML = ET.SubElement(record, "description")
            accessXML = ET.SubElement(record, "access")
            curatorialEventsXML = ET.SubElement(record, "curatorialEvents")
            recordEventsXML = ET.SubElement(record, "recordEvents")
            return record
        #loop through directory and create records
        def loopAccession(path, root):
            # Recursively mirror the staging tree under *root*; always
            # returns sipRoot, so the rebinding of the local "root" in
            # the loop below is inert.
            if os.path.isdir(path):
                record = makeRecord(path.decode(sys.getfilesystemencoding()))
                root.append(record)
                for item in os.listdir(path):
                    root = loopAccession(os.path.join(path, item), record)
            else:
                root.append(makeRecord(path))
            return sipRoot
sipRoot = loopAccession(os.path.join(stagingPath, "ualbanyphotos"), sipRoot)
#for debugging
"""
metadataString = ET.tostring(sipRoot, pretty_print=True, xml_declaration=True, encoding="utf-8")
metadataFile = open(os.path.join(stagingPath, accessionNumber + ".xml"), "w")
metadataFile.write(metadataString)
metadataFile.close()
"""
        # Attach album-level descriptive metadata (title, scope, keywords,
        # last-updated timestamp) to the matching <folder> in the SIP tree.
        for album in imagesXML:
            albumUri = album.attrib["uri"]
            for albumListing in albumsXML:
                if albumListing.find("uri").text == albumUri:
                    albumRecord = albumListing
            albumPath = albumRecord.find("urlpath").text
            if albumPath.startswith("/"):
                albumPath = albumPath[1:]
            # Build an xpath like //folder[@name='a']/folder[@name='b']...
            query = "/"
            for level in albumPath.split("/"):
                query = query + "/folder[@name='" + level + "']"
            albumNode = sipRoot.xpath(query)[0]
            albumNode.find("path").text = albumPath
            unittitle = ET.Element("unittitle")
            unittitle.text = albumRecord.find("name").text
            scopecontent = ET.Element("scopecontent")
            scopecontent.text = albumRecord.find("description").text
            controlaccess = ET.Element("controlaccess")
            controlaccess.text = albumRecord.find("keywords").text
            albumNode.find("description").append(unittitle)
            albumNode.find("description").append(scopecontent)
            albumNode.find("description").append(controlaccess)
            timestamp = ET.Element("timestamp")
            timestamp.text = albumRecord.find("imageslastupdated").text
            timestamp.set("timeType", "iso8601")
            timestamp.set("parser", "SmugMug")
            albumNode.find("recordEvents").append(timestamp)
            # Attach image-level metadata (caption, keywords, download and
            # hash-verification events, SmugMug date, EXIF capture date).
            for image in album:
                weburi = image.find("weburi").text.split("http://www.ualbanyphotos.com/")[1]
                imagePath = os.path.dirname(weburi)
                if imagePath.startswith("/"):
                    imagePath = imagePath[1:]
                query = "/"
                for level in imagePath.split("/"):
                    query = query + "/folder[@name='" + level + "']"
                query = query + "/file[@name='" + image.find("filename").text + "']"
                imageNode = sipRoot.xpath(query)[0]
                imageNode.find("path").text = imagePath + "/" + image.find("filename").text
                unittitle = ET.Element("unittitle")
                unittitle.text = image.find("caption").text
                controlaccess = ET.Element("controlaccess")
                controlaccess.text = image.find("keywords").text
                imageNode.find("description").append(unittitle)
                imageNode.find("description").append(controlaccess)
                event1 = ET.SubElement(imageNode.find("curatorialEvents"), "event")
                event1.text = "downloaded from SmugMug API"
                if image.find("downloadTime") is None:
                    pass
                else:
                    downloadTime = image.find("downloadTime").text.split(".")[0]
                    event1.set("timestamp", downloadTime)
                    event1.set("humanTime", datetime.datetime.fromtimestamp(int(downloadTime)).strftime('%Y-%m-%d %H:%M:%S'))
                # NOTE(review): downloadTime is only bound in the branch
                # above; a "success" hash without downloadTime would
                # raise NameError here.
                if image.find("hash").text.lower() == "success":
                    event2 = ET.SubElement(imageNode.find("curatorialEvents"), "event")
                    event2.text = "MD5 hash matched SmugMug hash"
                    event2.set("timestamp", downloadTime)
                    event2.set("humanTime", datetime.datetime.fromtimestamp(int(downloadTime)).strftime('%Y-%m-%d %H:%M:%S'))
                timestamp = ET.Element("timestamp")
                timestamp.text = image.find("date").text
                timestamp.set("timeType", "iso8601")
                timestamp.set("parser", "SmugMug")
                imageNode.find("recordEvents").append(timestamp)
                #exif date
                # EXIF tag 36867 is DateTimeOriginal; missing EXIF data
                # lands in the except branch and is merely logged.
                try:
                    imageFile = os.path.join(stagingPath, "ualbanyphotos", imagePath, image.find("filename").text)
                    exifDate = Image.open(imageFile)._getexif()[36867]
                    timestamp = ET.Element("timestamp")
                    timestamp.text = exifDate.replace(" ", "T")
                    timestamp.set("timeType", "iso8601")
                    timestamp.set("parser", "PIL")
                    timestamp.set("source", "exif")
                    timestamp.set("label", "DateTimeOriginal")
                    imageNode.find("recordEvents").append(timestamp)
                except:
                    exceptMsg = str(traceback.format_exc())
                    print exceptMsg
        # Write the finished SIP metadata file next to the staged content.
        metadataString = ET.tostring(sipRoot, pretty_print=True, xml_declaration=True, encoding="utf-8")
        metadataFile = open(os.path.join(stagingPath, accessionNumber + ".xml"), "w")
        metadataFile.write(metadataString)
        metadataFile.close()
        #createSIP.py
        # Hand off to the external bagging script.
        print "bagging SIP"
        sipCmd = "sudo python /home/bcadmin/Projects/createSIP/createSIP.py -m \"" + os.path.join(stagingPath, accessionNumber + ".xml") + "\" \"" + os.path.join(stagingPath, "ualbanyphotos") + "\""
        print sipCmd
        createSIP = Popen(sipCmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = createSIP.communicate()
        if len(stderr) > 0:
            print stderr
        if len(stdout) > 0:
            # NOTE(review): prints stderr here — probably meant stdout.
            print stderr
        else:
            # Bagging succeeded silently: clean staging and promote the
            # working hash index to the master copy.
            os.rmdir(os.path.join(stagingPath, "ualbanyphotos"))
            if os.path.isfile(os.path.join(hashDir,"hashIndex.json")):
                os.remove(os.path.join(hashDir,"hashIndex.json"))
            shutil.copy(os.path.join(basePath,"hashIndexWorking.json"), os.path.join(hashDir,"hashIndex.json"))
            os.remove(os.path.join(basePath,"hashIndexWorking.json"))
    #update log.txt
    finalTime = time.time() - startTime
    print "Total Time: " + str(finalTime) + " seconds, " + str(finalTime/60) + " minutes, " + str(finalTime/3600) + " hours"
    finalTimeFile = open("log.txt", "a")
    logText = "\nSuccessful Crawl ran " + str(time.strftime("%Y-%m-%d %H:%M:%S"))
    logText = logText + "\nProcess took " + str(finalTime) + " seconds or " + str(finalTime/60) + " minutes or " + str(finalTime/3600) + " hours"
    logText = logText + "\n" + str(newAlbumCount) + " new albums found"
    logText = logText + "\n" + str(fileCount) + " files downloaded"
    logText = logText + "\n" + str(totalSize) + " bytes or " + str(readableSize) + " downloaded"
    finalTimeFile.write(logText)
    finalTimeFile.close()
    # Email the success summary (password is line 3 of key.txt).
    sender = 'UAlbanyArchivesNotify@gmail.com'
    receivers = ['gwiedeman@albany.edu']
    subject = "SmugMug Crawler Success"
    body = logText
    message = 'Subject: %s\n\n%s' % (subject, body)
    smtpObj = smtplib.SMTP(host='smtp.gmail.com', port=587)
    smtpObj.ehlo()
    smtpObj.starttls()
    smtpObj.ehlo()
    keyFile = open("key.txt", "r")
    lines = keyFile.readlines()
    emailPW = lines[2]
    keyFile.close()
    smtpObj.login('UAlbanyArchivesNotify', emailPW)
    smtpObj.sendmail(sender, receivers, message)
    smtpObj.quit()
except:
exceptMsg = str(traceback.format_exc())
updateLog = open("log.txt", "a")
logText = "\nCrawl failed at " + str(time.strftime("%Y-%m-%d %H:%M:%S"))
updateLog.write(logText)
updateLog.close()
finalTime = time.time() - startTime
print "Total Time: " + str(finalTime) + " seconds, " + str(finalTime/60) + " minutes, " + str(finalTime/3600) + " hours"
print exceptMsg
errorLog = open("errorLog.txt", "a")
errorText = "***********************************************************************************\n" + str(time.strftime("%Y-%m-%d %H:%M:%S")) + "\n" + str(finalTime) + " seconds\n" + str(finalTime/60) + " minutes\n" + str(finalTime/3600) + " hours" + "\nTraceback:\n" + exceptMsg
errorLog.write(errorText)
errorLog.close()
#serialize working data for debugging
if hashIndex is None:
pass
else:
with open('hashIndexWorking.json', 'w') as fp:
simplejson.dump(hashIndex, fp)
if albumsXML is None:
pass
else:
albumString = ET.tostring(albumsXML, pretty_print=True, xml_declaration=True, encoding="utf-8")
albumFile = open("albums.xml", "w")
albumFile.write(albumString)
albumFile.close()
if imageXML is None:
pass
else:
imageString = ET.tostring(imagesXML, pretty_print=True, xml_declaration=True, encoding="utf-8")
imagesFile = open("images.xml", "w")
imagesFile.write(imageString)
imagesFile.close()
sender = 'UAlbanyArchivesNotify@gmail.com'
receivers = ['gwiedeman@albany.edu']
subject = "SmugMug Crawler Error"
body = "ERROR: " + logText + "\n\n" + exceptMsg
message = 'Subject: %s\n\n%s' % (subject, body)
smtpObj = smtplib.SMTP(host='smtp.gmail.com', port=587)
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.ehlo()
keyFile = open("key.txt", "r")
lines = keyFile.readlines()
emailPW = lines[2]
keyFile.close()
smtpObj.login('UAlbanyArchivesNotify', emailPW)
smtpObj.sendmail(sender, receivers, message)
smtpObj.quit()
#needs:
#Remove empty folders
#create metadata file for SIP
|
ic-hep/DIRAC | refs/heads/rel-v6r15 | Core/scripts/dirac-start-mysql.py | 5 | #!/usr/bin/env python
########################################################################
# File : dirac-start-mysql
# Author : Ricardo Graciani
########################################################################
"""
Start DIRAC MySQL server
"""
__RCSID__ = "$Id$"
#
from DIRAC.Core.Base import Script
# Work without the central Configuration Service: this script only starts
# the local MySQL server and needs no remote configuration data.
Script.disableCS()
# Usage banner is built from the second line of the module docstring.
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
'  %s [option|cfgfile] ...' % Script.scriptName,
] ) )
Script.parseCommandLine()
#
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
#
# Abort the process immediately if any installer step fails.
gComponentInstaller.exitOnError = True
#
# Start MySQL and print the human-readable part of the S_OK result.
print gComponentInstaller.startMySQL()['Value'][1]
|
wojciechtanski/robotframework | refs/heads/master | src/robot/parsing/restsupport.py | 27 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
# docutils is an optional dependency; convert a missing/too-old install
# into a DataError, which Robot Framework reports as a data-parsing
# problem instead of a raw ImportError traceback.
try:
    from docutils.core import publish_doctree, publish_from_doctree
    from docutils.parsers.rst.directives import register_directive
    from docutils.parsers.rst.directives.body import CodeBlock
except ImportError:
    raise DataError("Using reStructuredText test data requires having "
                    "'docutils' module version 0.9 or newer installed.")
class CaptureRobotData(CodeBlock):
    """Replacement for the reST ``code`` directive that captures Robot data.

    When ``robotframework`` appears among the directive arguments, the
    directive content is stored on the enclosing doctree (via
    RobotDataStorage).  Nothing is emitted in either case, so captured
    blocks never show up in the rendered docutils output.
    """

    def run(self):
        wants_capture = 'robotframework' in self.arguments
        if wants_capture:
            RobotDataStorage(self.state_machine.document).add_data(self.content)
        return []
# Route every code-block alias reST supports through the capturing
# directive, so Robot data is collected regardless of which spelling the
# document author used.
register_directive('code', CaptureRobotData)
register_directive('code-block', CaptureRobotData)
register_directive('sourcecode', CaptureRobotData)
class RobotDataStorage(object):
    """Accumulates Robot Framework data rows on a docutils document.

    The rows live in a ``_robot_data`` list attached to the doctree
    object itself, so every storage instance created for the same
    document shares (and appends to) one list.
    """

    def __init__(self, doctree):
        try:
            self._robot_data = doctree._robot_data
        except AttributeError:
            doctree._robot_data = []
            self._robot_data = doctree._robot_data

    def add_data(self, rows):
        """Append the given iterable of data rows to the document."""
        for row in rows:
            self._robot_data.append(row)

    def get_data(self):
        """Return every captured row joined into one newline-separated text."""
        return '\n'.join(self._robot_data)

    def has_data(self):
        """Return True if at least one row has been captured."""
        return len(self._robot_data) > 0
|
harshilasu/GraphicMelon | refs/heads/master | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/rds/__init__.py | 645 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
|
szaghi/MaTiSSe | refs/heads/master | release/MaTiSSe-0.3.0/matisse/utils/mdx_mathjax.py | 6 | """md_mathjax is a simple extension of the Python implementation of Markdown
author: Man YUAN
homepage: https://github.com/epsilony/md_mathjax
"""
from markdown.util import AtomicString
from markdown.util import etree
from markdown.inlinepatterns import Pattern
from markdown import Extension
class MathJaxPattern(Pattern):
    """Base inline pattern that shields MathJax markup from Markdown.

    Matches an unescaped start/end delimiter pair and re-emits the whole
    span (delimiters included) as an AtomicString, so Markdown performs no
    further inline processing inside it; MathJax then renders the math in
    the browser.
    """

    # Match-group indices whose text is concatenated back into the output.
    # NOTE(review): markdown's Pattern wraps the user pattern, shifting
    # group numbers by 2 -- presumably why the offsets start at 2; confirm
    # against the installed markdown version.
    groups = 2, 3, 4
    # (start, end) pair of regex fragments for the delimiters; subclasses
    # override this.
    start_end = None

    def __init__ (self, start_end=None, groups=None):
        if start_end is not None:
            self.start_end = start_end
        if groups is not None:
            self.groups = groups
        # Delimiters must not be backslash-escaped; the body is matched
        # non-greedily so adjacent math spans stay separate.
        pattern = r'(?<!\\)(%s)(.+?)(?<!\\)(%s)' % (self.start_end)
        Pattern.__init__(self, pattern)

    def handleMatch(self, m):
        # Rebuild the matched source (delimiters + body) and store it as an
        # AtomicString inside an anonymous element, which blocks any
        # further Markdown inline processing of the math text.
        node = etree.Element(None)
        text = ''
        for group in self.groups:
            text += m.group(group)
        node.text = AtomicString(text)
        return node
class MathJaxInlinePattern(MathJaxPattern):
    # Inline math delimited by \( ... \)
    start_end = r'\\\(', r'\\\)'


class BraketPattern(MathJaxPattern):
    # Display math delimited by \[ ... \]
    start_end = r'\\\[', r'\\\]'


class DoubleDollarPattern(MathJaxPattern):
    # Display math delimited by $$ ... $$
    start_end = r'\$\$', r'\$\$'


class BeginEndPattern(MathJaxPattern):
    # LaTeX environments: \begin{name} ... \end{name}.  The environment
    # name adds a capture group (back-referenced as \3 in the end
    # delimiter), which shifts the emitted text groups to 2, 4, 5.
    start_end = r'\\begin\{(.+?)\}', r'\\end\{\3\}'
    groups = 2, 4, 5
class MathJaxExtension(Extension):
    """Markdown extension registering every MathJax-protecting pattern."""

    def extendMarkdown(self, md, md_globals):
        # Register ahead of the escape pattern ('<escape') so backslash
        # sequences inside math are left untouched.  Registration order is
        # preserved from the original implementation, including the
        # historical 'mathjax_invironment' key, which other code may rely on.
        registrations = (
            ('mathjax_invironment', BeginEndPattern),
            ('mathjax_bracket', BraketPattern),
            ('mathjax_double_dollar', DoubleDollarPattern),
            ('mathjax_inline', MathJaxInlinePattern),
        )
        for name, pattern_cls in registrations:
            md.inlinePatterns.add(name, pattern_cls(), '<escape')
def makeExtension(configs=None):
    """Entry point used by python-markdown to instantiate the extension."""
    extension = MathJaxExtension(configs)
    return extension
|
gablg1/PerfKitBenchmarker | refs/heads/master | perfkitbenchmarker/deployment/config/config_exceptions.py | 10 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by the deployment helper library."""
class ConfigError(Exception):
  """Top level exception for all Deploy errors."""
  pass


class DeployEmptyConfigError(ConfigError):
  """Either config file not found, or empty config file."""
  pass


class BadStaticReferenceError(ConfigError):
  """Could not resolve the static reference at parse time."""
  pass


class InvalidPdError(ConfigError):
  """The PD name or specs are invalid in the configuration file."""
  pass


class InvalidVmNameError(ConfigError):
  """The node names are not valid VM names."""
  pass


class NoClusterSectionInConfigError(ConfigError):
  """No cluster section was specified in the configuration file."""
  pass


class NoClusterTypeInConfigError(ConfigError):
  """No cluster type was specified in the configuration file."""
  pass


class NoSetupModulesInConfigError(ConfigError):
  """No setup module list was specified in the configuration file."""
  pass


class NoProjectInConfigError(ConfigError):
  """No project specified in the configuration file."""
  pass


class NoZoneInConfigError(ConfigError):
  """No zone specified in the configuration file."""
  pass


class NoAdminUserInConfigError(ConfigError):
  """No administrator user specified in the configuration file."""
  pass


class NoNetworkSectionInConfigError(ConfigError):
  """No network section was specified in the configuration file."""
  pass


class NoNetworkNameInConfigError(ConfigError):
  """No network name specified in the configuration file."""
  pass


class NoTcpPortsInConfigError(ConfigError):
  """No TCP ports specified in the network section of the configuration file."""
  pass


class NoUdpPortsInConfigError(ConfigError):
  """No UDP ports specified in the network section of the configuration file."""
  pass


class InvalidNetworkPortInConfigError(ConfigError):
  """An invalid network port was specified in the configuration file."""
  # Fixed: the docstring previously duplicated NoNodeTypesInConfigError's
  # ("Not a single node definition was found on configuration file."),
  # which did not describe this error at all.
  pass


class NoNodeTypesInConfigError(ConfigError):
  """Not a single node definition was found on configuration file."""
  pass
|
mne-tools/mne-tools.github.io | refs/heads/main | 0.11/_downloads/plot_ssp_projs_sensitivity_map.py | 18 | """
==================================
Sensitivity map of SSP projections
==================================
This example shows the sources that have a forward field
similar to the first SSP vector correcting for ECG.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne import read_forward_solution, read_proj, sensitivity_map
from mne.datasets import sample
print(__doc__)
# Paths into the MNE "sample" dataset (downloaded on first use).
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
# A surface-oriented forward solution is needed for the sensitivity map.
fwd = read_forward_solution(fname, surf_ori=True)
projs = read_proj(ecg_fname)
projs = projs[3:][::2]  # take only one projection per channel type
# Compute sensitivity map
# mode='angle' measures how similar each source's forward field is to the
# SSP vectors (gradiometer channels only).
ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle')
###############################################################################
# Show sensitivity map
# Histogram of the per-source sensitivity values, then a 3D brain plot.
plt.hist(ssp_ecg_map.data.ravel())
plt.show()
args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)), smoothing_steps=7,
            hemi='rh', subjects_dir=subjects_dir)
ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args)
|
stefanklug/mapnik | refs/heads/master | scons/scons-local-2.3.6/SCons/Tool/hpc++.py | 4 | """SCons.Tool.hpc++
Tool-specific initialization for c++ on HP/UX.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hpc++.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os.path
import SCons.Util
# The generic C++ tool module is named 'c++', which is not a valid Python
# identifier, so it has to be loaded via __import__ rather than 'import'.
cplusplus = __import__('c++', globals(), locals(), [])
# Full path to the HP aCC front end, or None if it cannot be found.
acc = None
# search for the acc compiler and linker front end
try:
    dirs = os.listdir('/opt')
except (IOError, OSError):
    # Not being able to read the directory because it doesn't exist
    # (IOError) or isn't readable (OSError) is okay.
    dirs = []
# HP-UX installs compiler releases under /opt/<release>/bin/aCC; pick the
# first one found (directory listing order, not newest version).
for dir in dirs:
    cc = '/opt/' + dir + '/bin/aCC'
    if os.path.exists(cc):
        acc = cc
        break
def generate(env):
    """Add Builders and construction variables for HP aCC to an Environment.

    Falls back to the generic C++ tool settings when aCC is not installed.
    """
    # Start from the generic C++ tool configuration, then specialise.
    cplusplus.generate(env)
    if acc:
        # 'acc' is known truthy inside this branch, so the "or 'aCC'"
        # fallback can never fire; kept as-is for behavioural fidelity.
        env['CXX'] = acc or 'aCC'
        env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
        # determine version of aCC
        line = os.popen(acc + ' -V 2>&1').readline().rstrip()
        if line.find('aCC: HP ANSI C++') == 0:
            env['CXXVERSION'] = line.split()[-1]
        # NOTE(review): this re-assigns SHCXXFLAGS set just above; on
        # cygwin the +Z PIC flag is dropped again -- confirm the double
        # assignment is intentional.
        if env['PLATFORM'] == 'cygwin':
            env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
        else:
            env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
def exists(env):
    """Report whether the aCC front end was found on this machine.

    Returns the compiler path (truthy) or None, as computed at import time.
    """
    return acc
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
vitalogy/wiringX | refs/heads/master | python/examples/tmp102.py | 8 | #!/usr/bin/env python
# Copyright (c) 2015 Paul Adams <paul@thoughtcriminal.co.uk>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This example shows how to read temperature from the TMP102 I2C device
# with WiringX in Python. It assumes that the sensor is configured at
# the default I2C address of 0x48
from time import sleep
from wiringX import gpio
# setup wiringX
gpio.setup()
# get a handle to the sensor, using the default I2C address
fd = gpio.I2CSetup(0x48)
# Poll the temperature register once per second, forever (Ctrl-C to stop).
while True:
    # read from the default register
    data = gpio.I2CReadReg16(fd, 0x00)
    reg = []
    # calculate the temperature
    # Split the 16-bit read into its two bytes; presumably the word comes
    # back byte-swapped from the bus, so reg[1] ends up holding the high
    # temperature byte -- TODO confirm against the TMP102 datasheet.
    reg.append((data>>8)&0xff)
    reg.append(data&0xff)
    # Reassemble the 12-bit reading; each LSB is 0.0625 degrees C.
    res = (reg[1] << 4) | (reg[0] >> 4)
    res = res * 0.0625
    # print the result
    print(u'Temperature: ' + str(res) + u' C')
    sleep(1)
|
todotobe1/apprtc | refs/heads/master | build/run_python_tests.py | 22 | #!/usr/bin/python
import os
import optparse
import sys
import unittest
USAGE = """%prog sdk_path test_path webtest_path
Run unit tests for App Engine apps.
sdk_path Path to the SDK installation.
test_path Path to package containing test modules.
webtest_path Path to the webtest library."""
def _WebTestIsInstalled():
    """Return True when the third-party 'webtest' package is importable.

    When it is missing, print step-by-step installation guidance and
    return False so the caller can abort with a nonzero status.
    """
    try:
        import webtest
        return True
    except ImportError:
        print 'You need to install webtest dependencies before you can proceed '
        print 'running the tests. To do this you need to get easy_install since '
        print 'that is how webtest provisions its dependencies.'
        print 'See https://pythonhosted.org/setuptools/easy_install.html.'
        print 'Then:'
        print 'cd webtest-master'
        print 'python setup.py install'
        print '(Prefix with sudo / run in admin shell as necessary).'
        return False
def main(sdk_path, test_path, webtest_path):
    """Discover and run the app's Python unit tests.

    Returns 0 when every test passes, 1 when webtest is missing or a test
    fails, or a human-readable hint string when a required path is absent.
    """
    # Validate the three required locations up front, each with its own
    # remediation hint; checks happen in the original order.
    path_hints = (
        (sdk_path, 'Missing %s: try grunt shell:getPythonTestDeps.'),
        (test_path, 'Missing %s: try grunt build.'),
        (webtest_path, 'Missing %s: try grunt shell:getPythonTestDeps.'),
    )
    for required_path, hint in path_hints:
        if not os.path.exists(required_path):
            return hint % required_path
    # Make the App Engine SDK importable, then let it fix up sys.path.
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    sys.path.append(webtest_path)
    if not _WebTestIsInstalled():
        return 1
    # Discover every *test.py module under test_path and run the suite.
    suite = unittest.loader.TestLoader().discover(test_path,
                                                  pattern="*test.py")
    runner = unittest.TextTestRunner(verbosity=2)
    return 0 if runner.run(suite).wasSuccessful() else 1
if __name__ == '__main__':
    # Parse the three positional arguments described in USAGE.
    parser = optparse.OptionParser(USAGE)
    options, args = parser.parse_args()
    if len(args) != 3:
        parser.error('Error: Exactly 3 arguments required.')
    sdk_path, test_path, webtest_path = args[0:3]
    # Propagate main()'s result as the process exit status.
    sys.exit(main(sdk_path, test_path, webtest_path))
|
Achuth17/scikit-bio | refs/heads/master | skbio/stats/tests/test_power.py | 12 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.stats import kruskal
from skbio.stats.power import (subsample_power,
subsample_paired_power,
_check_nans,
confidence_bound,
_calculate_power,
_compare_distributions,
_calculate_power_curve,
_check_subsample_power_inputs,
_identify_sample_groups,
_draw_paired_samples,
_get_min_size,
bootstrap_power_curve,
paired_subsamples
)
class PowerAnalysisTest(TestCase):
def setUp(self):
# Defines a testing functions
def test_meta(ids, meta, cat, div):
"""Checks thhe div metric with a kruskal wallis"""
out = [meta.loc[id_, div] for id_ in ids]
return kruskal(*out)[1]
def meta_f(x):
"""Applies `test_meta` to a result"""
return test_meta(x, self.meta, 'INT', 'DIV')
def f(x):
"""returns the p value of a kruskal wallis test"""
return kruskal(*x)[1]
self.test_meta = test_meta
self.f = f
self.meta_f = meta_f
self.num_p = 1
# Sets the random seed
np.random.seed(5)
# Sets up the distributions of data for use
self.s1 = np.arange(0, 10, 1)
# Sets up two distributions which will never be equal by a rank-sum
# test.
self.samps = [np.ones((10))/10., np.ones((10))]
self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)]
# Sets up a vector of alpha values
self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)
# Sets up a vector of samples
self.num_samps = np.arange(10, 100, 10)
# Sets up a mapping file
meta = {'GW': {'INT': 'N', 'ABX': np.nan, 'DIV': 19.5, 'AGE': '30s',
'SEX': 'M'},
'CB': {'INT': 'Y', 'ABX': np.nan, 'DIV': 42.7, 'AGE': '30s',
'SEX': 'M'},
'WM': {'INT': 'N', 'ABX': 'N', 'DIV': 27.5, 'AGE': '20s',
'SEX': 'F'},
'MH': {'INT': 'Y', 'ABX': 'N', 'DIV': 62.3, 'AGE': '30s',
'SEX': 'F'},
'CD': {'INT': 'Y', 'ABX': 'Y', 'DIV': 36.4, 'AGE': '40s',
'SEX': 'F'},
'LF': {'INT': 'Y', 'ABX': 'N', 'DIV': 50.2, 'AGE': '20s',
'SEX': 'M'},
'PP': {'INT': 'N', 'ABX': 'Y', 'DIV': 10.8, 'AGE': '30s',
'SEX': 'F'},
'MM': {'INT': 'N', 'ABX': 'N', 'DIV': 55.6, 'AGE': '40s',
'SEX': 'F'},
'SR': {'INT': 'N', 'ABX': 'Y', 'DIV': 2.2, 'AGE': '20s',
'SEX': 'M'},
'TS': {'INT': 'N', 'ABX': 'Y', 'DIV': 16.1, 'AGE': '40s',
'SEX': 'M'},
'PC': {'INT': 'Y', 'ABX': 'N', 'DIV': 82.6, 'AGE': '40s',
'SEX': 'M'},
'NR': {'INT': 'Y', 'ABX': 'Y', 'DIV': 15.7, 'AGE': '20s',
'SEX': 'F'}}
self.meta = pd.DataFrame.from_dict(meta, orient='index')
self.meta_pairs = {0: [['GW', 'SR', 'TS'], ['CB', 'LF', 'PC']],
1: [['MM', 'PP', 'WM'], ['CD', 'MH', 'NR']]}
self.pair_index = np.array([0, 0, 0, 1, 1, 1])
self.counts = np.array([5, 15, 25, 35, 45])
self.powers = [np.array([[0.105, 0.137, 0.174, 0.208, 0.280],
[0.115, 0.135, 0.196, 0.204, 0.281],
[0.096, 0.170, 0.165, 0.232, 0.256],
[0.122, 0.157, 0.202, 0.250, 0.279],
[0.132, 0.135, 0.173, 0.203, 0.279]]),
np.array([[0.157, 0.345, 0.522, 0.639, 0.739],
[0.159, 0.374, 0.519, 0.646, 0.757],
[0.161, 0.339, 0.532, 0.634, 0.745],
[0.169, 0.372, 0.541, 0.646, 0.762],
[0.163, 0.371, 0.522, 0.648, 0.746]]),
np.array([[0.276, 0.626, 0.865, 0.927, 0.992],
[0.267, 0.667, 0.848, 0.937, 0.978],
[0.236, 0.642, 0.850, 0.935, 0.977],
[0.249, 0.633, 0.828, 0.955, 0.986],
[0.249, 0.663, 0.869, 0.951, 0.985]])]
self.power_alpha = 0.1
self.effects = np.array([0.15245, 0.34877, 0.55830])
self.bounds = np.array([0.01049, 0.00299, 0.007492])
self.labels = np.array(['Age', 'Intervenption', 'Antibiotics'])
self.cats = np.array(['AGE', 'INT', 'ABX'])
self.cat = "AGE"
self.control_cats = ['INT', 'ABX']
def test_subsample_power_defaults(self):
test_p, test_c = subsample_power(self.f, self.pop,
num_iter=10, num_runs=5)
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_counts(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=2,
min_counts=5)
self.assertEqual(test_p.shape, (2, 5))
npt.assert_array_equal(np.arange(5, 50, 10), test_c)
def test_subsample_power_matches(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=5,
draw_mode="matched")
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_multi_p(self):
test_p, test_c = subsample_power(lambda x: np.array([0.5, 0.5]),
samples=self.pop,
num_iter=10,
num_runs=5)
self.assertEqual(test_p.shape, (5, 4, 2))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_paired_power(self):
known_c = np.array([1, 2, 3, 4])
# Sets up the handling values
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(self.meta_f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
# Test the output shapes are sane
self.assertEqual(test_p.shape, (2, 4))
npt.assert_array_equal(known_c, test_c)
def test_subsample_paired_power_multi_p(self):
def f(x):
return np.array([0.5, 0.5, 0.005])
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
self.assertEqual(test_p.shape, (2, 4, 3))
def test_check_nans_str(self):
self.assertTrue(_check_nans('string'))
def test_check_nans_num(self):
self.assertTrue(_check_nans(4.2))
def test__check_nans_nan(self):
self.assertFalse(_check_nans(np.nan))
def test__check_nans_clean_list(self):
self.assertTrue(_check_nans(['foo', 'bar'], switch=True))
def test__check_nans_list_nan(self):
self.assertFalse(_check_nans(['foo', np.nan], switch=True))
def test__check_str_error(self):
with self.assertRaises(TypeError):
_check_nans(self.f)
def test__get_min_size_strict(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
True)
self.assertEqual(test, known)
def test__get_min_size_relaxed(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
False)
self.assertEqual(known, test)
def test_confidence_bound_default(self):
# Sets the know confidence bound
known = 2.2830070
test = confidence_bound(self.s1)
npt.assert_almost_equal(test, known, 3)
def test_confidence_bound_df(self):
known = 2.15109
test = confidence_bound(self.s1, df=15)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_alpha(self):
known = 3.2797886
test = confidence_bound(self.s1, alpha=0.01)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_nan(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
# Sets the know value
known = np.array([2.2284, 0.2573, 0.08573])
# Tests the function
test = confidence_bound(samples, axis=0)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_axis_none(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
# Sest the known value
known = 0.52852
# Tests the output
test = confidence_bound(samples, axis=None)
npt.assert_almost_equal(known, test, 3)
def test__calculate_power(self):
# Sets up the values to test
crit = 0.025
# Sets the known value
known = 0.5
# Calculates the test value
test = _calculate_power(self.alpha, crit)
# Checks the test value
npt.assert_almost_equal(known, test)
def test__calculate_power_n(self):
crit = 0.025
known = np.array([0.5, 0.5])
alpha = np.vstack((self.alpha, self.alpha))
test = _calculate_power(alpha, crit)
npt.assert_almost_equal(known, test)
def test__compare_distributions_sample_counts_error(self):
with self.assertRaises(ValueError):
_compare_distributions(self.f, [self.pop[0][:5], self.pop[1]], 1,
counts=25)
def test__compare_distributions_all_mode(self):
known = np.ones((100))*0.0026998
test = _compare_distributions(self.f, self.samps, 1, num_iter=100)
npt.assert_allclose(known, test, 5)
def test__compare_distributions_matched_mode(self):
# Sets the known value
known_mean = 0.162195
known_std = 0.121887
known_shape = (100,)
# Tests the sample value
test = _compare_distributions(self.f, self.pop, self.num_p,
mode='matched', num_iter=100)
npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)
npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)
self.assertEqual(known_shape, test.shape)
def test__compare_distributions_draw_mode(self):
draw_mode = 'Ultron'
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f, self.pop, draw_mode,
self.num_p)
def test__compare_distributions_multiple_returns(self):
known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
def f(x):
return np.array([1, 2, 3])
test = _compare_distributions(f, self.pop, 3, mode='matched',
num_iter=3)
npt.assert_array_equal(known, test)
def test_check_subsample_power_inputs_matched_mode(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((2)), np.ones((5))],
draw_mode="matched")
def test_check_subsample_power_inputs_counts(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((3)), np.ones((5))],
min_counts=5,
counts_interval=1000,
max_counts=7)
def test_check_subsample_power_inputs_ratio(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
self.samps,
ratio=np.array([1, 2, 3]))
def test_check_subsample_power_inputs_test(self):
# Defines a test function
def test(x):
return 'Hello World!'
with self.assertRaises(TypeError):
_check_subsample_power_inputs(test, self.samps)
def test_check_sample_power_inputs(self):
# Defines the know returns
known_num_p = 1
known_ratio = np.ones((2))
known_counts = np.arange(2, 10, 2)
# Runs the code for the returns
test_ratio, test_num_p, test_counts = \
_check_subsample_power_inputs(self.f,
self.samps,
counts_interval=2,
max_counts=10)
# Checks the returns are sane
self.assertEqual(known_num_p, test_num_p)
npt.assert_array_equal(known_ratio, test_ratio)
npt.assert_array_equal(known_counts, test_counts)
def test__calculate_power_curve_ratio_error(self):
with self.assertRaises(ValueError):
_calculate_power_curve(self.f, self.pop, self.num_samps,
ratio=np.array([0.1, 0.2, 0.3]),
num_iter=100)
def test__calculate_power_curve_default(self):
# Sets the known output
known = np.array([0.509, 0.822, 0.962, 0.997, 1.000, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.01)
def test__calculate_power_curve_alpha(self):
# Sets the know output
known = np.array([0.31, 0.568, 0.842, 0.954, 0.995, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
alpha=0.01,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test__calculate_power_curve_ratio(self):
# Sets the know output
known = np.array([0.096, 0.333, 0.493, 0.743, 0.824, 0.937, 0.969,
0.996, 0.998])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
ratio=np.array([0.25, 0.75]),
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test_bootstrap_power_curve(self):
# Sets the known values
known_mean = np.array([0.500, 0.82, 0.965, 0.995, 1.000, 1.000,
1.000, 1.000, 1.000])
known_bound = np.array([0.03, 0.02, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00,
0.00])
# Generates the test values
test_mean, test_bound = bootstrap_power_curve(self.f,
self.pop,
self.num_samps,
num_iter=100)
# Checks the function returned sanely
npt.assert_allclose(test_mean, known_mean, rtol=0.05, atol=0.05)
npt.assert_allclose(test_bound, known_bound, rtol=0.1, atol=0.01)
def test_paired_subsamples_default(self):
# Sets the known np.array set
known_array = [{'MM', 'SR', 'TS', 'GW', 'PP', 'WM'},
{'CD', 'LF', 'PC', 'CB', 'MH', 'NR'}]
# Gets the test value
cat = 'INT'
control_cats = ['SEX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats)
self.assertEqual(known_array[0], set(test_array[0]))
self.assertEqual(known_array[1], set(test_array[1]))
def test_paired_subsamples_break(self):
# Sets known np.array set
known_array = [np.array([]), np.array([])]
# Gets the test value
cat = 'ABX'
control_cats = ['SEX', 'AGE', 'INT']
test_array = paired_subsamples(self.meta, cat, control_cats)
npt.assert_array_equal(known_array, test_array)
def test_paired_subsample_undefined(self):
known_array = np.zeros((2, 0))
cat = 'INT'
order = ['Y', 'N']
control_cats = ['AGE', 'ABX', 'SEX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
npt.assert_array_equal(test_array, known_array)
def test_paired_subsample_fewer(self):
# Set known value
known_array = {'PP', 'MH', 'CD', 'PC', 'TS', 'MM'}
# Sets up test values
cat = 'AGE'
order = ['30s', '40s']
control_cats = ['ABX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
for v in test_array[0]:
self.assertTrue(v in known_array)
for v in test_array[1]:
self.assertTrue(v in known_array)
def test_paired_subsamples_not_strict(self):
known_array = [{'WM', 'MM', 'GW', 'SR', 'TS'},
{'LF', 'PC', 'CB', 'NR', 'CD'}]
# Gets the test values
cat = 'INT'
control_cats = ['ABX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats,
strict_match=False)
self.assertEqual(set(test_array[0]), known_array[0])
self.assertEqual(set(test_array[1]), known_array[1])
    def test__identify_sample_groups(self):
        """_identify_sample_groups pairs samples across the two INT groups."""
        # Defines the known values: each pair index maps to one sample from
        # the 'N' group and one from the 'Y' group.
        known_pairs = {0: [['MM'], ['CD']],
                       1: [['SR'], ['LF']],
                       2: [['TS'], ['PC']],
                       3: [['GW'], ['CB']],
                       4: [['PP'], ['MH']],
                       5: [['WM'], ['NR']]}
        known_index = np.array([0, 1, 2, 3, 4, 5])
        test_pairs, test_index = _identify_sample_groups(self.meta,
                                                         'INT',
                                                         ['SEX', 'AGE'],
                                                         order=['N', 'Y'],
                                                         strict_match=True)
        self.assertEqual(known_pairs.keys(), test_pairs.keys())
        # Compare sorted values so dict ordering cannot affect the result.
        self.assertEqual(sorted(known_pairs.values()),
                         sorted(test_pairs.values()))
        npt.assert_array_equal(known_index, test_index)
    def test__identify_sample_groups_not_strict(self):
        """Non-strict matching can group multiple samples on either side."""
        # Defines the known values
        known_pairs = {0: [['PP'], ['CD', 'NR']],
                       1: [['MM', 'WM'], ['MH']],
                       2: [['GW'], ['CB']]}
        known_index = np.array([0, 1, 2])
        test_pairs, test_index = _identify_sample_groups(self.meta,
                                                         'INT',
                                                         ['SEX', 'ABX'],
                                                         order=['N', 'Y'],
                                                         strict_match=False)
        self.assertEqual(known_pairs.keys(), test_pairs.keys())
        self.assertEqual(sorted(known_pairs.values()),
                         sorted(test_pairs.values()))
        npt.assert_array_equal(known_index, test_index)
    def test__draw_paired_samples(self):
        """Drawn sample IDs are always subsets of the paired groups."""
        num_samps = 3
        known_sets = [{'GW', 'SR', 'TS', 'MM', 'PP', 'WM'},
                      {'CB', 'LF', 'PC', 'CD', 'MH', 'NR'}]
        test_samps = _draw_paired_samples(self.meta_pairs, self.pair_index,
                                          num_samps)
        # The draw is random, so only subset membership is asserted.
        for i, t in enumerate(test_samps):
            self.assertTrue(set(t).issubset(known_sets[i]))
if __name__ == '__main__':
main()
|
erjohnso/ansible | refs/heads/devel | lib/ansible/modules/packaging/os/swupd.py | 49 | #!/usr/bin/python
# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swupd
short_description: Manages updates and bundles in ClearLinux systems.
description:
- Manages updates and bundles with the swupd bundle manager, which is used by the
Clear Linux Project for Intel Architecture.
version_added: "2.3"
author: Alberto Murillo (@albertomurillo)
options:
contenturl:
description:
- URL pointing to the contents of available bundles.
If not specified, the contents are retrieved from clearlinux.org.
required: false
default: null
format:
description:
- The format suffix for version file downloads. For example [1,2,3,staging,etc].
If not specified, the default format is used.
required: false
default: null
manifest:
description:
      - The manifest contains information about the bundles at a certain version of the OS.
Specify a Manifest version to verify against that version or leave unspecified to
verify against the current version.
required: false
default: null
aliases: [release, version]
name:
description:
- Name of the (I)bundle to install or remove.
required: false
default: null
aliases: [bundle]
state:
description:
- Indicates the desired (I)bundle state. C(present) ensures the bundle
is installed while C(absent) ensures the (I)bundle is not installed.
required: false
default: present
choices: [present, absent]
update:
description:
- Updates the OS to the latest version.
required: false
default: no
url:
description:
- Overrides both I(contenturl) and I(versionurl).
required: false
default: null
verify:
description:
- Verify content for OS version.
required: false
default: null
versionurl:
description:
- URL for version string download.
required: false
default: null
'''
EXAMPLES = '''
- name: Update the OS to the latest version
swupd:
update: yes
- name: Installs the "foo" bundle
swupd:
name: foo
state: present
- name: Removes the "foo" bundle
swupd:
name: foo
state: absent
- name: Check integrity of filesystem
swupd:
verify: yes
- name: Downgrade OS to release 12920
swupd:
verify: yes
manifest: 12920
'''
RETURN = '''
stdout:
description: stdout of swupd
returned: always
type: string
stderr:
description: stderr of swupd
returned: always
type: string
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Swupd(object):
    """Wrapper around the ``swupd`` CLI on Clear Linux.

    Each public method performs one module action (install/remove a
    bundle, update or verify the OS) and records its outcome in the
    ``changed``/``failed``/``msg``/``rc``/``stdout``/``stderr``
    attributes, which main() converts into the Ansible result.
    """
    # Marker phrases searched for in swupd's stdout to decide what a
    # "verify" run actually found or did.
    FILES_NOT_MATCH = "files did not match"
    FILES_REPLACED = "missing files were replaced"
    FILES_FIXED = "files were fixed"
    FILES_DELETED = "files were deleted"

    def __init__(self, module):
        """Locate the swupd binary and copy every module parameter onto self."""
        # Fail if swupd is not found
        self.module = module
        self.swupd_cmd = module.get_bin_path("swupd", False)

        if not self.swupd_cmd:
            module.fail_json(msg="Could not find swupd.")

        # Initialize parameters: contenturl, format, manifest, name, state,
        # update, url, verify and versionurl become attributes that
        # _get_cmd() reads when composing command lines.
        for key in module.params.keys():
            setattr(self, key, module.params[key])

        # Initialize return values
        self.changed = False
        self.failed = False
        self.msg = None
        self.rc = None
        self.stderr = ""
        self.stdout = ""

    def _run_cmd(self, cmd):
        """Run ``cmd``, storing rc/stdout/stderr; never raises on non-zero rc."""
        self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)

    def _get_cmd(self, command):
        """Return the full swupd command line for ``command``.

        Appends the optional --format/--manifest flags, then either the
        catch-all --url override or the more specific --contenturl and
        --versionurl overrides (--contenturl does not apply to
        "check-update").
        """
        cmd = "%s %s" % (self.swupd_cmd, command)

        if self.format:
            cmd += " --format=%s" % self.format
        if self.manifest:
            cmd += " --manifest=%s" % self.manifest

        if self.url:
            cmd += " --url=%s" % self.url
        else:
            if self.contenturl and command != "check-update":
                cmd += " --contenturl=%s" % self.contenturl
            if self.versionurl:
                cmd += " --versionurl=%s" % self.versionurl

        return cmd

    def _is_bundle_installed(self, bundle):
        """Return True if ``bundle`` is installed (its tracking file exists)."""
        try:
            os.stat("/usr/share/clear/bundles/%s" % bundle)
        except OSError:
            return False

        return True

    def _needs_update(self):
        """Return True if a newer OS version is available.

        Returns None (falsy) and flags failure when the check itself
        errors out (any rc other than 0 or 1).
        """
        cmd = self._get_cmd("check-update")
        self._run_cmd(cmd)

        if self.rc == 0:
            return True

        if self.rc == 1:
            return False

        self.failed = True
        self.msg = "Failed to check for updates"

    def _needs_verify(self):
        """Return True if `swupd verify` reports mismatched files."""
        cmd = self._get_cmd("verify")
        self._run_cmd(cmd)

        if self.rc != 0:
            self.failed = True
            self.msg = "Failed to check for filesystem inconsistencies."

        if self.FILES_NOT_MATCH in self.stdout:
            return True

        return False

    def install_bundle(self, bundle):
        """Installs a bundle with `swupd bundle-add bundle`"""
        # Do not touch the system in check mode; just report what would change.
        if self.module.check_mode:
            self.module.exit_json(changed=not self._is_bundle_installed(bundle))

        if self._is_bundle_installed(bundle):
            self.msg = "Bundle %s is already installed" % bundle
            return

        cmd = self._get_cmd("bundle-add %s" % bundle)
        self._run_cmd(cmd)

        if self.rc == 0:
            self.changed = True
            self.msg = "Bundle %s installed" % bundle
            return

        if self.rc == 18:
            # swupd exit code 18: the bundle name is not valid.
            self.msg = "Bundle name %s is invalid" % bundle
            return

        self.failed = True
        self.msg = "Failed to install bundle %s" % bundle

    def remove_bundle(self, bundle):
        """Removes a bundle with `swupd bundle-remove bundle`"""
        if self.module.check_mode:
            self.module.exit_json(changed=self._is_bundle_installed(bundle))

        if not self._is_bundle_installed(bundle):
            # Bug fix: the original message lacked the "% bundle" argument,
            # so a literal "%s" leaked into the module result.
            self.msg = "Bundle %s not installed" % bundle
            return

        cmd = self._get_cmd("bundle-remove %s" % bundle)
        self._run_cmd(cmd)

        if self.rc == 0:
            self.changed = True
            self.msg = "Bundle %s removed" % bundle
            return

        self.failed = True
        self.msg = "Failed to remove bundle %s" % bundle

    def update_os(self):
        """Updates the os with `swupd update`"""
        if self.module.check_mode:
            self.module.exit_json(changed=self._needs_update())

        if not self._needs_update():
            self.msg = "There are no updates available"
            return

        cmd = self._get_cmd("update")
        self._run_cmd(cmd)

        if self.rc == 0:
            self.changed = True
            self.msg = "Update successful"
            return

        self.failed = True
        # Bug fix: this branch runs after the update attempt itself failed;
        # the old message wrongly claimed the *check* for updates failed.
        self.msg = "Failed to update the OS"

    def verify_os(self):
        """Verifies filesystem against specified or current version"""
        if self.module.check_mode:
            self.module.exit_json(changed=self._needs_verify())

        if not self._needs_verify():
            self.msg = "No files were changed"
            return

        cmd = self._get_cmd("verify --fix")
        self._run_cmd(cmd)

        if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
            self.changed = True
            self.msg = "Fix successful"
            return

        self.failed = True
        self.msg = "Failed to verify the OS"
def main():
    """The main function: parse module parameters and dispatch one action.

    Exactly one of name/update/verify may be supplied; update and verify
    take precedence over bundle install/remove.
    """
    module = AnsibleModule(
        argument_spec=dict(
            contenturl=dict(type="str"),
            format=dict(type="str"),
            manifest=dict(aliases=["release", "version"], type="int"),
            name=dict(aliases=["bundle"], type="str"),
            state=dict(default="present", choices=["present", "absent"], type="str"),
            update=dict(default=False, type="bool"),
            url=dict(type="str"),
            verify=dict(default=False, type="bool"),
            versionurl=dict(type="str"),
        ),
        required_one_of=[["name", "update", "verify"]],
        mutually_exclusive=[["name", "update", "verify"]],
        supports_check_mode=True
    )

    swupd = Swupd(module)

    name = module.params["name"]
    state = module.params["state"]
    update = module.params["update"]
    verify = module.params["verify"]

    if update:
        swupd.update_os()
    elif verify:
        swupd.verify_os()
    elif state == "present":
        swupd.install_bundle(name)
    elif state == "absent":
        swupd.remove_bundle(name)
    else:
        # Unreachable in practice: argument_spec restricts state choices.
        swupd.failed = True

    if swupd.failed:
        module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
    else:
        module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
if __name__ == '__main__':
main()
|
spacewalkproject/spacewalk | refs/heads/master | client/rhel/rhn-client-tools/test/testClientCaps.py | 30 | #!/usr/bin/python
import os
import settestpath
import unittest
from up2date_client import clientCaps
from up2date_client import up2dateAuth
test_clientCaps_d = "etc-sysconfig-rhn/clientCaps.d"
class TestClientCaps(unittest.TestCase):
    """Exercises clientCaps.ClientCapabilities population and formatting.

    NOTE(review): this suite targets Python 2 (dict.keys() returning a
    sortable list, assertEquals, list >= int comparisons); it would need
    changes to run under Python 3 -- confirm target interpreter.
    """
    def setUp(self):
        self.__setupData()

    def __setupData(self):
        # Fixture: capability name -> {version, value} mapping plus its
        # expected header rendering and sorted key/value lists.
        self.caps1 = {"packages.runTransaction":{'version':1, 'value':1},
                      "blippyfoo":{'version':5, 'value':0},
                      "caneatCheese":{'version':1, 'value': 1}
                      }
        # NOTE(review): headerFormat1 assumes a particular dict iteration
        # order; this is fragile across interpreter versions.
        self.headerFormat1 = [('X-RHN-Client-Capability', 'caneatCheese(1)=1'),
                              ('X-RHN-Client-Capability', 'packages.runTransaction(1)=1'),
                              ('X-RHN-Client-Capability', 'blippyfoo(5)=0')]
        self.dataKeysSorted1 = ['blippyfoo',
                                'caneatCheese',
                                'packages.runTransaction']
        self.dataValuesSorted1 = [{'version': 5, 'value': 0},
                                  {'version': 1, 'value': 1},
                                  {'version': 1, 'value': 1}]

    def testEmptyInit(self):
        "Verify that the class can be created with no arguments"
        cc = clientCaps.ClientCapabilities()

    def testPopulate(self):
        "Verify the object gets created with appropriate populated data"
        cc = clientCaps.ClientCapabilities()
        # NOTE(review): `len` is a list of keys here (shadows the builtin);
        # the list >= int comparison below only works under Python 2.
        len = cc.keys()
        self.assertTrue(len >= 1)

    def testHeaderFormat(self):
        "Verify that headerFormat runs without errors"
        cc = clientCaps.ClientCapabilities()
        res = cc.headerFormat()
        self.assertEquals(type([]), type(res))

    def testHeaderFormatVerify(self):
        "Verify that headerFormat returns proper results"
        cc = clientCaps.ClientCapabilities()
        cc.populate(self.caps1)
        res = cc.headerFormat()
        self.assertEquals(type([]), type(res))
        self.assertTrue(len(res) >= 1)
        # Every emitted header must use the capability header name.
        for header in res:
            headerName, value = header
            self.assertEqual("X-RHN-Client-Capability", headerName)
        self.assertEqual(res, self.headerFormat1)

    def testDataFormatVerify(self):
        "Verify that populate() creates the internal dict's properly"
        cc = clientCaps.ClientCapabilities()
        cc.populate(self.caps1)
        keys = cc.keys()
        keys.sort()
        self.assertEqual(self.dataKeysSorted1, keys)
        values = cc.values()
        values.sort()
        self.assertEqual(self.dataValuesSorted1, values)

    def testLoadClientCaps(self):
        "Verify that loadClientCaps works"
        blip = clientCaps.loadLocalCaps(test_clientCaps_d)

    def testLoadClientCapsSkipDirs(self):
        "Verify that client caps loads with dirs in /etc/sysconfig/rhn/clientCaps.d,"
        # bugzilla #114322: loadLocalCaps must skip subdirectories instead
        # of trying to parse them as capability files.
        dirname= test_clientCaps_d + "/TESTDIR"
        if not os.access(dirname, os.R_OK):
            os.makedirs(dirname)
        try:
            clientCaps.loadLocalCaps(test_clientCaps_d)
            os.rmdir(dirname)
        except:
            # Clean up the scratch directory even on failure.
            os.rmdir(dirname)
            self.fail()
class TestLoginWithCaps(unittest.TestCase):
    """Smoke-tests a live RHN login with capabilities enabled."""
    def testLogin(self):
        "Attempt a login that utilizes capabilities"
        # This doesn't necessarily relate directly to caps, but it's a
        # convenient way to exercise logins at the moment...
        # In the future we could also try overriding different
        # capabilities, etc...
        res = up2dateAuth.login()
def suite():
    """Build the combined test suite for this module."""
    tests = unittest.TestSuite()
    for case in (TestClientCaps, TestLoginWithCaps):
        tests.addTest(unittest.makeSuite(case))
    return tests
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
Russell-IO/ansible | refs/heads/devel | lib/ansible/modules/clustering/etcd3.py | 31 | #!/usr/bin/python
#
# (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: etcd3
short_description: "Set or delete key value pairs from an etcd3 cluster"
version_added: "2.5"
requirements:
- etcd3
description:
- Sets or deletes values in etcd3 cluster using its v3 api.
- Needs python etcd3 lib to work
options:
key:
description:
- the key where the information is stored in the cluster
required: true
value:
description:
- the information stored
required: true
host:
description:
- the IP address of the cluster
default: 'localhost'
port:
description:
- the port number used to connect to the cluster
default: 2379
state:
description:
- the state of the value for the key.
- can be present or absent
required: true
author:
- Jean-Philippe Evrard (@evrardjp)
"""
EXAMPLES = """
# Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379"
- etcd3:
key: "foo"
value: "baz3"
host: "localhost"
port: 2379
state: "present"
"""
RETURN = '''
key:
description: The key that was queried
returned: always
type: str
old_value:
description: The previous value in the cluster
returned: always
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
import etcd3
etcd_found = True
except ImportError:
etcd_found = False
def run_module():
    """Implement the etcd3 module: ensure a key holds the given value
    (state=present) or does not exist (state=absent); honors check mode
    and reports ``changed``, ``key`` and ``old_value``.
    """
    # define the available arguments/parameters that a user can pass to
    # the module
    module_args = dict(
        key=dict(type='str', required=True),
        value=dict(type='str', required=True),
        host=dict(type='str', default='localhost'),
        port=dict(type='int', default=2379),
        state=dict(type='str', required=True, choices=['present', 'absent']),
    )

    # seed the result dict; 'changed' reports whether the cluster was
    # actually (or, in check mode, would be) modified
    result = dict(
        changed=False,
    )

    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    result['key'] = module.params.get('key')

    if not etcd_found:
        module.fail_json(msg="the python etcd3 module is required")

    # Only forward connection-related parameters to etcd3.client().
    allowed_keys = ['host', 'port', 'ca_cert', 'cert_key', 'cert_cert',
                    'timeout', 'user', 'password']
    # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
    # the minimum supported version
    # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
    client_params = dict()
    for key, value in module.params.items():
        if key in allowed_keys:
            client_params[key] = value
    try:
        etcd = etcd3.client(**client_params)
    except Exception as exp:
        module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
                         exception=traceback.format_exc())
    try:
        # etcd3's get() returns a (value, metadata) tuple; the value is
        # None when the key does not exist.
        cluster_value = etcd.get(module.params['key'])
    except Exception as exp:
        module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
                         exception=traceback.format_exc())

    # Make the cluster_value[0] a string for string comparisons
    result['old_value'] = to_native(cluster_value[0])

    if module.params['state'] == 'absent':
        if cluster_value[0] is not None:
            if module.check_mode:
                result['changed'] = True
            else:
                try:
                    etcd.delete(module.params['key'])
                except Exception as exp:
                    module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
                                     exception=traceback.format_exc())
                else:
                    result['changed'] = True
    elif module.params['state'] == 'present':
        if result['old_value'] != module.params['value']:
            if module.check_mode:
                result['changed'] = True
            else:
                try:
                    etcd.put(module.params['key'], module.params['value'])
                except Exception as exp:
                    module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
                                     exception=traceback.format_exc())
                else:
                    result['changed'] = True
    else:
        # Unreachable in practice: argument_spec restricts state choices.
        module.fail_json(msg="State not recognized")

    # on success, hand the accumulated result back to Ansible
    module.exit_json(**result)
def main():
    """Module entry point; delegates to run_module()."""
    run_module()
if __name__ == '__main__':
main()
|
kwailamchan/programming-languages | refs/heads/master | python/gists/super.py | 3 | class Employee(object):
"""Models real-life employees!"""
def __init__(self, employee_name):
self.employee_name = employee_name
def calculate_wage(self, hours):
self.hours = hours
return hours * 20.00
# Add your code below!
class PartTimeEmployee(Employee):
    """Part-time employee: lower hourly rate, plus a helper that asks the
    base class what the same hours would earn at the full-time rate."""

    def calculate_wage(self, hours):
        """Record the hours worked and return pay at $12.00/hour."""
        self.hours = hours
        return 12.00 * hours

    def full_time_wage(self, hours):
        """Return the full-time ($20.00/hour) wage for ``hours``."""
        return super(PartTimeEmployee, self).calculate_wage(hours)
# Demo: price a part-timer's hours at the full-time rate (prints 200.0).
# NOTE: Python 2 print statement; Python 3 would require print(...).
milton = PartTimeEmployee("milton")
print milton.full_time_wage(10)
|
zrhans/pythonanywhere | refs/heads/master | .virtualenvs/django19/lib/python3.4/site-packages/django/core/handlers/wsgi.py | 339 | from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
    """
    Wrap a stream so that no more than ``limit`` bytes can ever be read
    from it, no matter how callers slice their read()/readline() calls.
    """
    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit
        self.buffer = b''
        self.buf_size = buf_size

    def _read_limited(self, size=None):
        """Read up to ``size`` bytes, clamped to the bytes still allowed."""
        # Clamp the request to whatever the limit still permits.
        if size is None or size > self.remaining:
            size = self.remaining
        if not size:
            return b''
        chunk = self.stream.read(size)
        self.remaining -= len(chunk)
        return chunk

    def read(self, size=None):
        """Read ``size`` bytes (or everything left), serving buffered
        bytes first."""
        if size is None:
            data, self.buffer = self.buffer + self._read_limited(), b''
        elif size < len(self.buffer):
            data, self.buffer = self.buffer[:size], self.buffer[size:]
        else:  # size >= len(self.buffer)
            wanted = size - len(self.buffer)
            data, self.buffer = self.buffer + self._read_limited(wanted), b''
        return data

    def readline(self, size=None):
        """Read one line (at most ``size`` bytes if given)."""
        # Pull chunks until the buffer holds a newline, reaches the size
        # cap, or the stream/limit is exhausted.
        while b'\n' not in self.buffer and \
                (size is None or len(self.buffer) < size):
            chunk = self._read_limited(size - len(self.buffer) if size else None)
            if not chunk:
                break
            self.buffer += chunk
        # Split a single line off the front of the buffer via BytesIO.
        sio = BytesIO(self.buffer)
        line = sio.readline(size) if size else sio.readline()
        self.buffer = sio.read()
        return line
class WSGIRequest(http.HttpRequest):
    """HttpRequest subclass populated from a WSGI environ dictionary."""
    def __init__(self, environ):
        script_name = get_script_name(environ)
        path_info = get_path_info(environ)
        if not path_info:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            path_info = '/'
        self.environ = environ
        self.path_info = path_info
        # be careful to only replace the first slash in the path because of
        # http://test/something and http://test//something being different as
        # stated in http://www.ietf.org/rfc/rfc2396.txt
        self.path = '%s/%s' % (script_name.rstrip('/'),
                               path_info.replace('/', '', 1))
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        # Honor a request-declared charset only if Python knows the codec;
        # otherwise fall back silently to the default encoding.
        _, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
        if 'charset' in content_params:
            try:
                codecs.lookup(content_params['charset'])
            except LookupError:
                pass
            else:
                self.encoding = content_params['charset']
        self._post_parse_error = False
        try:
            content_length = int(environ.get('CONTENT_LENGTH'))
        except (ValueError, TypeError):
            # Missing or malformed CONTENT_LENGTH: treat the body as empty.
            content_length = 0
        # Cap body reads at CONTENT_LENGTH so nothing reads past the body.
        self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
        self._read_started = False
        self.resolver_match = None

    def _get_scheme(self):
        return self.environ.get('wsgi.url_scheme')

    @cached_property
    def GET(self):
        # The WSGI spec says 'QUERY_STRING' may be absent.
        raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
        return http.QueryDict(raw_query_string, encoding=self._encoding)

    def _get_post(self):
        # Parse the request body lazily, on first access.
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    @cached_property
    def COOKIES(self):
        raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
        return http.parse_cookie(raw_cookie)

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    POST = property(_get_post, _set_post)
    FILES = property(_get_files)
class WSGIHandler(base.BaseHandler):
    """The WSGI application object Django exposes to WSGI servers."""
    initLock = Lock()
    request_class = WSGIRequest

    def __call__(self, environ, start_response):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            with self.initLock:
                try:
                    # Check that middleware is still uninitialized.
                    if self._request_middleware is None:
                        self.load_middleware()
                except:
                    # Unload whatever middleware we got
                    self._request_middleware = None
                    raise

        set_script_prefix(get_script_name(environ))
        signals.request_started.send(sender=self.__class__, environ=environ)
        try:
            request = self.request_class(environ)
        except UnicodeDecodeError:
            # Undecodable bytes in the request: answer 400 rather than
            # letting the error surface as a 500.
            logger.warning('Bad Request (UnicodeDecodeError)',
                exc_info=sys.exc_info(),
                extra={
                    'status_code': 400,
                }
            )
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)

        response._handler_class = self.__class__

        status = '%s %s' % (response.status_code, response.reason_phrase)
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
        start_response(force_str(status), response_headers)
        # Let the server stream file responses through its own efficient
        # wrapper (e.g. sendfile) when it provides one.
        if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
            response = environ['wsgi.file_wrapper'](response.file_to_stream)
        return response
def get_path_info(environ):
    """
    Return the HTTP request's PATH_INFO as a unicode string.
    """
    # Recover the raw bytes from the environ, then decode them as UTF-8.
    return get_bytes_from_wsgi(environ, 'PATH_INFO', '/').decode(UTF_8)
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
    set (to anything).
    """
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_text(settings.FORCE_SCRIPT_NAME)

    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
    if not script_url:
        script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')

    if script_url:
        # The script name is the full pre-rewrite URL minus PATH_INFO.
        path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
        script_name = script_url[:-len(path_info)] if path_info else script_url
    else:
        script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')

    return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as bytes.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = environ.get(str(key), str(default))
    # WSGI under Python 3 hands us text that was decoded as ISO-8859-1;
    # re-encode the same way to recover the original raw bytes (they may
    # really be UTF-8). Python 2 already supplies bytes directly.
    if six.PY3:
        return value.encode(ISO_8859_1)
    return value
def get_str_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as str.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = get_bytes_from_wsgi(environ, key, default)
    # Decode with errors='replace' so undecodable bytes never raise here.
    return value.decode(UTF_8, errors='replace') if six.PY3 else value
|
team-ferret/pip-in-toto | refs/heads/master | tests/lib/git_submodule_helpers.py | 58 | from __future__ import absolute_import
import textwrap
def _create_test_package_submodule(env):
    """Create and git-commit a minimal repo under scratch/version_pkg_submodule.

    Returns the path to the new submodule repository.
    """
    env.scratch_path.join("version_pkg_submodule").mkdir()
    submodule_path = env.scratch_path / 'version_pkg_submodule'
    env.run('touch', 'testfile', cwd=submodule_path)
    env.run('git', 'init', cwd=submodule_path)
    env.run('git', 'add', '.', cwd=submodule_path)
    env.run('git', 'commit', '-q',
            '--author', 'pip <pypa-dev@googlegroups.com>',
            '-am', 'initial version / submodule', cwd=submodule_path)

    return submodule_path
def _change_test_package_submodule(env, submodule_path):
    """Commit a file modification and a new file inside the submodule repo."""
    submodule_path.join("testfile").write("this is a changed file")
    submodule_path.join("testfile2").write("this is an added file")
    env.run('git', 'add', '.', cwd=submodule_path)
    env.run('git', 'commit', '-q',
            '--author', 'pip <pypa-dev@googlegroups.com>',
            '-am', 'submodule change', cwd=submodule_path)
def _pull_in_submodule_changes_to_module(env, module_path):
    """Pull the submodule's latest master into the parent repo and commit
    the updated submodule pointer.
    """
    env.run(
        'git',
        'pull',
        '-q',
        'origin',
        'master',
        cwd=module_path / 'testpkg/static/',
    )
    env.run('git', 'commit', '-q',
            '--author', 'pip <pypa-dev@googlegroups.com>',
            '-am', 'submodule change', cwd=module_path)
def _create_test_package_with_submodule(env):
    """Create a git-backed installable package embedding a git submodule.

    The package repo lives at scratch/version_pkg and registers the repo
    built by _create_test_package_submodule() as the 'testpkg/static'
    submodule. Returns (package_path, submodule_path).
    """
    env.scratch_path.join("version_pkg").mkdir()
    version_pkg_path = env.scratch_path / 'version_pkg'
    version_pkg_path.join("testpkg").mkdir()
    pkg_path = version_pkg_path / 'testpkg'

    pkg_path.join("__init__.py").write("# hello there")
    pkg_path.join("version_pkg.py").write(textwrap.dedent('''\
                                def main():
                                    print('0.1')
                                '''))
    version_pkg_path.join("setup.py").write(textwrap.dedent('''\
                        from setuptools import setup, find_packages
                        setup(name='version_pkg',
                              version='0.1',
                              packages=find_packages(),
                             )
                        '''))
    env.run('git', 'init', cwd=version_pkg_path, expect_error=True)
    env.run('git', 'add', '.', cwd=version_pkg_path, expect_error=True)
    env.run('git', 'commit', '-q',
            '--author', 'pip <pypa-dev@googlegroups.com>',
            '-am', 'initial version', cwd=version_pkg_path,
            expect_error=True)

    submodule_path = _create_test_package_submodule(env)

    env.run(
        'git',
        'submodule',
        'add',
        submodule_path,
        'testpkg/static',
        cwd=version_pkg_path,
        expect_error=True,
    )
    env.run('git', 'commit', '-q',
            '--author', 'pip <pypa-dev@googlegroups.com>',
            '-am', 'initial version w submodule', cwd=version_pkg_path,
            expect_error=True)

    return version_pkg_path, submodule_path
|
mifl/android_kernel_pantech_oscar | refs/heads/LA.AF.1.1-oscar | scripts/build-all.py | 1182 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
import os
import os.path
import shutil
import subprocess
import sys
from optparse import OptionParser
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
    """Print *msg* to stderr, prefixed with 'error: '."""
    sys.stderr.write("error: {0}\n".format(msg))
def fail(msg):
    """Report *msg* on stderr and abort the build with exit status 1."""
    error(msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # Heuristic: every MSM kernel tree has these two files at its root.
    if (not os.path.isfile('MAINTAINERS') or
        not os.path.isfile('arch/arm/mach-msm/Kconfig')):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed.

    Bug fix: the EEXIST handler referenced ``errno`` but the module never
    imported it, so the race-recovery path raised NameError instead of
    ignoring an already-created directory (requires ``import errno`` at
    the top of the file).
    """
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # A concurrent invocation may have created the directory between
            # the isdir() check and makedirs(); that is not an error.
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def update_config(file, str):
    """Append the config fragment *str* (plus a newline) to defconfig *file*.

    Fixes: the file handle was never closed on a write error; 'with' now
    guarantees cleanup. The print uses the parenthesized form, which is
    valid (and prints identically) under both Python 2 and Python 3.
    """
    print('Updating %s with \'%s\'\n' % (file, str))
    with open(file, 'a') as defconfig:
        defconfig.write(str + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    names = {}
    patterns = ('arch/arm/configs/[fm]sm[0-9-]*_defconfig',
                'arch/arm/configs/qsd*_defconfig',
                'arch/arm/configs/apq*_defconfig')
    for pattern in patterns:
        for path in glob.glob(pattern):
            # Strip the trailing '_defconfig' to obtain the target name.
            names[os.path.basename(path)[:-10]] = path
    return names
class Builder:
    """Runs a build subprocess, teeing its output into a log file while
    echoing either full lines (verbose mode) or progress dots to stdout.

    NOTE: this file uses Python 2 print statements throughout.
    """
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        """Run *args*; return the subprocess's exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read the raw fd directly (unbuffered) so progress is shown live.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One progress dot per output line, wrapping every 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()

        self.fd.close()
        return result
# Targets that failed to build under --keep-going; reported by build_many().
failed_targets = []

def build(target):
    """Configure and build one target in its own directory under build_dir.

    Copies the tree's defconfig into the build dir, runs the configure and
    build steps, and (with --configs/--updateconfigs) copies the saved
    defconfig back into the tree.
    """
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)

    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()

    # --updateconfigs only refreshes configs; skip the actual compile.
    if not all_options.updateconfigs:
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            # Under --keep-going record the failure and continue;
            # otherwise abort immediately.
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))

    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build every target in *targets*, then fail if any of them failed.

    *allconf* maps target name -> defconfig path (used by --updateconfigs
    to patch the tree's defconfig before building).
    """
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    # Only non-empty when --keep-going collected failures in build().
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Parse command-line options and dispatch the requested builds."""
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
                 ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Publish the parsed options for build()/Builder/build_many().
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)

    # --oldconfig replaces the make goals; --make-target overrides them.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    # Dispatch: 'all', 'perf', 'noperf', or an explicit target list.
    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")

if __name__ == "__main__":
    main()
|
timokoola/timoechobot | refs/heads/master | requests/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 285 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
import sys
# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
# system, use it to handle IPAddress ServerAltnames (this was added in
# python-3.5) otherwise only do DNS matching. This allows
# backports.ssl_match_hostname to continue to be used all the way back to
# python-2.4.
try:
import ipaddress
except ImportError:
ipaddress = None
__version__ = '3.5.0.1'
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def _to_unicode(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding='ascii', errors='strict')
return obj
def _ipaddress_match(ipname, host_ip):
    """Exact matching of IP addresses.

    RFC 6125 explicitly doesn't define an algorithm for this
    (section 1.7.2 - "Out of Scope").
    """
    # OpenSSL may append a trailing newline to a subjectAltName IP value,
    # and ipaddress rejects byte strings, so normalize before parsing.
    candidate = _to_unicode(ipname).rstrip()
    return ipaddress.ip_address(candidate) == host_ip
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    try:
        # Divergence from upstream: ipaddress can't handle byte str
        host_ip = ipaddress.ip_address(_to_unicode(hostname))
    except ValueError:
        # Not an IP address (common case)
        host_ip = None
    except UnicodeError:
        # Divergence from upstream: Have to deal with ipaddress not taking
        # byte strings.  addresses should be all ascii, so we consider it not
        # an ipaddress in this case
        host_ip = None
    except AttributeError:
        # Divergence from upstream: Make ipaddress library optional.
        # When ipaddress is None the attribute access above raised, and we
        # fall back to DNS-only matching; any other AttributeError is a bug.
        if ipaddress is None:
            host_ip = None
        else:
            raise
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == 'IP Address':
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # No entry matched: report all candidate names in the error message.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
|
chrooter/planbox | refs/heads/staging | src/planbox_ui/decorators.py | 4 | """
ssl_required decorator
----------------------
Redirect requests to the given to to HTTPS if they are not secure already.
Source: https://djangosnippets.org/snippets/1351/
Author: pjs (https://djangosnippets.org/users/pjs/)
"""
try:
# Python 2
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
def ssl_required(view_func):
    """Decorator: permanently redirect insecure requests to HTTPS."""

    def is_ssl_enabled(request):
        # Explicitly switched off in settings?
        if not getattr(settings, 'HTTPS_ENABLED', True):
            return False
        # Debugging from a whitelisted internal address also disables SSL.
        if settings.DEBUG and request.META.get('REMOTE_ADDR', None) in settings.INTERNAL_IPS:
            return False
        return True

    def _checkssl(request, *args, **kwargs):
        # Pass straight through when SSL is disabled or already in use.
        if not is_ssl_enabled(request) or request.is_secure():
            return view_func(request, *args, **kwargs)

        if hasattr(settings, 'SSL_DOMAIN'):
            url_str = urlparse.urljoin(
                settings.SSL_DOMAIN,
                request.get_full_path()
            )
        else:
            url_str = request.build_absolute_uri()
        url_str = url_str.replace('http://', 'https://', 1)
        return HttpResponsePermanentRedirect(url_str)

    return _checkssl
|
rghe/ansible | refs/heads/devel | test/units/modules/net_tools/nios/test_nios_a_record.py | 11 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_a_record
from ansible.module_utils.net_tools.nios import api
from ansible.compat.tests.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosARecordModule(TestNiosModule):
    """Unit tests for the nios_a_record module's create/update/remove paths."""

    module = nios_a_record

    def setUp(self):
        # Replace the WapiModule used by nios_a_record with mocks so no
        # real WAPI/network calls happen; patchers are stopped in tearDown.
        super(TestNiosARecordModule, self).setUp()
        self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_a_record.WapiModule')
        self.module.check_mode = False
        self.module.params = {'provider': None}
        self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_a_record.WapiModule')
        self.exec_command = self.mock_wapi.start()
        self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_a_record.WapiModule.run')
        self.mock_wapi_run.start()
        self.load_config = self.mock_wapi_run.start()

    def tearDown(self):
        super(TestNiosARecordModule, self).tearDown()
        self.mock_wapi.stop()
        self.mock_wapi_run.stop()

    def _get_wapi(self, test_object):
        # Build a real WapiModule whose I/O methods are mocked;
        # get_object returns the simulated existing record (or None).
        wapi = api.WapiModule(self.module)
        wapi.get_object = Mock(name='get_object', return_value=test_object)
        wapi.create_object = Mock(name='create_object')
        wapi.update_object = Mock(name='update_object')
        wapi.delete_object = Mock(name='delete_object')
        return wapi

    def load_fixtures(self, commands=None):
        self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
        self.load_config.return_value = dict(diff=None, session='session')

    def test_nios_a_record_create(self):
        # No existing object (test_object=None) => run() must create one.
        self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com',
                              'ipv4': '192.168.10.1', 'comment': None, 'extattrs': None}

        test_object = None

        test_spec = {
            "name": {"ib_req": True},
            "ipv4": {"ib_req": True},
            "comment": {},
            "extattrs": {}
        }

        wapi = self._get_wapi(test_object)
        # NOTE(review): leftover debug print; harmless but noisy in test output.
        print("WAPI: ", wapi.__dict__)
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])
        wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
                                                                  'ipv4': '192.168.10.1'})

    def test_nios_a_record_update_comment(self):
        # Existing object with a different comment => run() must report a change.
        self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
                              'comment': 'updated comment', 'extattrs': None}

        test_object = [
            {
                "comment": "test comment",
                "_ref": "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
                "name": "a.ansible.com",
                "ipv4": "192.168.10.1",
                "extattrs": {}
            }
        ]

        test_spec = {
            "name": {"ib_req": True},
            "ipv4": {"ib_req": True},
            "comment": {},
            "extattrs": {}
        }

        wapi = self._get_wapi(test_object)
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])

    def test_nios_a_record_remove(self):
        # state=absent with an existing object => run() must delete it by ref.
        self.module.params = {'provider': None, 'state': 'absent', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
                              'comment': None, 'extattrs': None}

        ref = "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"

        test_object = [{
            "comment": "test comment",
            "_ref": ref,
            "name": "a.ansible.com",
            "ipv4": "192.168.10.1",
            "extattrs": {'Site': {'value': 'test'}}
        }]

        test_spec = {
            "name": {"ib_req": True},
            "ipv4": {"ib_req": True},
            "comment": {},
            "extattrs": {}
        }

        wapi = self._get_wapi(test_object)
        res = wapi.run('testobject', test_spec)

        self.assertTrue(res['changed'])
        wapi.delete_object.assert_called_once_with(ref)
|
indico/indico | refs/heads/master | indico/util/signals.py | 4 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from itertools import zip_longest
from types import GeneratorType
def values_from_signal(signal_response, single_value=False, skip_none=True, as_list=False,
                       multi_value_types=GeneratorType, return_plugins=False):
    """Combine the results from both single-value and multi-value signals.

    The signal needs to return either a single object (which is not a
    generator) or a generator (usually by returning its values using
    `yield`).

    :param signal_response: The return value of a Signal's `.send()` method
    :param single_value: If each return value should be treated as a single
                         value in all cases (disables the generator check)
    :param skip_none: If None return values should be skipped
    :param as_list: If you want a list instead of a set (only use this if
                    you need non-hashable return values, the order is still
                    not defined!)
    :param multi_value_types: Types which should be considered multi-value.
                              It is used in an `isinstance()` call and if
                              the check succeeds, the value is passed to
                              `list.extend()`
    :param return_plugins: return `(plugin, value)` tuples instead of just
                           the values. `plugin` can be `None` if the signal
                           was not registered in a plugin.
    :return: A set/list containing the results
    """
    collected = []
    for func, value in signal_response:
        plugin = getattr(func, 'indico_plugin', None)
        if single_value or not isinstance(value, multi_value_types):
            collected.append((plugin, value))
            continue
        # Multi-value responses (e.g. generators) are flattened, pairing
        # each yielded item with the plugin that produced it.
        items = list(value)
        if items:
            collected.extend((plugin, item) for item in items)
    if skip_none:
        collected = [pair for pair in collected if pair[1] is not None]
    if not return_plugins:
        collected = [pair[1] for pair in collected]
    return collected if as_list else set(collected)
def named_objects_from_signal(signal_response, name_attr='name', plugin_attr=None):
    """Return a dict of objects based on an unique attribute on each object.

    The signal needs to return either a single object (which is not a
    generator) or a generator (usually by returning its values using
    ``yield``).

    :param signal_response: The return value of a Signal's ``.send()`` method
    :param name_attr: The attribute containing each object's unique name
    :param plugin_attr: The attribute that will be set to the plugin containing
                        the object (set to `None` for objects in the core)
    :return: dict mapping object names to objects
    """
    pairs = values_from_signal(signal_response, return_plugins=True)
    if plugin_attr is not None:
        for plugin, obj in pairs:
            setattr(obj, plugin_attr, plugin)
    mapping = {}
    for _, obj in pairs:
        mapping[getattr(obj, name_attr)] = obj
    # Two distinct objects sharing one name (e.g. two plugins picking a too
    # generic name) would silently shadow each other -- detect and fail.
    conflicting = {obj for _, obj in pairs} - set(mapping.values())
    if conflicting:
        names = ', '.join(sorted(getattr(x, name_attr) for x in conflicting))
        raise RuntimeError(f'Non-unique object names: {names}')
    return mapping
|
hrashk/sympy | refs/heads/master | sympy/matrices/matrices.py | 1 | from __future__ import print_function, division
import collections
from sympy.core.add import Add
from sympy.core.basic import Basic, C, Atom
from sympy.core.expr import Expr
from sympy.core.function import count_ops
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy.core.numbers import Integer, ilcm, Rational, Float
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, default_sort_key, xrange
from sympy.polys import PurePoly, roots, cancel, gcd
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions import exp, factorial
from sympy.printing import sstr
from sympy.core.compatibility import reduce, as_int
from sympy.utilities.exceptions import SymPyDeprecationWarning
from types import FunctionType
def _iszero(x):
    """Returns True if x is zero.

    NOTE(review): ``x.is_zero`` is a SymPy fuzzy bool and may be None when
    zero-ness cannot be decided symbolically -- callers should treat None
    as "not known to be zero"; confirm against call sites.
    """
    return x.is_zero
class MatrixError(Exception):
    """Base class for matrix-related errors."""
    pass


class ShapeError(ValueError, MatrixError):
    """Wrong matrix shape"""
    pass


class NonSquareMatrixError(ShapeError):
    """Raised when an operation requires a square matrix but got a
    non-square one."""
    pass
class DeferredVector(Symbol):
    """A vector whose components are deferred (e.g. for use with lambdify)

    Examples
    ========

    >>> from sympy import DeferredVector, lambdify
    >>> X = DeferredVector( 'X' )
    >>> X
    X
    >>> expr = (X[0] + 2, X[2] + 3)
    >>> func = lambdify( X, expr )
    >>> func( [1, 2, 3] )
    (3, 6)
    """

    def __getitem__(self, i):
        # Normalize negative zero so the range check below accepts it.
        if i == -0:
            i = 0
        if i < 0:
            raise IndexError('DeferredVector index out of range')
        # Components are plain symbols named like "X[3]".
        return Symbol('%s[%d]' % (self.name, i))

    def __str__(self):
        return sstr(self)

    def __repr__(self):
        return "DeferredVector('%s')" % self.name
class MatrixBase(object):
    """Shared base class for SymPy's matrix implementations."""

    # Added just for numpy compatibility: makes numpy defer mixed binary
    # operations (e.g. ndarray * Matrix) to this class.
    __array_priority__ = 11

    is_Matrix = True
    is_Identity = None
    _class_priority = 3
    _sympify = staticmethod(sympify)

    __hash__ = None  # Mutable -- mutable objects must not be hashable
    @classmethod
    def _handle_creation_inputs(cls, *args, **kwargs):
        """Return the number of rows, cols and flat matrix elements.

        Examples
        ========

        >>> from sympy import Matrix, I

        Matrix can be constructed as follows:

        * from a nested list of iterables

        >>> Matrix( ((1, 2+I), (3, 4)) )
        Matrix([
        [1, 2 + I],
        [3,     4]])

        * from un-nested iterable (interpreted as a column)

        >>> Matrix( [1, 2] )
        Matrix([
        [1],
        [2]])

        * from un-nested iterable with dimensions

        >>> Matrix(1, 2, [1, 2] )
        Matrix([[1, 2]])

        * from no arguments (a 0 x 0 matrix)

        >>> Matrix()
        Matrix(0, 0, [])

        * from a rule

        >>> Matrix(2, 2, lambda i, j: i/(j + 1) )
        Matrix([
        [0,   0],
        [1, 1/2]])

        """
        from sympy.matrices.sparse import SparseMatrix

        # Matrix(SparseMatrix(...))
        if len(args) == 1 and isinstance(args[0], SparseMatrix):
            return args[0].rows, args[0].cols, flatten(args[0].tolist())

        # Matrix(Matrix(...))
        if len(args) == 1 and isinstance(args[0], MatrixBase):
            return args[0].rows, args[0].cols, args[0]._mat

        # Matrix(MatrixSymbol('X', 2, 2))
        if len(args) == 1 and isinstance(args[0], Basic) and args[0].is_Matrix:
            return args[0].rows, args[0].cols, args[0].as_explicit()._mat

        if len(args) == 3:
            rows = as_int(args[0])
            cols = as_int(args[1])

        # Matrix(2, 2, lambda i, j: i+j)
        if len(args) == 3 and isinstance(args[2], collections.Callable):
            operation = args[2]
            flat_list = []
            for i in range(rows):
                flat_list.extend([cls._sympify(operation(cls._sympify(i), j))
                    for j in range(cols)])

        # Matrix(2, 2, [1, 2, 3, 4])
        elif len(args) == 3 and is_sequence(args[2]):
            flat_list = args[2]
            if len(flat_list) != rows*cols:
                raise ValueError('List length should be equal to rows*columns')
            flat_list = [cls._sympify(i) for i in flat_list]

        # Matrix(numpy.ones((2, 2)))
        elif len(args) == 1 and hasattr(args[0], "__array__"):  # pragma: no cover
            # NumPy array or matrix or some other object that implements
            # __array__. So let's first use this method to get a
            # numpy.array() and then make a python list out of it.
            arr = args[0].__array__()
            if len(arr.shape) == 2:
                rows, cols = arr.shape[0], arr.shape[1]
                flat_list = [cls._sympify(i) for i in arr.ravel()]
                return rows, cols, flat_list
            elif len(arr.shape) == 1:
                # 1-D arrays become a single row.
                rows, cols = 1, arr.shape[0]
                flat_list = [S.Zero]*cols
                for i in range(len(arr)):
                    flat_list[i] = cls._sympify(arr[i])
                return rows, cols, flat_list
            else:
                raise NotImplementedError(
                    "SymPy supports just 1D and 2D matrices")

        # Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
        elif len(args) == 1 and is_sequence(args[0])\
                and not isinstance(args[0], DeferredVector):
            in_mat = []
            # Collect the distinct row lengths seen; more than one is an error.
            ncol = set()
            for row in args[0]:
                if isinstance(row, MatrixBase):
                    in_mat.extend(row.tolist())
                    if row.cols or row.rows:  # only pay attention if it's not 0x0
                        ncol.add(row.cols)
                else:
                    in_mat.append(row)
                    try:
                        ncol.add(len(row))
                    except TypeError:
                        # Scalar rows count as length-1 (column vector input).
                        ncol.add(1)
            if len(ncol) > 1:
                raise ValueError("Got rows of variable lengths: %s" %
                    sorted(list(ncol)))
            rows = len(in_mat)
            if rows:
                if not is_sequence(in_mat[0]):
                    cols = 1
                    flat_list = [cls._sympify(i) for i in in_mat]
                    return rows, cols, flat_list
                cols = ncol.pop()
            else:
                cols = 0
            flat_list = []
            for j in range(rows):
                for i in range(cols):
                    flat_list.append(cls._sympify(in_mat[j][i]))

        # Matrix()
        elif len(args) == 0:
            # Empty Matrix
            rows = cols = 0
            flat_list = []

        else:
            raise TypeError("Data type not understood")

        return rows, cols, flat_list
    def _setitem(self, key, value):
        """Helper to set value at location given by key.

        Examples
        ========

        >>> from sympy import Matrix, I, zeros, ones
        >>> m = Matrix(((1, 2+I), (3, 4)))
        >>> m
        Matrix([
        [1, 2 + I],
        [3,     4]])
        >>> m[1, 0] = 9
        >>> m
        Matrix([
        [1, 2 + I],
        [9,     4]])
        >>> m[1, 0] = [[0, 1]]

        To replace row r you assign to position r*m where m
        is the number of columns:

        >>> M = zeros(4)
        >>> m = M.cols
        >>> M[3*m] = ones(1, m)*2; M
        Matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [2, 2, 2, 2]])

        And to replace column c you can assign to position c:

        >>> M[2] = ones(m, 1)*4; M
        Matrix([
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [2, 2, 4, 2]])
        """
        from .dense import Matrix

        is_slice = isinstance(key, slice)
        i, j = key = self.key2ij(key)
        is_mat = isinstance(value, MatrixBase)
        if type(i) is slice or type(j) is slice:
            # Slice assignment: delegate to the copyin_* helpers.
            if is_mat:
                self.copyin_matrix(key, value)
                return
            if not isinstance(value, Expr) and is_sequence(value):
                self.copyin_list(key, value)
                return
            raise ValueError('unexpected value: %s' % value)
        else:
            # Sequences (that are not Expr) are coerced to a Matrix first.
            if (not is_mat and
                    not isinstance(value, Basic) and is_sequence(value)):
                value = Matrix(value)
                is_mat = True
            if is_mat:
                if is_slice:
                    key = (slice(*divmod(i, self.cols)),
                           slice(*divmod(j, self.cols)))
                else:
                    key = (slice(i, i + value.rows),
                           slice(j, j + value.cols))
                self.copyin_matrix(key, value)
            else:
                # Scalar case: return the triple for the caller to store.
                return i, j, self._sympify(value)
            return

    def copy(self):
        """Return a copy of the matrix."""
        return self._new(self.rows, self.cols, self._mat)

    def trace(self):
        """Return the trace of the matrix (delegates to ``_eval_trace``).

        Raises NonSquareMatrixError if the matrix is not square.
        """
        if not self.is_square:
            raise NonSquareMatrixError()
        return self._eval_trace()
    def inv(self, method=None, **kwargs):
        """Return the inverse of the matrix.

        ``method`` (if given) and any other keyword arguments are forwarded
        to the subclass hook ``_eval_inverse``.  Raises
        NonSquareMatrixError for non-square matrices.
        """
        if not self.is_square:
            raise NonSquareMatrixError()
        if method is not None:
            kwargs['method'] = method
        return self._eval_inverse(**kwargs)

    def inv_mod(self, m):
        r"""
        Returns the inverse of the matrix `K` (mod `m`), if it exists.

        Method to find the matrix inverse of `K` (mod `m`) implemented in this function:

        * Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.

        * Compute `r = 1/\mathrm{det}(K) \pmod m`.

        * `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.

        Examples
        ========

        >>> from sympy import Matrix
        >>> A = Matrix(2, 2, [1, 2, 3, 4])
        >>> A.inv_mod(5)
        Matrix([
        [3, 1],
        [4, 2]])
        >>> A.inv_mod(3)
        Matrix([
        [1, 1],
        [0, 1]])

        """
        from sympy.ntheory import totient
        if not self.is_square:
            raise NonSquareMatrixError()
        N = self.cols
        phi = totient(m)
        det_K = self.det()
        if gcd(det_K, m) != 1:
            raise ValueError('Matrix is not invertible (mod %d)' % m)
        # Euler's theorem: a**(phi(m) - 1) == a**-1 (mod m) when gcd(a, m) == 1.
        det_inv = pow(int(det_K), int(phi - 1), int(m))
        K_adj = self.cofactorMatrix().transpose()
        K_inv = self.__class__(N, N, [det_inv*K_adj[i, j] % m for i in range(N) for j in range(N)])
        return K_inv
    def transpose(self):
        """Return the transpose of the matrix (delegates to ``_eval_transpose``)."""
        return self._eval_transpose()

    T = property(transpose, None, None, "Matrix transposition.")

    def conjugate(self):
        """Return the element-wise complex conjugate (delegates to
        ``_eval_conjugate``)."""
        return self._eval_conjugate()

    C = property(conjugate, None, None, "By-element conjugation.")

    def adjoint(self):
        """Conjugate transpose or Hermitian conjugation."""
        return self.T.C

    @property
    def H(self):
        """Return Hermite conjugate.

        Examples
        ========

        >>> from sympy import Matrix, I
        >>> m = Matrix((0, 1 + I, 2, 3))
        >>> m
        Matrix([
        [    0],
        [1 + I],
        [    2],
        [    3]])
        >>> m.H
        Matrix([[0, 1 - I, 2, 3]])

        See Also
        ========

        conjugate: By-element conjugation
        D: Dirac conjugation
        """
        return self.T.C

    @property
    def D(self):
        """Return Dirac conjugate (if self.rows == 4).

        Examples
        ========

        >>> from sympy import Matrix, I, eye
        >>> m = Matrix((0, 1 + I, 2, 3))
        >>> m.D
        Matrix([[0, 1 - I, -2, -3]])
        >>> m = (eye(4) + I*eye(4))
        >>> m[0, 3] = 2
        >>> m.D
        Matrix([
        [1 - I,     0,      0,      0],
        [    0, 1 - I,      0,      0],
        [    0,     0, -1 + I,      0],
        [    2,     0,      0, -1 + I]])

        If the matrix does not have 4 rows an AttributeError will be raised
        because this property is only defined for matrices with 4 rows.

        >>> Matrix(eye(2)).D
        Traceback (most recent call last):
        ...
        AttributeError: Matrix has no attribute D.

        See Also
        ========

        conjugate: By-element conjugation
        H: Hermite conjugation
        """
        from sympy.physics.matrices import mgamma
        if self.rows != 4:
            # In Python 3.2, properties can only return an AttributeError
            # so we can't raise a ShapeError -- see commit which added the
            # first line of this inline comment. Also, there is no need
            # for a message since MatrixBase will raise the AttributeError
            raise AttributeError
        return self.H*mgamma(0)

    def __array__(self):
        # numpy interoperability: convert to a numpy array on demand.
        from .dense import matrix2numpy
        return matrix2numpy(self)

    def __len__(self):
        """Return the number of elements of self.

        Implemented mainly so bool(Matrix()) == False.
        """
        return self.rows*self.cols
    @property
    def shape(self):
        """The shape (dimensions) of the matrix as the 2-tuple (rows, cols).

        Examples
        ========

        >>> from sympy.matrices import zeros
        >>> M = zeros(2, 3)
        >>> M.shape
        (2, 3)
        >>> M.rows
        2
        >>> M.cols
        3
        """
        return (self.rows, self.cols)

    def __sub__(self, a):
        # Implemented in terms of __add__ and unary negation.
        return self + (-a)

    def __rsub__(self, a):
        return (-self) + a

    def __mul__(self, other):
        """Return self*other where other is either a scalar or a matrix
        of compatible dimensions.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
        >>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
        True
        >>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> A*B
        Matrix([
        [30, 36, 42],
        [66, 81, 96]])
        >>> B*A
        Traceback (most recent call last):
        ...
        ShapeError: Matrices size mismatch.
        >>>

        See Also
        ========

        matrix_multiply_elementwise
        """
        if getattr(other, 'is_Matrix', False):
            # NOTE: a straightforward triple loop over (i, j, k) is
            # equivalent but measurably (~5%) slower than the row-list
            # dot products below.
            A = self
            B = other
            if A.cols != B.rows:
                raise ShapeError("Matrices size mismatch.")
            if A.cols == 0:
                return classof(A, B)._new(A.rows, B.cols, lambda i, j: 0)
            # Row lists of A and of B-transpose make each (i, j) entry a
            # plain dot product of two Python lists.
            blst = B.T.tolist()
            alst = A.tolist()
            return classof(A, B)._new(A.rows, B.cols, lambda i, j:
                reduce(lambda k, l: k + l,
                    [a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
        else:
            # Scalar multiplication: scale every element.
            return self._new(self.rows, self.cols,
                [i*other for i in self._mat])

    def __rmul__(self, a):
        if getattr(a, 'is_Matrix', False):
            return self._new(a)*self
        return self*a
    def __pow__(self, num):
        """Return self**num.

        Integer exponents use binary (repeated-squaring) exponentiation;
        negative integers invert first.  Rational exponents require the
        matrix to be diagonalizable.
        """
        from sympy.matrices import eye

        if not self.is_square:
            raise NonSquareMatrixError()
        if isinstance(num, int) or isinstance(num, Integer):
            n = int(num)
            if n < 0:
                return self.inv()**-n   # A**-2 = (A**-1)**2
            a = eye(self.cols)
            s = self
            # Binary exponentiation: square s, multiplying into a for
            # each set bit of n.
            while n:
                if n % 2:
                    a *= s
                    n -= 1
                if not n:
                    break
                s *= s
                n //= 2
            return self._new(a)
        elif isinstance(num, Rational):
            try:
                P, D = self.diagonalize()
            except MatrixError:
                raise NotImplementedError(
                    "Implemented only for diagonalizable matrices")
            # A**q = P * D**q * P**-1 with D diagonal.
            for i in range(D.rows):
                D[i, i] = D[i, i]**num
            return self._new(P*D*P.inv())
        else:
            raise NotImplementedError(
                "Only integer and rational values are supported")

    def __add__(self, other):
        """Return self + other, raising ShapeError if shapes don't match."""
        if getattr(other, 'is_Matrix', False):
            A = self
            B = other
            if A.shape != B.shape:
                raise ShapeError("Matrix size mismatch.")
            alst = A.tolist()
            blst = B.tolist()
            ret = [S.Zero]*A.rows
            for i in range(A.shape[0]):
                ret[i] = list(map(lambda j, k: j + k, alst[i], blst[i]))
            rv = classof(A, B)._new(ret)
            if not A.rows:
                # Preserve the (0, n) shape of empty matrices.
                rv = rv.reshape(*A.shape)
            return rv
        raise TypeError('cannot add matrix and %s' % type(other))

    def __radd__(self, other):
        return self + other

    def __div__(self, other):
        # Division is multiplication by the reciprocal scalar.
        return self*(S.One / other)

    def __truediv__(self, other):
        return self.__div__(other)

    def __neg__(self):
        return -1*self

    def multiply(self, b):
        """Returns self*b

        See Also
        ========

        dot
        cross
        multiply_elementwise
        """
        return self*b

    def add(self, b):
        """Return self + b """
        return self + b
def table(self, printer, rowsep='\n', colsep=', ', align='right'):
r"""
String form of Matrix as a table.
``printer`` is the printer to use for on the elements (generally
something like StrPrinter())
``rowsep`` is the string used to separate rows (by default a newline).
``colsep`` is the string used to separate columns (by default ', ').
``align`` defines how the elements are aligned. Must be one of 'left',
'right', or 'center'. You can also use '<', '>', and '^' to mean the
same thing, respectively.
This is used by the string printer for Matrix.
Examples
========
>>> from sympy import Matrix
>>> from sympy.printing.str import StrPrinter
>>> M = Matrix([[1, 2], [-33, 4]])
>>> printer = StrPrinter()
>>> M.table(printer)
'[ 1, 2]\n[-33, 4]'
>>> print(M.table(printer))
[ 1, 2]
[-33, 4]
>>> print(M.table(printer, rowsep=',\n'))
[ 1, 2],
[-33, 4]
>>> print('[%s]' % M.table(printer, rowsep=',\n'))
[[ 1, 2],
[-33, 4]]
>>> print(M.table(printer, colsep=' '))
[ 1 2]
[-33 4]
>>> print(M.table(printer, align='center'))
[ 1 , 2]
[-33, 4]
"""
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
s = printer._print(self[i,j])
res[-1].append(s)
maxlen[j] = max(len(s), maxlen[j])
# Patch strings together
align = {
'left': str.ljust,
'right': str.rjust,
'center': str.center,
'<': str.ljust,
'>': str.rjust,
'^': str.center,
}[align]
for i, row in enumerate(res):
for j, elem in enumerate(row):
row[j] = align(elem, maxlen[j])
res[i] = "[" + colsep.join(row) + "]"
return rowsep.join(res)
def _format_str(self, printer=None):
    """Return the ``Matrix([...])`` string form, creating a default
    StrPrinter when none is supplied.
    """
    if not printer:
        from sympy.printing.str import StrPrinter
        printer = StrPrinter()
    # Handle zero dimensions:
    if self.rows == 0 or self.cols == 0:
        return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
    # Single-row matrices are printed on one line.
    if self.rows == 1:
        return "Matrix([%s])" % self.table(printer, rowsep=',\n')
    return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')
def __str__(self):
    """Return a one-line string form, e.g. ``Matrix([[1, 2], [3, 4]])``."""
    if self.rows == 0 or self.cols == 0:
        return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
    return "Matrix(%s)" % str(self.tolist())
def __repr__(self):
    """Return the sstr representation of the matrix."""
    return sstr(self)
def cholesky(self):
    """Return the lower-triangular Cholesky factor L with ``L*L.T == self``.

    The matrix must be square, symmetric, positive-definite and
    non-singular.

    Raises
    ======
    NonSquareMatrixError
        If the matrix is not square.
    ValueError
        If the matrix is not symmetric.

    See Also
    ========
    LDLdecomposition
    LUdecomposition
    QRdecomposition
    """
    # Validate the preconditions here; the actual factorization lives in
    # the representation-specific _cholesky().
    if not self.is_square:
        raise NonSquareMatrixError("Matrix must be square.")
    if not self.is_symmetric():
        raise ValueError("Matrix must be symmetric.")
    return self._cholesky()
def LDLdecomposition(self):
    """Return ``(L, D)`` with ``L*D*L.T == self``, where L is unit lower
    triangular and D is diagonal.

    This is the square-root-free variant of the Cholesky decomposition;
    the matrix must be square, symmetric, positive-definite and
    non-singular.

    Raises
    ======
    NonSquareMatrixError
        If the matrix is not square.
    ValueError
        If the matrix is not symmetric.

    See Also
    ========
    cholesky
    LUdecomposition
    QRdecomposition
    """
    # Preconditions are checked here; _LDLdecomposition() does the work.
    if not self.is_square:
        raise NonSquareMatrixError("Matrix must be square.")
    if not self.is_symmetric():
        raise ValueError("Matrix must be symmetric.")
    return self._LDLdecomposition()
def lower_triangular_solve(self, rhs):
    """Solve ``self * x = rhs`` where ``self`` is lower triangular.

    See Also
    ========
    upper_triangular_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv_solve
    """
    if not self.is_square:
        raise NonSquareMatrixError("Matrix must be square.")
    if rhs.rows != self.rows:
        raise ShapeError("Matrices size mismatch.")
    if not self.is_lower:
        raise ValueError("Matrix must be lower triangular.")
    # Forward substitution is delegated to the representation-specific
    # helper.
    return self._lower_triangular_solve(rhs)
def upper_triangular_solve(self, rhs):
    """Solve ``self * x = rhs`` where ``self`` is upper triangular.

    See Also
    ========
    lower_triangular_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv_solve
    """
    if not self.is_square:
        raise NonSquareMatrixError("Matrix must be square.")
    if rhs.rows != self.rows:
        raise TypeError("Matrix size mismatch.")
    if not self.is_upper:
        raise TypeError("Matrix is not upper triangular.")
    # Back substitution is delegated to the representation-specific helper.
    return self._upper_triangular_solve(rhs)
def cholesky_solve(self, rhs):
    """Solve ``self * x = rhs`` using a Cholesky decomposition.

    For a symmetric matrix the factorization is applied directly; for a
    tall (rows > cols) matrix the normal equations are formed and the
    least-squares solution is returned.  Under-determined systems are not
    supported.

    See Also
    ========
    lower_triangular_solve
    upper_triangular_solve
    diagonal_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv_solve
    """
    if self.is_symmetric():
        L = self._cholesky()
    elif self.rows >= self.cols:
        # Normal equations: (A.T*A) x = A.T*b
        L = (self.T*self)._cholesky()
        rhs = self.T*rhs
    else:
        raise NotImplementedError("Under-determined System.")
    # Solve L*Y = rhs, then L.T*x = Y.
    Y = L._lower_triangular_solve(rhs)
    return (L.T)._upper_triangular_solve(Y)
def diagonal_solve(self, rhs):
    """Solves Ax = B efficiently, where A is a diagonal Matrix,
    with non-zero diagonal entries.

    Examples
    ========
    >>> from sympy.matrices import Matrix, eye
    >>> A = eye(2)*2
    >>> B = Matrix([[1, 2], [3, 4]])
    >>> A.diagonal_solve(B) == B/2
    True

    See Also
    ========
    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv_solve
    """
    # Bug fix: is_diagonal is a method, and the original code tested the
    # bound method object itself ("if not self.is_diagonal"), which is
    # always truthy, so the diagonality check never triggered.
    if not self.is_diagonal():
        raise TypeError("Matrix should be diagonal")
    if rhs.rows != self.rows:
        raise TypeError("Size mis-match")
    return self._diagonal_solve(rhs)
def LDLsolve(self, rhs):
    """Solve ``self * x = rhs`` using the LDL decomposition.

    For a symmetric matrix the factorization is used directly; for a tall
    (rows > cols) matrix the normal equations are formed and the
    least-squares solution is returned.  Under-determined systems are not
    supported.

    See Also
    ========
    LDLdecomposition
    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    diagonal_solve
    LUsolve
    QRsolve
    pinv_solve
    """
    if self.is_symmetric():
        L, D = self.LDLdecomposition()
    elif self.rows >= self.cols:
        # Normal equations: (A.T*A) x = A.T*b
        L, D = (self.T*self).LDLdecomposition()
        rhs = self.T*rhs
    else:
        raise NotImplementedError("Under-determined System.")
    # Solve L*Y = rhs, then D*Z = Y, then L.T*x = Z.
    Y = L._lower_triangular_solve(rhs)
    Z = D._diagonal_solve(Y)
    return (L.T)._upper_triangular_solve(Z)
def solve_least_squares(self, rhs, method='CH'):
    """Return the least-squares solution of ``self * x = rhs``.

    With the default ``method='CH'`` the Cholesky-based solver is used.
    Any other value is passed through to :meth:`inv` (see its docstring
    for the available inversion methods), in which case the normal
    equations ``(A.T*A) x = A.T*rhs`` are solved explicitly.
    """
    if method == 'CH':
        return self.cholesky_solve(rhs)
    t = self.T
    return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='GE'):
    """Return the solution of ``self * soln = rhs`` using the given
    inversion method (see the :meth:`inv` docstring for the options).

    Only square systems are supported here; non-square systems raise
    ValueError with a hint toward :meth:`solve_least_squares`.
    """
    if self.is_square:
        return self.inv(method=method)*rhs
    if self.rows < self.cols:
        raise ValueError('Under-determined system.')
    raise ValueError('For over-determined system, M, having '
        'more rows than columns, try M.solve_least_squares(rhs).')
def __mathml__(self):
    """Return a MathML string built from the entries' __mathml__ forms."""
    rows = []
    for i in range(self.rows):
        entries = ''.join(self[i, j].__mathml__() for j in range(self.cols))
        rows.append("<matrixrow>" + entries + "</matrixrow>")
    return "<matrix>" + ''.join(rows) + "</matrix>"
def submatrix(self, keys):
    """Return the submatrix selected by ``keys``, a (row, col) pair of
    integers and/or slices (see :meth:`key2bounds`).

    See Also
    ========
    extract
    """
    rlo, rhi, clo, chi = self.key2bounds(keys)
    nrows, ncols = rhi - rlo, chi - clo
    # Copy the selected window row by row out of the flat storage.
    entries = []
    for i in range(rlo, rhi):
        start = i*self.cols
        entries.extend(self._mat[start + clo:start + chi])
    return self._new(nrows, ncols, entries)
def extract(self, rowsList, colsList):
    """Return the submatrix formed by the given row and column indices.

    Negative indices are allowed (each must satisfy -n <= i < n for the
    relevant dimension) and indices may be repeated.

    See Also
    ========
    submatrix
    """
    ncols = self.cols
    flat = self._mat
    # a2idx validates each index and normalizes negatives.
    rows = [a2idx(r, self.rows) for r in rowsList]
    cols = [a2idx(c, ncols) for c in colsList]
    return self._new(len(rows), len(cols),
        lambda i, j: flat[rows[i]*ncols + cols[j]])
def key2bounds(self, keys):
    """Convert a (row_key, col_key) pair of integers/slices into the
    half-open bounds ``(rlo, rhi, clo, chi)``, raising if any plain index
    is out of range.

    See Also
    ========
    key2ij
    """
    def bounds(key, n):
        # Slices rely on Python's own clamping; plain indices are
        # validated and normalized by a2idx.
        if isinstance(key, slice):
            if not n:
                return 0, 0
            return key.indices(n)[:2]
        lo = a2idx(key, n)
        return lo, lo + 1

    rlo, rhi = bounds(keys[0], self.rows)
    clo, chi = bounds(keys[1], self.cols)
    return rlo, rhi, clo, chi
def key2ij(self, key):
    """Canonicalize ``key``: a 2-sequence becomes a pair of validated
    indices (slices pass through unchanged), a bare slice becomes its
    bounds over the flat matrix, and a single integer becomes the
    (row, col) position of that flat index.

    See Also
    ========
    key2bounds
    """
    if is_sequence(key):
        if len(key) != 2:
            raise TypeError('key must be a sequence of length 2')
        return [i if isinstance(i, slice) else a2idx(i, n)
            for i, n in zip(key, self.shape)]
    if isinstance(key, slice):
        return key.indices(len(self))[:2]
    # Flat index -> (row, col).
    return divmod(a2idx(key, len(self)), self.cols)
def evalf(self, prec=None, **options):
    """Apply evalf() to each element of self."""
    if prec is None:
        return self.applyfunc(lambda e: e.evalf(**options))
    return self.applyfunc(lambda e: e.evalf(prec, **options))
n = evalf
def atoms(self, *types):
    """Return the set of atoms (optionally restricted to ``types``)
    appearing in any entry of the matrix.
    """
    if types:
        # Allow instances to be passed in place of classes.
        types = tuple(t if isinstance(t, type) else type(t) for t in types)
    else:
        types = (Atom,)
    found = set()
    for entry in self:
        found |= entry.atoms(*types)
    return found
def subs(self, *args, **kwargs):  # should mirror core.basic.subs
    """Return a new matrix with ``subs`` applied to each entry."""
    return self.applyfunc(lambda e: e.subs(*args, **kwargs))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
        mul=True, log=True, multinomial=True, basic=True, **hints):
    """Apply ``core.function.expand`` to each entry of the matrix,
    forwarding all expansion flags unchanged.
    """
    return self.applyfunc(lambda e: e.expand(
        deep, modulus, power_base, power_exp, mul, log, multinomial,
        basic, **hints))
def simplify(self, ratio=1.7, measure=count_ops):
    """Apply ``simplify`` (with the given ratio/measure) to each element
    of the matrix.
    """
    return self.applyfunc(lambda e: e.simplify(ratio, measure))
_eval_simplify = simplify
def doit(self, **kwargs):
    """Matrices are already evaluated; return self unchanged."""
    return self
def print_nonzero(self, symb="X"):
    """Print a compact sketch of the matrix: each non-zero entry is
    marked with ``symb``, each zero with a blank, one bracketed line
    per row.  Useful for a fast look at the sparsity pattern.
    """
    lines = []
    for i in range(self.rows):
        marks = [" " if self[i, j] == 0 else str(symb)
            for j in range(self.cols)]
        lines.append("[%s]" % ''.join(marks))
    print('\n'.join(lines))
def LUsolve(self, rhs, iszerofunc=_iszero):
    """Solve the linear system Ax = rhs for x where A = self.

    This is for symbolic matrices, for real or complex ones use
    sympy.mpmath.lu_solve or sympy.mpmath.qr_solve.

    See Also
    ========
    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    QRsolve
    pinv_solve
    LUdecomposition
    """
    if rhs.rows != self.rows:
        raise ShapeError("`self` and `rhs` must have the same number of rows.")
    # Bug fix: forward the caller's iszerofunc instead of always using
    # the default _iszero (the parameter was previously ignored).
    A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
    n = self.rows
    b = rhs.permuteFwd(perm).as_mutable()
    # forward substitution, all diag entries are scaled to 1
    # (xrange replaced by range for consistency with the rest of the file
    # and Python 3 compatibility)
    for i in range(n):
        for j in range(i):
            scale = A[i, j]
            b.zip_row_op(i, j, lambda x, y: x - y*scale)
    # backward substitution
    for i in range(n - 1, -1, -1):
        for j in range(i + 1, n):
            scale = A[i, j]
            b.zip_row_op(i, j, lambda x, y: x - y*scale)
        scale = A[i, i]
        b.row_op(i, lambda x, _: x/scale)
    return rhs.__class__(b)
def LUdecomposition(self, iszerofunc=_iszero):
    """Returns the decomposition LU and the row swaps p.

    Examples
    ========
    >>> from sympy import Matrix
    >>> a = Matrix([[4, 3], [6, 3]])
    >>> L, U, _ = a.LUdecomposition()
    >>> L
    Matrix([
    [  1, 0],
    [3/2, 1]])
    >>> U
    Matrix([
    [4,    3],
    [0, -3/2]])

    See Also
    ========
    cholesky
    LDLdecomposition
    QRdecomposition
    LUdecomposition_Simple
    LUdecompositionFF
    LUsolve
    """
    # Bug fix: forward the caller's iszerofunc instead of always using
    # the default _iszero (the parameter was previously ignored).
    combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
    # Split the combined factor: strictly-lower part into L (with unit
    # diagonal), diagonal and upper part into U.
    L = self.zeros(self.rows)
    U = self.zeros(self.rows)
    for i in range(self.rows):
        for j in range(self.rows):
            if i > j:
                L[i, j] = combined[i, j]
            else:
                if i == j:
                    L[i, i] = 1
                U[i, j] = combined[i, j]
    return L, U, p
def LUdecomposition_Simple(self, iszerofunc=_iszero):
    """Return ``(A, p)``: a single matrix ``A`` packing both factors
    (multipliers of L below the diagonal -- L's unit diagonal is implicit
    -- and U on and above it) and the list ``p`` of row swaps performed,
    in order.  ``iszerofunc`` decides whether a candidate pivot counts
    as zero.

    See Also
    ========
    LUdecomposition
    LUdecompositionFF
    LUsolve
    """
    if not self.is_square:
        raise NonSquareMatrixError("A Matrix must be square to apply LUdecomposition_Simple().")
    n = self.rows
    A = self.as_mutable()
    p = []
    # factorization (Doolittle-style, column by column)
    for j in range(n):
        # Finish the U entries above the diagonal in column j.
        for i in range(j):
            for k in range(i):
                A[i, j] = A[i, j] - A[i, k]*A[k, j]
        pivot = -1
        # Update rows j..n-1 of column j and look for a usable pivot.
        for i in range(j, n):
            for k in range(j):
                A[i, j] = A[i, j] - A[i, k]*A[k, j]
            # find the first non-zero pivot, includes any expression
            if pivot == -1 and not iszerofunc(A[i, j]):
                pivot = i
        if pivot < 0:
            # this result is based on iszerofunc's analysis of the possible pivots, so even though
            # the element may not be strictly zero, the supplied iszerofunc's evaluation gave True
            raise ValueError("No nonzero pivot found; inversion failed.")
        if pivot != j:  # row must be swapped
            A.row_swap(pivot, j)
            p.append([pivot, j])
        # Store the L multipliers below the diagonal, scaled by the pivot.
        scale = 1 / A[j, j]
        for i in range(j + 1, n):
            A[i, j] = A[i, j]*scale
    return A, p
def LUdecompositionFF(self):
    """Compute a fraction-free LU decomposition.

    Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
    If the elements of the matrix belong to some integral domain I, then all
    elements of L, D and U are guaranteed to belong to I.

    **Reference**
    - W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
      for LU and QR factors". Frontiers in Computer Science in China,
      Vol 2, no. 1, pp. 67-80, 2008.

    See Also
    ========
    LUdecomposition
    LUdecomposition_Simple
    LUsolve
    """
    from sympy.matrices import SparseMatrix
    zeros = SparseMatrix.zeros
    eye = SparseMatrix.eye
    n, m = self.rows, self.cols
    U, L, P = self.as_mutable(), eye(n), eye(n)
    DD = zeros(n, n)
    oldpivot = 1
    for k in range(n - 1):
        if U[k, k] == 0:
            # Zero pivot: swap in the first later row with a non-zero
            # entry in this column; L and P rows are swapped to match.
            for kpivot in range(k + 1, n):
                if U[kpivot, k]:
                    break
            else:
                raise ValueError("Matrix is not full rank")
            U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
            L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
            P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
        L[k, k] = Ukk = U[k, k]
        DD[k, k] = oldpivot*Ukk
        for i in range(k + 1, n):
            L[i, k] = Uik = U[i, k]
            for j in range(k + 1, m):
                # Bareiss-style fraction-free update; the division by the
                # previous pivot is exact over an integral domain.
                U[i, j] = (Ukk*U[i, j] - U[k, j]*Uik) / oldpivot
            U[i, k] = 0
        oldpivot = Ukk
    DD[n - 1, n - 1] = oldpivot
    return P, L, DD, U
def cofactorMatrix(self, method="berkowitz"):
    """Return a matrix containing the cofactor of each element.

    See Also
    ========
    cofactor
    minorEntry
    minorMatrix
    adjugate
    """
    return self._new(self.rows, self.cols,
        lambda i, j: self.cofactor(i, j, method))
def minorEntry(self, i, j, method="berkowitz"):
    """Calculate the minor of element (i, j): the determinant of the
    matrix with row i and column j removed.

    See Also
    ========
    minorMatrix
    cofactor
    cofactorMatrix
    """
    if not 0 <= i < self.rows or not 0 <= j < self.cols:
        # Fixed a missing space that made the message read "(2)and 0 <= j".
        raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
            "(%d) " % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
    return self.minorMatrix(i, j).det(method)
def minorMatrix(self, i, j):
    """Return the minor matrix of element (i, j): a copy of self with
    row i and column j removed.

    See Also
    ========
    minorEntry
    cofactor
    cofactorMatrix
    """
    if not 0 <= i < self.rows or not 0 <= j < self.cols:
        # Fixed a missing space that made the message read "(2)and 0 <= j".
        raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
            "(%d) " % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
    M = self.as_mutable()
    M.row_del(i)
    M.col_del(j)
    return self._new(M)
def cofactor(self, i, j, method="berkowitz"):
    """Calculate the cofactor of element (i, j): the signed minor
    ``(-1)**(i + j) * minorEntry(i, j)``.

    See Also
    ========
    cofactorMatrix
    minorEntry
    minorMatrix
    """
    minor = self.minorEntry(i, j, method)
    return minor if (i + j) % 2 == 0 else -1*minor
def jacobian(self, X):
    """Return the Jacobian matrix d(self)/d(X).

    ``self`` must be a row or column matrix of expressions
    f_i(x_1, ..., x_n); ``X`` is the ordered set of variables x_i and may
    be a list or a Matrix, in row or column form.  The result has one row
    per function and one column per variable.

    See Also
    ========
    hessian
    wronskian
    """
    if not isinstance(X, MatrixBase):
        X = self._new(X)
    # Accept either orientation for self; anything else is an error.
    if self.shape[0] == 1:
        m = self.shape[1]
    elif self.shape[1] == 1:
        m = self.shape[0]
    else:
        raise TypeError("self must be a row or a column matrix")
    # Likewise for X.
    if X.shape[0] == 1:
        n = X.shape[1]
    elif X.shape[1] == 1:
        n = X.shape[0]
    else:
        raise TypeError("X must be a row or a column matrix")
    # m functions differentiated with respect to n variables.
    return self._new(m, n, lambda j, i: self[j].diff(X[i]))
def QRdecomposition(self):
    """Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.

    Computed by Gram-Schmidt orthogonalization of the columns.  The
    matrix must have at least as many rows as columns and full column
    rank, otherwise MatrixError is raised.

    See Also
    ========
    cholesky
    LDLdecomposition
    LUdecomposition
    QRsolve
    """
    cls = self.__class__
    self = self.as_mutable()
    if not self.rows >= self.cols:
        raise MatrixError(
            "The number of rows must be greater than columns")
    n = self.rows
    m = self.cols
    rank = n
    # Column-rank check via the row-reduced form: every zero row found
    # lowers the rank by one.
    row_reduced = self.rref()[0]
    for i in range(row_reduced.rows):
        if row_reduced.row(i).norm() == 0:
            rank -= 1
    if not rank == self.cols:
        raise MatrixError("The rank of the matrix must match the columns")
    Q, R = self.zeros(n, m), self.zeros(m)
    for j in range(m):  # for each column vector
        tmp = self[:, j]  # take original v
        for i in range(j):
            # subtract the project of self on new vector
            tmp -= Q[:, i]*self[:, j].dot(Q[:, i])
        # NOTE(review): expand() returns a new matrix and its result is
        # discarded here -- confirm whether `tmp = tmp.expand()` was meant.
        tmp.expand()
        # normalize it
        R[j, j] = tmp.norm()
        Q[:, j] = tmp / R[j, j]
        if Q[:, j].norm() != 1:
            raise NotImplementedError(
                "Could not normalize the vector %d." % j)
        # Fill in the upper-triangular projections of R for column j.
        for i in range(j):
            R[i, j] = Q[:, i].dot(self[:, j])
    return cls(Q), cls(R)
def QRsolve(self, b):
    """Solve the linear system 'Ax = b'.

    'self' is the matrix 'A', the method argument is the vector
    'b'.  The method returns the solution vector 'x'.  If 'b' is a
    matrix, the system is solved for each column of 'b' and the
    return value is a matrix of the same shape as 'b'.

    This method is slower (approximately by a factor of 2) but
    more stable for floating-point arithmetic than the LUsolve method.
    However, LUsolve usually uses an exact arithmetic, so you don't need
    to use QRsolve.

    This is mainly for educational purposes and symbolic matrices, for real
    (or complex) matrices use sympy.mpmath.qr_solve.

    See Also
    ========
    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    pinv_solve
    QRdecomposition
    """
    Q, R = self.as_mutable().QRdecomposition()
    y = Q.T*b  # Q has orthonormal columns, so Q.T acts as Q**-1 here
    # back substitution to solve R*x = y:
    # We build up the result "backwards" in the vector 'x' and reverse it
    # only in the end.
    x = []
    n = R.rows
    for j in range(n - 1, -1, -1):
        tmp = y[j, :]
        for k in range(j + 1, n):
            # x is stored bottom-row first, hence the n - 1 - k index.
            tmp -= R[j, k]*x[n - 1 - k]
        x.append(tmp / R[j, j])
    return self._new([row._mat for row in reversed(x)])
def cross(self, b):
    """Return the cross product of ``self`` and ``b``.

    The dimension requirement is relaxed: each operand need only contain
    exactly 3 elements.  The result has the same type and shape as
    ``self``, so when ``b`` has the same shape the usual identities
    (e.g. ``a x b == -(b x a)``) hold.

    See Also
    ========
    dot
    multiply
    multiply_elementwise
    """
    if not is_sequence(b):
        raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
            type(b))
    if not (self.rows * self.cols == b.rows * b.cols == 3):
        raise ShapeError("Dimensions incorrect for cross product.")
    return self._new(self.rows, self.cols, (
        self[1]*b[2] - self[2]*b[1],
        self[2]*b[0] - self[0]*b[2],
        self[0]*b[1] - self[1]*b[0]))
def dot(self, b):
    """Return the dot product of Matrix self and b relaxing the condition
    of compatible dimensions: if either the number of rows or columns are
    the same as the length of b then the dot product is returned. If self
    is a row or column vector, a scalar is returned. Otherwise, a list
    of results is returned (and in that case the number of columns in self
    must match the length of b).

    Examples
    ========
    >>> from sympy import Matrix
    >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> v = [1, 1, 1]
    >>> M.row(0).dot(v)
    6
    >>> M.col(0).dot(v)
    12
    >>> M.dot(v)
    [6, 15, 24]

    See Also
    ========
    cross
    multiply
    multiply_elementwise
    """
    from .dense import Matrix
    if not isinstance(b, MatrixBase):
        if is_sequence(b):
            # A plain sequence is accepted when its length matches either
            # dimension; wrap it in a Matrix and retry.
            if len(b) != self.cols and len(b) != self.rows:
                raise ShapeError("Dimensions incorrect for dot product.")
            return self.dot(Matrix(b))
        else:
            raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
                type(b))
    if self.cols == b.rows:
        if b.cols != 1:
            # Transpose both operands so the product below yields a
            # column, which is then flattened.
            self = self.T
            b = b.T
        prod = flatten((self*b).tolist())
        if len(prod) == 1:
            # Vector-vector case: return a scalar.
            return prod[0]
        return prod
    # Shapes don't line up directly: try transposing one operand.
    if self.cols == b.cols:
        return self.dot(b.T)
    elif self.rows == b.rows:
        return self.T.dot(b)
    else:
        raise ShapeError("Dimensions incorrect for dot product.")
def multiply_elementwise(self, b):
    """Return the Hadamard (element-by-element) product of self and b.

    See Also
    ========
    cross
    dot
    multiply
    """
    from sympy.matrices import matrix_multiply_elementwise
    return matrix_multiply_elementwise(self, b)
def values(self):
    """Return a list of the non-zero entries of self."""
    return [e for e in flatten(self.tolist()) if not e.is_zero]
def norm(self, ord=None):
    """Return the Norm of a Matrix or Vector.

    In the simplest case this is the geometric size of the vector.
    Other norms can be specified by the ord parameter:

    ===== ============================ ==========================
    ord   norm for matrices            norm for vectors
    ===== ============================ ==========================
    None  Frobenius norm               2-norm
    'fro' Frobenius norm               - does not exist
    inf   --                           max(abs(x))
    -inf  --                           min(abs(x))
    1     --                           as below
    -1    --                           as below
    2     2-norm (largest sing. value) as below
    -2    smallest singular value      as below
    other - does not exist             sum(abs(x)**ord)**(1./ord)
    ===== ============================ ==========================

    See Also
    ========
    normalized
    """
    # Row or Column Vector Norms
    vals = list(self.values()) or [0]  # zero matrix -> treat as [0]
    if self.rows == 1 or self.cols == 1:
        if ord == 2 or ord is None:  # Common case sqrt(<x, x>)
            return sqrt(Add(*(abs(i)**2 for i in vals)))
        elif ord == 1:  # sum(abs(x))
            return Add(*(abs(i) for i in vals))
        elif ord == S.Infinity:  # max(abs(x))
            return Max(*[abs(i) for i in vals])
        elif ord == S.NegativeInfinity:  # min(abs(x))
            return Min(*[abs(i) for i in vals])
        # Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
        # Note that while useful this is not mathematically a norm
        try:
            return Pow(Add(*(abs(i)**ord for i in vals)), S(1) / ord)
        except (NotImplementedError, TypeError):
            raise ValueError("Expected order to be Number, Symbol, oo")
    # Matrix Norms
    else:
        if ord == 2:  # Spectral Norm
            # Maximum singular value
            return Max(*self.singular_values())
        elif ord == -2:
            # Minimum singular value
            return Min(*self.singular_values())
        elif (ord is None or isinstance(ord, str) and ord.lower() in
                ['f', 'fro', 'frobenius', 'vector']):
            # Reshape as vector and send back to norm function
            return self.vec().norm(ord=2)
        else:
            raise NotImplementedError("Matrix Norms under development")
def normalized(self):
    """Return this vector scaled to unit norm.

    Raises ShapeError when ``self`` is not a row or column vector.

    See Also
    ========
    norm
    """
    if self.rows != 1 and self.cols != 1:
        raise ShapeError("A Matrix must be a vector to normalize.")
    length = self.norm()
    return self.applyfunc(lambda e: e / length)
def project(self, v):
    """Return the orthogonal projection of ``self`` onto the line
    spanned by ``v``, i.e. ``v * (self . v) / (v . v)``.
    """
    return v*(self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
    """Return a copy of the matrix with the row swaps in ``perm`` applied
    in reverse order (i.e. undoing :meth:`permuteFwd` for the same perm).

    See Also
    ========
    permuteFwd
    """
    result = self.copy()
    for a, b in reversed(perm):
        result.row_swap(a, b)
    return result
def permuteFwd(self, perm):
    """Return a copy of the matrix with each row swap in ``perm`` applied
    in order.

    See Also
    ========
    permuteBkwd
    """
    result = self.copy()
    for a, b in perm:
        result.row_swap(a, b)
    return result
def exp(self):
    """Return the matrix exponential of a square matrix.

    Computed via the Jordan normal form: exp(A) = P * exp(J) * P**-1,
    where each Jordan block is exponentiated as exp(eigenvalue) times
    the (finite) series of its nilpotent part.

    Raises
    ======
    NonSquareMatrixError
        If the matrix is not square.
    NotImplementedError
        If the Jordan normal form cannot be computed.
    """
    if not self.is_square:
        raise NonSquareMatrixError(
            "Exponentiation is valid only for square matrices")
    try:
        P, cells = self.jordan_cells()
    except MatrixError:
        raise NotImplementedError("Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")
    def _jblock_exponential(b):
        # This function computes the matrix exponential for one single Jordan block
        nr = b.rows
        l = b[0, 0]
        if nr == 1:
            # NOTE(review): the 1x1 case uses C.exp while the general case
            # below uses exp -- presumably both resolve to sympy's exp;
            # confirm against the file's imports.
            res = C.exp(l)
        else:
            from sympy import eye
            # extract the diagonal part
            d = b[0, 0]*eye(nr)
            # and the nilpotent part
            n = b-d
            # compute its exponential: the series terminates because n is
            # nilpotent (n**nr == 0)
            nex = eye(nr)
            for i in range(1, nr):
                nex = nex+n**i/factorial(i)
            # combine the two parts
            res = exp(b[0, 0])*nex
        return(res)
    blocks = list(map(_jblock_exponential, cells))
    from sympy.matrices import diag
    eJ = diag(* blocks)
    # n = self.rows
    ret = P*eJ*P.inv()
    return type(self)(ret)
@property
def is_square(self):
    """True when the number of rows equals the number of columns.

    The empty matrix (0 rows, 0 columns) counts as square.
    """
    return self.rows == self.cols
@property
def is_zero(self):
    """True when every entry of the matrix is zero.

    The matrix need not be square, and the empty matrix is zero by
    the principle of vacuous truth.
    """
    # values() returns only the non-zero entries.
    return not self.values()
def is_nilpotent(self):
    """Return True when some power of the (square) matrix is the zero
    matrix, i.e. its characteristic polynomial is ``x**n``.

    Raises NonSquareMatrixError for non-square matrices.
    """
    if not self.is_square:
        raise NonSquareMatrixError(
            "Nilpotency is valid only for square matrices")
    x = Dummy('x')
    return self.charpoly(x).args[0] == x**self.rows
@property
def is_upper(self):
    """Check if matrix is an upper triangular matrix. True can be returned
    even if the matrix is not square.

    Examples
    ========
    >>> from sympy import Matrix
    >>> m = Matrix(2, 2, [1, 0, 0, 1])
    >>> m.is_upper
    True
    >>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
    >>> m.is_upper
    False

    See Also
    ========
    is_lower
    is_diagonal
    is_upper_hessenberg
    """
    # Clamp j to the column count: for a tall matrix with
    # rows > cols + 1 the unclamped range(i) would index past the last
    # column and raise an IndexError.
    return all(self[i, j].is_zero
        for i in range(1, self.rows)
        for j in range(min(i, self.cols)))
@property
def is_lower(self):
    """True when every entry above the main diagonal is zero.

    True can be returned even when the matrix is not square.  Note that
    symbolic entries count as non-zero unless they are provably zero.

    See Also
    ========
    is_upper
    is_diagonal
    is_lower_hessenberg
    """
    return all(self[i, j].is_zero
        for i in range(self.rows)
        for j in range(i + 1, self.cols))
@property
def is_upper_hessenberg(self):
    """Checks if the matrix is the upper-Hessenberg form.

    The upper hessenberg matrix has zero entries
    below the first subdiagonal.

    See Also
    ========
    is_lower_hessenberg
    is_upper
    """
    # Clamp j to the column count so a tall non-square matrix cannot
    # index past its last column.
    return all(self[i, j].is_zero
        for i in range(2, self.rows)
        for j in range(min(self.cols, i - 1)))
@property
def is_lower_hessenberg(self):
    r"""True when the matrix is in lower-Hessenberg form: all entries
    above the first superdiagonal are zero.

    See Also
    ========
    is_upper_hessenberg
    is_lower
    """
    return all(self[i, j].is_zero
        for i in range(self.rows)
        for j in range(i + 2, self.cols))
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return any(element.has(Symbol) for element in self.values())
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is square matrix and is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while speeding things a bit,
this may however induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed-up is_symmetric()
test by using 'simplify=False'.
>>> m.is_symmetric(simplify=False)
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
if not self.is_square:
return False
if simplify:
delta = self - self.transpose()
delta.simplify()
return delta.equals(self.zeros(self.rows, self.cols))
else:
return self == self.transpose()
def is_anti_symmetric(self, simplify=True):
"""Check if matrix M is an antisymmetric matrix,
that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
simplified before testing to see if it is zero. By default,
the SymPy simplify function is used. To use a custom function
set simplify to a function that accepts a single argument which
returns a simplified expression. To skip simplification, set
simplify to False but note that although this will be faster,
it may induce false negatives.
Examples
========
>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
... -(x + 1)**2 , 0, x*y,
... -y, -x*y, 0])
Simplification of matrix elements is done by default so even
though two elements which should be equal and opposite wouldn't
pass an equality test, the matrix is still reported as
anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already
simplified, this will speed things up. Here, we see that without
simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear
anti-symmetric and simplification in the is_anti_symmetric routine
is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
simpfunc = simplify if isinstance(simplify, FunctionType) else \
_simplify if simplify else False
if not self.is_square:
return False
n = self.rows
if simplify:
for i in range(n):
# diagonal
if not simpfunc(self[i, i]).is_zero:
return False
# others
for j in range(i + 1, n):
diff = self[i, j] + self[j, i]
if not simpfunc(diff).is_zero:
return False
return True
else:
for i in range(n):
for j in range(i, n):
if self[i, j] != -self[j, i]:
return False
return True
def is_diagonal(self):
"""Check if matrix is diagonal,
that is matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
is_diagonalizable
diagonalize
"""
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
def det(self, method="bareis"):
"""Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
det_LU ... det_LU_decomposition
See Also
========
det_bareis
berkowitz_det
det_LU
"""
# if methods were made internal and all determinant calculations
# passed through here, then these lines could be factored out of
# the method routines
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
elif method == "det_LU":
return self.det_LU_decomposition()
else:
raise ValueError("Determinant method '%s' unrecognized" % method)
    def det_bareis(self):
        """Compute matrix determinant using Bareis' fraction-free
        algorithm which is an extension of the well known Gaussian
        elimination method. This approach is best suited for dense
        symbolic matrices and will result in a determinant with
        minimal number of fractions. It means that less term
        rewriting is needed on resulting formulae.

        TODO: Implement algorithm for sparse matrices (SFF),
        http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.

        See Also
        ========
        det
        berkowitz_det
        """
        if not self.is_square:
            raise NonSquareMatrixError()
        if not self:
            return S.One
        M, n = self.copy(), self.rows
        if n == 1:
            det = M[0, 0]
        elif n == 2:
            # hard-coded 2x2 cofactor expansion
            det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
        elif n == 3:
            # hard-coded 3x3 expansion (rule of Sarrus)
            det = (M[0, 0]*M[1, 1]*M[2, 2] + M[0, 1]*M[1, 2]*M[2, 0] + M[0, 2]*M[1, 0]*M[2, 1]) - \
                (M[0, 2]*M[1, 1]*M[2, 0] + M[0, 0]*M[1, 2]*M[2, 1] + M[0, 1]*M[1, 0]*M[2, 2])
        else:
            sign = 1  # track current sign in case of column swap
            for k in range(n - 1):
                # look for a pivot in the current column
                # and assume det == 0 if none is found
                if M[k, k] == 0:
                    for i in range(k + 1, n):
                        if M[i, k]:
                            M.row_swap(i, k)
                            sign *= -1
                            break
                    else:
                        return S.Zero
                # proceed with Bareis' fraction-free (FF)
                # form of Gaussian elimination algorithm
                for i in range(k + 1, n):
                    for j in range(k + 1, n):
                        D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
                        if k > 0:
                            # exact division by the previous pivot keeps
                            # intermediate entries fraction-free
                            D /= M[k - 1, k - 1]
                        if D.is_Atom:
                            M[i, j] = D
                        else:
                            M[i, j] = cancel(D)
            # after elimination the determinant is the last pivot,
            # adjusted by the accumulated row-swap sign
            det = sign*M[n - 1, n - 1]
        return det.expand()
def det_LU_decomposition(self):
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself
fails. In particular, if the matrix has no inverse this method
will fail.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========
det
det_bareis
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy(), self.rows
p, prod = [], 1
l, u, p = M.LUdecomposition()
if len(p) % 2:
prod = -1
for k in range(n):
prod = prod*u[k, k]*l[k, k]
return prod.expand()
def adjugate(self, method="berkowitz"):
"""Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See Also
========
cofactorMatrix
transpose
berkowitz
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
"""Calculates the inverse using LU decomposition.
See Also
========
inv
inverse_GE
inverse_ADJ
"""
if not self.is_square:
raise NonSquareMatrixError()
ok = self.rref(simplify=True)[0]
if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self.LUsolve(self.eye(self.rows), iszerofunc=_iszero)
def inverse_GE(self, iszerofunc=_iszero):
"""Calculates the inverse using Gaussian elimination.
See Also
========
inv
inverse_LU
inverse_ADJ
"""
from .dense import Matrix
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self._new(red[:, big.rows:])
def inverse_ADJ(self, iszerofunc=_iszero):
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========
inv
inverse_LU
inverse_GE
"""
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = self.berkowitz_det()
zero = d.equals(0)
if zero is None:
# if equals() can't decide, will rref be able to?
ok = self.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise ValueError("Matrix det == 0; not invertible.")
return self.adjugate() / d
    def rref(self, simplified=False, iszerofunc=_iszero,
             simplify=False):
        """Return reduced row-echelon form of matrix and indices of pivot vars.

        To simplify elements before finding nonzero pivots set simplify=True
        (to use the default SymPy simplify function) or pass a custom
        simplify function.

        Examples
        ========

        >>> from sympy import Matrix
        >>> from sympy.abc import x
        >>> m = Matrix([[1, 2], [x, 1 - 1/x]])
        >>> m.rref()
        (Matrix([
        [1, 0],
        [0, 1]]), [0, 1])
        """
        # 'simplified' is a deprecated alias for simplify=True
        if simplified is not False:
            SymPyDeprecationWarning(
                feature="'simplified' as a keyword to rref",
                useinstead="simplify=True, or set simplify equal to your "
                "own custom simplification function",
                issue=3382, deprecated_since_version="0.7.2",
            ).warn()
            simplify = simplify or True
        simpfunc = simplify if isinstance(
            simplify, FunctionType) else _simplify
        # pivot: index of next row to contain a pivot
        pivot, r = 0, self.as_mutable()
        # pivotlist: indices of pivot variables (non-free)
        pivotlist = []
        for i in xrange(r.cols):
            if pivot == r.rows:
                break
            if simplify:
                r[pivot, i] = simpfunc(r[pivot, i])
            if iszerofunc(r[pivot, i]):
                # candidate pivot is zero: search below for a row that
                # can be swapped into the pivot position
                for k in xrange(pivot, r.rows):
                    if simplify and k > pivot:
                        r[k, i] = simpfunc(r[k, i])
                    if not iszerofunc(r[k, i]):
                        break
                if k == r.rows - 1 and iszerofunc(r[k, i]):
                    # entire column below is zero: column has no pivot
                    # (free variable), move on to the next column
                    continue
                r.row_swap(pivot, k)
            # normalize the pivot row so the pivot entry becomes 1
            scale = r[pivot, i]
            r.row_op(pivot, lambda x, _: x / scale)
            # eliminate the pivot column from every other row
            for j in xrange(r.rows):
                if j == pivot:
                    continue
                scale = r[j, i]
                r.zip_row_op(j, pivot, lambda x, y: x - scale*y)
            pivotlist.append(i)
            pivot += 1
        return self._new(r), pivotlist
def rank(self, simplified=False, iszerofunc=_iszero,
simplify=False):
"""
Returns the rank of a matrix
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rank()
2
>>> n = Matrix(3, 3, range(1, 10))
>>> n.rank()
2
"""
row_reduced = self.rref(simplified=simplified, iszerofunc=iszerofunc, simplify=simplify)
rank = len(row_reduced[-1])
return rank
    def nullspace(self, simplified=False, simplify=False):
        """Returns list of vectors (Matrix objects) that span nullspace of self
        """
        from sympy.matrices import zeros
        # 'simplified' is a deprecated alias for simplify=True
        if simplified is not False:
            SymPyDeprecationWarning(
                feature="'simplified' as a keyword to nullspace",
                useinstead="simplify=True, or set simplify equal to your "
                "own custom simplification function",
                issue=3382, deprecated_since_version="0.7.2",
            ).warn()
            simplify = simplify or True
        simpfunc = simplify if isinstance(
            simplify, FunctionType) else _simplify
        reduced, pivots = self.rref(simplify=simpfunc)
        basis = []
        # create a set of vectors for the basis: one per free variable
        for i in range(self.cols - len(pivots)):
            basis.append(zeros(self.cols, 1))
        # contains the variable index to which the vector corresponds
        basiskey, cur = [-1]*len(basis), 0
        for i in range(self.cols):
            if i not in pivots:
                basiskey[cur] = i
                cur += 1
        for i in range(self.cols):
            if i not in pivots:  # free var, just set vector's ith place to 1
                basis[basiskey.index(i)][i, 0] = 1
            else:  # add negative of nonpivot entry to corr vector
                for j in range(i + 1, self.cols):
                    line = pivots.index(i)
                    v = reduced[line, j]
                    if simplify:
                        v = simpfunc(v)
                    if v:
                        if j in pivots:
                            # a nonzero entry under a later pivot column
                            # would contradict the rref structure
                            # XXX: Is this the correct error?
                            raise NotImplementedError(
                                "Could not compute the nullspace of `self`.")
                        basis[basiskey.index(j)][i, 0] = -v
        return [self._new(b) for b in basis]
    def berkowitz(self):
        """The Berkowitz algorithm.

        Given N x N matrix with symbolic content, compute efficiently
        coefficients of characteristic polynomials of 'self' and all
        its square sub-matrices composed by removing both i-th row
        and column, without division in the ground domain.

        This method is particularly useful for computing determinant,
        principal minors and characteristic polynomial, when 'self'
        has complicated coefficients e.g. polynomials. Semi-direct
        usage of this algorithm is also important in computing
        efficiently sub-resultant PRS.

        Assuming that M is a square matrix of dimension N x N and
        I is N x N identity matrix, then the following following
        definition of characteristic polynomial is begin used:

        charpoly(M) = det(t*I - M)

        As a consequence, all polynomials generated by Berkowitz
        algorithm are monic.

        >>> from sympy import Matrix
        >>> from sympy.abc import x, y, z
        >>> M = Matrix([[x, y, z], [1, 0, 0], [y, z, x]])
        >>> p, q, r = M.berkowitz()
        >>> p # 1 x 1 M's sub-matrix
        (1, -x)
        >>> q # 2 x 2 M's sub-matrix
        (1, -x, -y)
        >>> r # 3 x 3 M's sub-matrix
        (1, -2*x, x**2 - y*z - y, x*y - z**2)

        For more information on the implemented algorithm refer to:

        [1] S.J. Berkowitz, On computing the determinant in small
        parallel time using a small number of processors, ACM,
        Information Processing Letters 18, 1984, pp. 147-150

        [2] M. Keber, Division-Free computation of sub-resultants
        using Bezout matrices, Tech. Report MPI-I-2006-1-006,
        Saarbrucken, 2006

        See Also
        ========

        berkowitz_det
        berkowitz_minors
        berkowitz_charpoly
        berkowitz_eigenvals
        """
        from sympy.matrices import zeros
        if not self.is_square:
            raise NonSquareMatrixError()
        A, N = self, self.rows
        transforms = [0]*(N - 1)
        # build, for every sub-dimension n, the Toeplitz-like transform
        # matrix T that lifts the charpoly of the (n-1) x (n-1)
        # principal submatrix to that of the n x n one
        for n in range(N, 1, -1):
            T, k = zeros(n + 1, n), n - 1
            # split off the last row/column of the current submatrix
            R, C = -A[k, :k], A[:k, k]
            A, a = A[:k, :k], -A[k, k]
            # items[i] = R * A**i * C, the scalar products that fill
            # the first column of T
            items = [C]
            for i in range(0, n - 2):
                items.append(A*items[i])
            for i, B in enumerate(items):
                items[i] = (R*B)[0, 0]
            items = [S.One, a] + items
            # T is constant along diagonals: shift items down column i
            for i in range(n):
                T[i:, i] = items[:n - i + 1]
            transforms[k - 1] = T
        # start from the 1 x 1 charpoly and apply the transforms in
        # order of growing dimension
        polys = [self._new([S.One, -A[0, 0]])]
        for i, T in enumerate(transforms):
            polys.append(T*polys[i])
        return tuple(map(tuple, polys))
def berkowitz_det(self):
"""Computes determinant using Berkowitz method.
See Also
========
det
berkowitz
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
poly = self.berkowitz()[-1]
sign = (-1)**(len(poly) - 1)
return sign*poly[-1]
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method.
See Also
========
berkowitz
"""
sign, minors = S.NegativeOne, []
for poly in self.berkowitz():
minors.append(sign*poly[-1])
sign = -sign
return tuple(minors)
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
"""Computes characteristic polynomial minors using Berkowitz method.
A PurePoly is returned so using different variables for ``x`` does
not affect the comparison or the polynomials:
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y
>>> A = Matrix([[1, 3], [2, 0]])
>>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
True
Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by
default (which looks good when pretty-printed in unicode):
>>> A.berkowitz_charpoly().as_expr()
_lambda**2 - _lambda - 6
No test is done to see that ``x`` doesn't clash with an existing
symbol, so using the default (``lambda``) or your own Dummy symbol is
the safest option:
>>> A = Matrix([[1, 2], [x, 0]])
>>> A.charpoly().as_expr()
_lambda**2 - _lambda - 2*x
>>> A.charpoly(x).as_expr()
x**2 - 3*x
See Also
========
berkowitz
"""
return PurePoly(list(map(simplify, self.berkowitz()[-1])), x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method.
See Also
========
berkowitz
"""
return roots(self.berkowitz_charpoly(Dummy('x')), **flags)
def eigenvals(self, **flags):
"""Return eigen values using the berkowitz_eigenvals routine.
Since the roots routine doesn't always work well with Floats,
they will be replaced with Rationals before calling that
routine. If this is not desired, set flag ``rational`` to False.
"""
# roots doesn't like Floats, so replace them with Rationals
# unless the nsimplify flag indicates that this has already
# been done, e.g. in eigenvects
if flags.pop('rational', True):
if any(v.has(Float) for v in self):
self = self._new(self.rows, self.cols,
[nsimplify(v, rational=True) for v in self])
flags.pop('simplify', None) # pop unsupported flag
return self.berkowitz_eigenvals(**flags)
    def eigenvects(self, **flags):
        """Return list of triples (eigenval, multiplicity, basis).

        The flag ``simplify`` has two effects:
            1) if bool(simplify) is True, as_content_primitive()
            will be used to tidy up normalization artifacts;
            2) if nullspace needs simplification to compute the
            basis, the simplify flag will be passed on to the
            nullspace routine which will interpret it there.

        If the matrix contains any Floats, they will be changed to Rationals
        for computation purposes, but the answers will be returned after being
        evaluated with evalf. If it is desired to removed small imaginary
        portions during the evalf step, pass a value for the ``chop`` flag.
        """
        from sympy.matrices import eye
        simplify = flags.get('simplify', True)
        # NOTE(review): 'primitive' also reads the 'simplify' flag, so the
        # tidy-up only happens when the caller passed simplify explicitly --
        # presumably intentional, but confirm against the docstring above.
        primitive = bool(flags.get('simplify', False))
        chop = flags.pop('chop', False)
        flags.pop('multiple', None)  # remove this if it's there
        # roots doesn't like Floats, so replace them with Rationals
        float = False
        if any(v.has(Float) for v in self):
            float = True
            self = self._new(self.rows, self.cols, [nsimplify(
                v, rational=True) for v in self])
            flags['rational'] = False  # to tell eigenvals not to do this
        out, vlist = [], self.eigenvals(**flags)
        vlist = list(vlist.items())
        vlist.sort(key=default_sort_key)
        flags.pop('rational', None)
        for r, k in vlist:
            # eigenvectors of r span the nullspace of (M - r*I)
            tmp = self.as_mutable() - eye(self.rows)*r
            basis = tmp.nullspace()
            # whether tmp.is_symbolic() is True or False, it is possible that
            # the basis will come back as [] in which case simplification is
            # necessary.
            if not basis:
                # The nullspace routine failed, try it again with simplification
                basis = tmp.nullspace(simplify=simplify)
                if not basis:
                    raise NotImplementedError(
                        "Can't evaluate eigenvector for eigenvalue %s" % r)
            if primitive:
                # the relationship A*e = lambda*e will still hold if we change the
                # eigenvector; so if simplify is True we tidy up any normalization
                # artifacts with as_content_primitive (default) and remove any pure
                # Integer denominators.
                l = 1
                for i, b in enumerate(basis[0]):
                    c, p = signsimp(b).as_content_primitive()
                    if c is not S.One:
                        b = c*p
                        l = ilcm(l, c.q)
                    basis[0][i] = b
                if l != 1:
                    basis[0] *= l
            if float:
                # undo the Float -> Rational substitution in the output
                out.append((r.evalf(chop=chop), k, [
                    self._new(b).evalf(chop=chop) for b in basis]))
            else:
                out.append((r, k, [self._new(b) for b in basis]))
        return out
def singular_values(self):
"""Compute the singular values of a Matrix
Examples
========
>>> from sympy import Matrix, Symbol
>>> x = Symbol('x', real=True)
>>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> A.singular_values()
[sqrt(x**2 + 1), 1, 0]
See Also
========
condition_number
"""
self = self.as_mutable()
# Compute eigenvalues of A.H A
valmultpairs = (self.H*self).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k, v in valmultpairs.items():
vals += [sqrt(k)]*v # dangerous! same k in several spots!
# sort them in descending order
vals.sort(reverse=True, key=default_sort_key)
return vals
def condition_number(self):
"""Returns the condition number of a matrix.
This is the maximum singular value divided by the minimum singular value
Examples
========
>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
>>> A.condition_number()
100
See Also
========
singular_values
"""
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
def __getattr__(self, attr):
if attr in ('diff', 'integrate', 'limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc(item_doit)
return doit
else:
raise AttributeError(
"%s has no attribute %s." % (self.__class__.__name__, attr))
def integrate(self, *args):
"""Integrate each element of the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.integrate((x, ))
Matrix([
[x**2/2, x*y],
[ x, 0]])
>>> M.integrate((x, 0, 2))
Matrix([
[2, 2*y],
[2, 0]])
See Also
========
limit
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].integrate(*args))
def limit(self, *args):
"""Calculate the limit of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.limit(x, 2)
Matrix([
[2, y],
[1, 0]])
See Also
========
integrate
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].limit(*args))
def diff(self, *args):
"""Calculate the derivative of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.diff(x)
Matrix([
[1, 0],
[0, 0]])
See Also
========
integrate
limit
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].diff(*args))
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self.T.reshape(len(self), 1)
def vech(self, diagonal=True, check_symmetry=True):
"""Return the unique elements of a symmetric Matrix as a one column matrix
by stacking the elements in the lower triangle.
Arguments:
diagonal -- include the diagonal cells of self or not
check_symmetry -- checks symmetry of self but not completely reliably
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
See Also
========
vec
"""
from sympy.matrices import zeros
c = self.cols
if c != self.rows:
raise ShapeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros(c*(c + 1) // 2, 1)
for j in range(c):
for i in range(j, c):
v[count] = self[i, j]
count += 1
else:
v = zeros(c*(c - 1) // 2, 1)
for j in range(c):
for i in range(j + 1, c):
v[count] = self[i, j]
count += 1
return v
    def get_diag_blocks(self):
        """Obtains the square sub-matrices on the main diagonal of a square matrix.

        Useful for inverting symbolic matrices or solving systems of
        linear equations which may be decoupled by having a block diagonal
        structure.

        Examples
        ========

        >>> from sympy import Matrix
        >>> from sympy.abc import x, y, z
        >>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
        >>> a1, a2, a3 = A.get_diag_blocks()
        >>> a1
        Matrix([
        [1,    3],
        [y, z**2]])
        >>> a2
        Matrix([[x]])
        >>> a3
        Matrix([[0]])
        """
        sub_blocks = []

        def recurse_sub_blocks(M):
            # grow a candidate leading block of size i until both the
            # strip to its right and the strip below it are all zero
            i = 1
            while i <= M.shape[0]:
                if i == 1:
                    to_the_right = M[0, i:]
                    to_the_bottom = M[i:, 0]
                else:
                    to_the_right = M[:i, i:]
                    to_the_bottom = M[i:, :i]
                if any(to_the_right) or any(to_the_bottom):
                    # the candidate block is coupled to the rest; enlarge it
                    i += 1
                    continue
                else:
                    # decoupled block found; record it and recurse on the
                    # remaining bottom-right submatrix
                    sub_blocks.append(M[:i, :i])
                    if M.shape == M[:i, :i].shape:
                        return
                    else:
                        recurse_sub_blocks(M[i:, i:])
                        return
        recurse_sub_blocks(self)
        return sub_blocks
def diagonalize(self, reals_only=False, sort=False, normalize=False):
"""
Return (P, D), where D is diagonal and
D = P^-1 * M * P
where M is current matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> (P, D) = m.diagonalize()
>>> D
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> P
Matrix([
[-1, 0, -1],
[ 0, 0, -1],
[ 2, 1, 2]])
>>> P.inv() * m * P
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
See Also
========
is_diagonal
is_diagonalizable
"""
from sympy.matrices import diag
if not self.is_square:
raise NonSquareMatrixError()
if not self.is_diagonalizable(reals_only, False):
self._diagonalize_clear_subproducts()
raise MatrixError("Matrix is not diagonalizable")
else:
if self._eigenvects is None:
self._eigenvects = self.eigenvects(simplify=True)
if sort:
self._eigenvects.sort(key=default_sort_key)
self._eigenvects.reverse()
diagvals = []
P = self._new(self.rows, 0, [])
for eigenval, multiplicity, vects in self._eigenvects:
for k in range(multiplicity):
diagvals.append(eigenval)
vec = vects[k]
if normalize:
vec = vec / vec.norm()
P = P.col_insert(P.cols, vec)
D = diag(*diagvals)
self._diagonalize_clear_subproducts()
return (P, D)
def is_diagonalizable(self, reals_only=False, clear_subproducts=True):
"""Check if matrix is diagonalizable.
If reals_only==True then check that diagonalized matrix consists of the only not complex values.
Some subproducts could be used further in other methods to avoid double calculations,
By default (if clear_subproducts==True) they will be deleted.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> m.is_diagonalizable()
True
>>> m = Matrix(2, 2, [0, 1, 0, 0])
>>> m
Matrix([
[0, 1],
[0, 0]])
>>> m.is_diagonalizable()
False
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_diagonalizable()
True
>>> m.is_diagonalizable(True)
False
See Also
========
is_diagonal
diagonalize
"""
if not self.is_square:
return False
res = False
self._is_symbolic = self.is_symbolic()
self._is_symmetric = self.is_symmetric()
self._eigenvects = None
#if self._is_symbolic:
# self._diagonalize_clear_subproducts()
# raise NotImplementedError("Symbolic matrices are not implemented for diagonalization yet")
self._eigenvects = self.eigenvects(simplify=True)
all_iscorrect = True
for eigenval, multiplicity, vects in self._eigenvects:
if len(vects) != multiplicity:
all_iscorrect = False
break
elif reals_only and not eigenval.is_real:
all_iscorrect = False
break
res = all_iscorrect
if clear_subproducts:
self._diagonalize_clear_subproducts()
return res
def _diagonalize_clear_subproducts(self):
del self._is_symbolic
del self._is_symmetric
del self._eigenvects
def jordan_cell(self, eigenval, n):
n = int(n)
from sympy.matrices import MutableMatrix
out = MutableMatrix.zeros(n)
for i in range(n-1):
out[i, i] = eigenval
out[i, i+1] = 1
out[n-1, n-1] = eigenval
return type(self)(out)
    def _jordan_block_structure(self):
        """Compute, for every eigenvalue, the sizes of its Jordan blocks
        and the attached chains of generalized eigenvectors.

        Returns a dict mapping each eigenvalue to a dict that maps a
        block size ``s`` to the list of Jordan chains of length ``s``.
        The chain vectors are later used to assemble the transformation
        matrix P (see jordan_cells).
        """
        # To every eigenvalue may belong `i` blocks with size s(i)
        # and a chain of generalized eigenvectors
        # which will be determined by the following computations:
        # for every eigenvalue we will add a dictionary
        # containing, for all blocks, the block sizes and the attached chain vectors
        # that will eventually be used to form the transformation P
        jordan_block_structures = {}
        _eigenvects = self.eigenvects()
        ev = self.eigenvals()
        if len(ev) == 0:
            raise AttributeError("could not compute the eigenvalues")
        for eigenval, multiplicity, vects in _eigenvects:
            l_jordan_chains={}
            geometrical = len(vects)
            if geometrical == multiplicity:
                # The Jordan chains have all length 1 and consist of only one vector
                # which is the eigenvector of course
                chains = []
                for v in vects:
                    chain=[v]
                    chains.append(chain)
                l_jordan_chains[1] = chains
                jordan_block_structures[eigenval] = l_jordan_chains
            elif geometrical == 0:
                raise MatrixError("Matrix has the eigen vector with geometrical multiplicity equal zero.")
            else:
                # Up to now we know nothing about the sizes of the blocks of our Jordan matrix.
                # Note that knowledge of algebraic and geometrical multiplicity
                # will *NOT* be sufficient to determine this structure.
                # The blocksize `s` could be defined as the minimal `k` where
                # `kernel(self-lI)^k = kernel(self-lI)^(k+1)`
                # The extreme case would be that k = (multiplicity-geometrical+1)
                # but the blocks could be smaller.

                # Consider for instance the following matrix

                # [2 1 0 0]
                # [0 2 1 0]
                # [0 0 2 0]
                # [0 0 0 2]

                # which coincides with it own Jordan canonical form.
                # It has only one eigenvalue l=2 of (algebraic) multiplicity=4.
                # It has two eigenvectors, one belonging to the last row (blocksize 1)
                # and one being the last part of a jordan chain of length 3 (blocksize of the first block).

                # Note again that it is not possible to obtain this from the algebraic and geometrical
                # multiplicity alone. This only gives us an upper limit for the dimension of one of
                # the subspaces (blocksize of according jordan block) given by
                # max=(multiplicity-geometrical+1) which is reached for our matrix
                # but not for

                # [2 1 0 0]
                # [0 2 0 0]
                # [0 0 2 1]
                # [0 0 0 2]

                # although multiplicity=4 and geometrical=2 are the same for this matrix.

                from sympy.matrices import MutableMatrix
                I = MutableMatrix.eye(self.rows)
                l = eigenval
                M = (self-l*I)

                # We will store the matrices `(self-l*I)^k` for further computations
                # for convenience only we store `Ms[0]=(self-lI)^0=I`
                # so the index is the same as the power for all further Ms entries
                # We also store the vectors that span these kernels (Ns[0] = [])
                # and also their dimensions `a_s`
                # this is mainly done for debugging since the number of blocks of a given size
                # can be computed from the a_s, in order to check our result which is obtained simpler
                # by counting the number of jordan chains for `a` given `s`
                # `a_0` is `dim(Kernel(Ms[0]) = dim (Kernel(I)) = 0` since `I` is regular

                l_jordan_chains={}
                chain_vectors=[]
                Ms = [I]
                Ns = [[]]
                a = [0]
                smax = 0
                M_new = Ms[-1]*M
                Ns_new = M_new.nullspace()
                a_new = len(Ns_new)
                Ms.append(M_new)
                Ns.append(Ns_new)
                while a_new > a[-1]:  # as long as the nullspaces increase compute further powers
                    a.append(a_new)
                    M_new = Ms[-1]*M
                    Ns_new = M_new.nullspace()
                    a_new=len(Ns_new)
                    Ms.append(M_new)
                    Ns.append(Ns_new)
                    smax += 1

                # We now have `Ms[-1]=((self-l*I)**s)=Z=0`
                # We now know the size of the biggest jordan block
                # associated with `l` to be `s`
                # now let us proceed with the computation of the associate part of the transformation matrix `P`
                # We already know the kernel (=nullspace) `K_l` of (self-lI) which consists of the
                # eigenvectors belonging to eigenvalue `l`
                # The dimension of this space is the geometric multiplicity of eigenvalue `l`.
                # For every eigenvector ev out of `K_l`, there exists a subspace that is
                # spanned by the jordan chain of ev. The dimension of this subspace is
                # represented by the length s of the jordan block.
                # The chain itself is given by `{e_0,..,e_s-1}` where:
                # `e_k+1 =(self-lI)e_k (*)`
                # and
                # `e_s-1=ev`
                # So it would be possible to start with the already known `ev` and work backwards until one
                # reaches `e_0`. Unfortunately this can not be done by simply solving system (*) since its matrix
                # is singular (by definition of the eigenspaces).
                # This approach would force us to choose in every step the degree of freedom undetermined
                # by (*). This is difficult to implement with computer algebra systems and also quite inefficient.
                # We therefore reformulate the problem in terms of nullspaces.
                # To do so we start from the other end and choose `e0`'s out of
                # `E=Kernel(self-lI)^s / Kernel(self-lI)^(s-1)`
                # Note that `Kernel(self-lI)^s = Kernel(Z) = V` (the whole vector space).
                # So in the first step `s=smax` this restriction turns out to actually restrict nothing at all
                # and the only remaining condition is to choose vectors in `Kernel(self-lI)^(s-1)`.
                # Subsequently we compute `e_1=(self-lI)e_0`, `e_2=(self-lI)*e_1` and so on.
                # The subspace `E` can have a dimension larger than one.
                # That means that we have more than one Jordan block of size `s` for the eigenvalue `l`
                # and as many jordan chains (This is the case in the second example).
                # In this case we start as many jordan chains and have as many blocks of size s in the jcf.
                # We now have all the jordan blocks of size `s` but there might be others attached to the same
                # eigenvalue that are smaller.
                # So we will do the same procedure also for `s-1` and so on until 1 the lowest possible order
                # where the jordan chain is of length 1 and just represented by the eigenvector.

                for s in reversed(xrange(1, smax+1)):
                    S = Ms[s]
                    # We want the vectors in `Kernel((self-lI)^s)` (**),
                    # but without those in `Kernel(self-lI)^s-1` so we will add these as additional equations
                    # to the system formed by `S` (`S` will no longer be quadratic but this does not harm
                    # since S is rank deficient).
                    exclude_vectors = Ns[s-1]
                    for k in range(0, a[s-1]):
                        S = S.col_join((exclude_vectors[k]).transpose())
                    # We also want to exclude the vectors in the chains for the bigger blocks
                    # that we have already computed (if there are any).
                    # (That is why we start with the biggest s).

                    ######## Implementation remark: ########
                    # Doing so for *ALL* already computed chain vectors
                    # we actually exclude some vectors twice because they are already excluded
                    # by the condition (**).
                    # This happens if there are more than one blocks attached to the same eigenvalue *AND*
                    # the current blocksize is smaller than the block whose chain vectors we exclude.
                    # If the current block has size `s_i` and the next bigger block has size `s_i-1` then
                    # the first `s_i-s_i-1` chain vectors of the bigger block are already excluded by (**).
                    # The unnecessary adding of these equations could be avoided if the algorithm would
                    # take into account the lengths of the already computed chains which are already stored
                    # and add only the last `s` items.
                    # However the following loop would be a good deal more nested to do so.
                    # Since adding a linearly dependent equation does not change the result,
                    # it can harm only in terms of efficiency.
                    # So to be sure I let it there for the moment.
                    # A more elegant alternative approach might be to drop condition (**) altogether
                    # because it is added implicitly by excluding the chain vectors but the original author
                    # of this code was not sure if this is correct in all cases.
                    l = len(chain_vectors)
                    if l > 0:
                        for k in range(0, l):
                            old = chain_vectors[k].transpose()
                            S = S.col_join(old)
                    e0s = S.nullspace()
                    # Determine the number of chain leaders which equals the number of blocks with that size.
                    n_e0 = len(e0s)
                    s_chains = []
                    # s_cells=[]
                    for i in range(0, n_e0):
                        chain=[e0s[i]]
                        for k in range(1, s):
                            v = M*chain[k-1]
                            chain.append(v)

                        # We want the chain leader to appear as the last of the block.
                        chain.reverse()
                        chain_vectors += chain
                        s_chains.append(chain)
                    l_jordan_chains[s] = s_chains
                jordan_block_structures[eigenval] = l_jordan_chains
        return jordan_block_structures
def jordan_form(self, calc_transformation=True):
    r"""Compute the Jordan normal form ``J`` of this matrix.

    Returns the pair ``(P, J)`` where ``P`` is the transformation
    matrix satisfying `J = P^{-1} \cdot M \cdot P` and ``J`` is
    assembled from the individual Jordan blocks of the matrix.

    Examples
    ========

    >>> from sympy import Matrix
    >>> m = Matrix([
    ... [ 6, 5, -2, -3],
    ... [-3, -1, 3, 3],
    ... [ 2, 1, -2, -3],
    ... [-1, 1, 5, 5]])
    >>> P, J = m.jordan_form()
    >>> J
    Matrix([
    [2, 1, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 2, 1],
    [0, 0, 0, 2]])

    See Also
    ========

    jordan_cells
    """
    from sympy.matrices import diag

    # jordan_cells does the heavy lifting; here we only glue the
    # cells together into one block-diagonal matrix.
    transformation, cells = self.jordan_cells()
    jordan_matrix = diag(*cells)
    return transformation, type(self)(jordan_matrix)
def jordan_cells(self, calc_transformation=True):
    r"""Return a list of Jordan cells of current matrix.
    This list shape Jordan matrix J.

    If calc_transformation is specified as False, then transformation P such that

          `J = P^{-1} \cdot M \cdot P`

    will not be calculated.

    Notes
    =====

    Calculation of transformation P is not implemented yet.

    Examples
    ========

    >>> from sympy import Matrix
    >>> m = Matrix(4, 4, [
    ...  6, 5, -2, -3,
    ... -3, -1, 3, 3,
    ...  2, 1, -2, -3,
    ... -1, 1, 5, 5])
    >>> P, Jcells = m.jordan_cells()
    >>> Jcells[0]
    Matrix([
    [2, 1],
    [0, 2]])
    >>> Jcells[1]
    Matrix([
    [2, 1],
    [0, 2]])

    See Also
    ========

    jordan_form
    """
    # NOTE(review): calc_transformation is accepted but never consulted
    # in this body -- the transformation is always computed.
    n = self.rows
    Jcells = []        # the Jordan blocks, in deterministic order
    Pcols_new = []     # columns of the transformation matrix P
    jordan_block_structures = self._jordan_block_structure()
    from sympy.matrices import MutableMatrix

    # Order according to default_sort_key, this makes sure the order is the same as in .diagonalize():
    for eigenval in (sorted(list(jordan_block_structures.keys()), key=default_sort_key)):
        l_jordan_chains = jordan_block_structures[eigenval]
        for s in reversed(sorted((l_jordan_chains).keys())):  # Start with the biggest block
            s_chains = l_jordan_chains[s]
            block = self.jordan_cell(eigenval, s)
            number_of_s_chains=len(s_chains)
            for i in range(0, number_of_s_chains):
                Jcells.append(type(self)(block))
                chain_vectors = s_chains[i]
                lc = len(chain_vectors)
                # Each chain must contain exactly s generalized
                # eigenvectors -- one per row/column of its block.
                assert lc == s
                for j in range(0, lc):
                    generalized_eigen_vector = chain_vectors[j]
                    Pcols_new.append(generalized_eigen_vector)

    # Assemble P column by column from the collected chain vectors.
    P = MutableMatrix.zeros(n)
    for j in range(0, n):
        P[:, j] = Pcols_new[j]
    return type(self)(P), Jcells
def _jordan_split(self, algebraical, geometrical):
"""Return a list of integers with sum equal to 'algebraical'
and length equal to 'geometrical'"""
n1 = algebraical // geometrical
res = [n1]*geometrical
res[len(res) - 1] += algebraical % geometrical
assert sum(res) == algebraical
return res
def has(self, *patterns):
    """Test whether any subexpression matches any of the patterns.

    Examples
    ========

    >>> from sympy import Matrix, Float
    >>> from sympy.abc import x, y
    >>> A = Matrix(((1, x), (0.2, 3)))
    >>> A.has(x)
    True
    >>> A.has(y)
    False
    >>> A.has(Float)
    True
    """
    # Delegate to each entry; stop at the first match.
    for entry in self._mat:
        if entry.has(*patterns):
            return True
    return False
def dual(self):
    """Returns the dual of a matrix, which is:

    `(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`

    Since the levicivita method is anti_symmetric for any pairwise
    exchange of indices, the dual of a symmetric matrix is the zero
    matrix. Strictly speaking the dual defined here assumes that the
    'matrix' `M` is a contravariant anti_symmetric second rank tensor,
    so that the dual is a covariant second rank tensor.
    """
    from sympy import LeviCivita
    from sympy.matrices import zeros

    M, n = self[:, :], self.rows
    work = zeros(n)
    # A symmetric matrix dualizes to zero (the Levi-Civita symbol is
    # antisymmetric), so short-circuit that case.
    if self.is_symmetric():
        return work

    # Entries with both indices >= 1 contract against the first row of
    # M; the result is antisymmetric, so each acum fills a mirrored
    # pair of entries.
    for i in range(1, n):
        for j in range(1, n):
            acum = 0
            for k in range(1, n):
                acum += LeviCivita(i, j, 0, k)*M[0, k]
            work[i, j] = acum
            work[j, i] = -acum

    # First row/column: contract over the remaining index pairs and
    # halve, per the 1/2 factor in the definition above.
    for l in range(1, n):
        acum = 0
        for a in range(1, n):
            for b in range(1, n):
                acum += LeviCivita(0, l, a, b)*M[a, b]
        acum /= 2
        work[0, l] = -acum
        work[l, 0] = acum

    return work
@classmethod
def hstack(cls, *args):
    """Return a matrix formed by joining args horizontally (i.e.
    by repeated application of row_join).

    Examples
    ========

    >>> from sympy.matrices import Matrix, eye
    >>> Matrix.hstack(eye(2), 2*eye(2))
    Matrix([
    [1, 0, 2, 0],
    [0, 1, 0, 2]])
    """
    # Left-fold row_join over the arguments; raises TypeError on an
    # empty argument list (reduce with no initializer).
    return reduce(cls.row_join, args)
@classmethod
def vstack(cls, *args):
    """Return a matrix formed by joining args vertically (i.e.
    by repeated application of col_join).

    Examples
    ========

    >>> from sympy.matrices import Matrix, eye
    >>> Matrix.vstack(eye(2), 2*eye(2))
    Matrix([
    [1, 0],
    [0, 1],
    [2, 0],
    [0, 2]])
    """
    # Left-fold col_join over the arguments; raises TypeError on an
    # empty argument list (reduce with no initializer).
    return reduce(cls.col_join, args)
def row_join(self, rhs):
    """Append the columns of ``rhs`` to the right of ``self``.

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(3, 1)
    >>> M.row_join(V)
    Matrix([
    [0, 0, 0, 1],
    [0, 0, 0, 1],
    [0, 0, 0, 1]])

    See Also
    ========

    row
    col_join
    """
    from sympy.matrices import MutableMatrix

    if self.rows != rhs.rows:
        raise ShapeError(
            "`self` and `rhs` must have the same number of rows.")

    # Build a mutable scratch matrix, copy both operands in, then
    # convert back to the caller's class.
    split = self.cols
    combined = MutableMatrix.zeros(self.rows, split + rhs.cols)
    combined[:, :split] = self
    combined[:, split:] = rhs
    return type(self)(combined)
def col_join(self, bott):
    """Append the rows of ``bott`` below the rows of ``self``.

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(1, 3)
    >>> M.col_join(V)
    Matrix([
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0],
    [1, 1, 1]])

    See Also
    ========

    col
    row_join
    """
    from sympy.matrices import MutableMatrix

    if self.cols != bott.cols:
        raise ShapeError(
            "`self` and `bott` must have the same number of columns.")

    # Build a mutable scratch matrix, copy both operands in, then
    # convert back to the caller's class.
    split = self.rows
    stacked = MutableMatrix.zeros(split + bott.rows, self.cols)
    stacked[:split, :] = self
    stacked[split:, :] = bott
    return type(self)(stacked)
def row_insert(self, pos, mti):
    """Insert one or more rows at the given row position.

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(1, 3)
    >>> M.row_insert(1, V)
    Matrix([
    [0, 0, 0],
    [1, 1, 1],
    [0, 0, 0],
    [0, 0, 0]])

    See Also
    ========

    row
    col_insert
    """
    # Inserting at the very top is just stacking mti above self.
    if pos == 0:
        return mti.col_join(self)
    elif pos < 0:
        # Negative positions count from the end; clamp to 0 if the
        # offset is still out of range.
        pos = self.rows + pos
        if pos < 0:
            pos = 0
    elif pos > self.rows:
        pos = self.rows

    if self.cols != mti.cols:
        raise ShapeError(
            "`self` and `mti` must have the same number of columns.")

    newmat = self.zeros(self.rows + mti.rows, self.cols)
    i, j = pos, pos + mti.rows
    newmat[:i, :] = self[:i, :]
    newmat[i: j, :] = mti
    newmat[j:, :] = self[i:, :]
    # NOTE(review): unlike col_insert, the result is not wrapped in
    # type(self); presumably self.zeros already returns the right
    # class here -- confirm.
    return newmat
def col_insert(self, pos, mti):
    """Insert one or more columns at the given column position.

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(3, 1)
    >>> M.col_insert(1, V)
    Matrix([
    [0, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 1, 0, 0]])

    See Also
    ========

    col
    row_insert
    """
    from sympy.matrices import MutableMatrix

    # Inserting at the far left is just placing mti before self.
    if pos == 0:
        return mti.row_join(self)
    # Negative positions count from the end; afterwards clamp the
    # position into the valid range [0, self.cols].
    if pos < 0:
        pos += self.cols
    pos = min(max(pos, 0), self.cols)

    if self.rows != mti.rows:
        raise ShapeError("self and mti must have the same number of rows.")

    combined = MutableMatrix.zeros(self.rows, self.cols + mti.cols)
    left, right = pos, pos + mti.cols
    combined[:, :left] = self[:, :left]
    combined[:, left:right] = mti
    combined[:, right:] = self[:, left:]
    return type(self)(combined)
def replace(self, F, G, map=False):
    """Replaces Function F in Matrix entries with Function G.

    Examples
    ========

    >>> from sympy import symbols, Function, Matrix
    >>> F, G = symbols('F, G', cls=Function)
    >>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
    Matrix([
    [F(0), F(1)],
    [F(1), F(2)]])
    >>> N = M.replace(F,G)
    >>> N
    Matrix([
    [G(0), G(1)],
    [G(1), G(2)]])
    """
    # Work on a copy and delegate the substitution to each entry.
    copied = self[:, :]
    return copied.applyfunc(lambda entry: entry.replace(F, G, map))
def pinv(self):
    """Calculate the Moore-Penrose pseudoinverse of the matrix.

    The Moore-Penrose pseudoinverse exists and is unique for any matrix.
    If the matrix is invertible, the pseudoinverse is the same as the
    inverse.

    Examples
    ========

    >>> from sympy import Matrix
    >>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
    Matrix([
    [-17/18,  4/9],
    [  -1/9,  1/9],
    [ 13/18, -2/9]])

    See Also
    ========

    inv
    pinv_solve

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
    """
    A = self
    AH = self.H
    # Trivial case: pseudoinverse of all-zero matrix is its transpose.
    if A.is_zero:
        return AH
    try:
        # Full-column-rank: left pseudoinverse; full-row-rank: right
        # pseudoinverse. Which Gram matrix is invertible depends on
        # the shape.
        if self.rows >= self.cols:
            return (AH * A).inv() * AH
        else:
            return AH * (A * AH).inv()
    except ValueError:
        # Matrix is not full rank, so A*AH cannot be inverted.
        raise NotImplementedError('Rank-deficient matrices are not yet '
                                  'supported.')
def pinv_solve(self, B, arbitrary_matrix=None):
    """Solve Ax = B using the Moore-Penrose pseudoinverse.

    There may be zero, one, or infinite solutions.  If one solution
    exists, it will be returned.  If infinite solutions exist, one will
    be returned based on the value of arbitrary_matrix.  If no solutions
    exist, the least-squares solution is returned.

    Parameters
    ==========

    B : Matrix
        The right hand side of the equation to be solved for.  Must have
        the same number of rows as matrix A.
    arbitrary_matrix : Matrix
        If the system is underdetermined (e.g. A has more columns than
        rows), infinite solutions are possible, in terms of an arbitrary
        matrix.  This parameter may be set to a specific matrix to use
        for that purpose; if so, it must be the same shape as x, with as
        many rows as matrix A has columns, and as many columns as matrix
        B.  If left as None, an appropriate matrix containing dummy
        symbols in the form of ``wn_m`` will be used, with n and m being
        row and column position of each symbol.

    Returns
    =======

    x : Matrix
        The matrix that will satisfy Ax = B.  Will have as many rows as
        matrix A has columns, and as many columns as matrix B.

    Examples
    ========

    >>> from sympy import Matrix
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> B = Matrix([7, 8])
    >>> A.pinv_solve(B)
    Matrix([
    [ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
    [-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
    [ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
    >>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
    Matrix([
    [-55/18],
    [   1/9],
    [ 59/18]])

    See Also
    ========

    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv

    Notes
    =====

    This may return either exact solutions or least squares solutions.
    To determine which, check ``A * A.pinv() * B == B``.  It will be
    True if exact solutions exist, and False if only a least-squares
    solution exists.  Be aware that the left hand side of that equation
    may need to be simplified to correctly compare to the right hand
    side.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
    """
    from sympy.matrices import eye

    A = self
    A_pinv = self.pinv()
    if arbitrary_matrix is None:
        rows, cols = A.cols, B.cols
        # Dummy symbols w<row>_<col> parameterize the affine family of
        # solutions when the system is underdetermined.
        w = symbols('w:{0}_:{1}'.format(rows, cols), cls=Dummy)
        arbitrary_matrix = self.__class__(cols, rows, w).T
    # General solution: particular solution plus the null-space
    # projector applied to the arbitrary matrix (see reference [1]).
    return A_pinv * B + (eye(A.cols) - A_pinv*A) * arbitrary_matrix
def classof(A, B):
    """
    Get the type of the result when combining matrices of different types.

    Currently the strategy is that immutability is contagious.

    Examples
    ========

    >>> from sympy import Matrix, ImmutableMatrix
    >>> from sympy.matrices.matrices import classof
    >>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
    >>> IM = ImmutableMatrix([[1, 2], [3, 4]])
    >>> classof(M, IM)
    <class 'sympy.matrices.immutable.ImmutableMatrix'>
    """
    # Prefer the class with the higher priority.  The previous bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit; only a
    # missing _class_priority attribute should fall through here.
    try:
        if A._class_priority > B._class_priority:
            return A.__class__
        else:
            return B.__class__
    except AttributeError:
        pass
    # Fall back: combining a numpy array with a matrix yields the
    # matrix's class.  Only a missing numpy installation should be
    # tolerated, not arbitrary errors.
    try:
        import numpy
        if isinstance(A, numpy.ndarray):
            return B.__class__
        if isinstance(B, numpy.ndarray):
            return A.__class__
    except ImportError:
        pass
    raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
def a2idx(j, n=None):
    """Return integer after making positive and validating against n."""
    # Slices pass through untouched.
    if isinstance(j, slice):
        return j
    # Anything that is not exactly an int must supply __index__.
    if type(j) is not int:
        try:
            j = j.__index__()
        except AttributeError:
            raise IndexError("Invalid index a[%r]" % (j, ))
    # Without a bound there is nothing to validate.
    if n is None:
        return int(j)
    # Normalize negative indices, then range-check.
    if j < 0:
        j += n
    if 0 <= j < n:
        return int(j)
    raise IndexError("Index out of range: a[%s]" % (j, ))
|
unho/pootle | refs/heads/master | tests/misc/state.py | 9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle.core.state import State, ItemState
class DummyContext(object):
    """Minimal context object with a stable str() for assertions."""

    def __str__(self):
        return "<DummyContext object>"
def test_state_instance():
    """A freshly constructed State wraps its context and reports nothing."""
    context = DummyContext()
    state = State(context)
    assert state.context == context
    assert state.__state__ == {}
    assert state.prefix == "state"
    assert state.has_changed is False
    assert state.states == []
    assert "x" not in state
    assert list(state) == []
    assert state.item_state_class == ItemState
    assert str(state) == (
        "<State(<DummyContext object>): Nothing to report>")
def test_state_states():
    """By default the State class will automagically find any
    methods that start with `state_` and create a list of states
    from these.

    In a descendant class you can manually set the states, in order to
    control which state methods are called and in what order. This tests
    that behaviour.
    """

    class ContextualState(State):

        @property
        def states(self):
            # Explicit list: state_baz is deliberately omitted.
            return ["foo", "bar", "empty"]

        def state_foo(self, **kwargs):
            for x in [1, 2, 3]:
                yield {str(x): x}

        def state_bar(self, **kwargs):
            for x in [4, 5, 6, 7, 8, 9]:
                yield {str(x): x}

        def state_baz(self, **kwargs):
            yield dict(never="called")

        def state_empty(self, **kwargs):
            return []

    context = DummyContext()
    state = ContextualState(context)
    # str() only lists states that produced items, in declared order.
    assert str(state) == (
        "<ContextualState(<DummyContext object>): foo: 3, bar: 6>")
    assert state.context == context
    assert sorted(state.__state__.keys()) == ["bar", "empty", "foo"]
    assert "empty" in state.__state__
    assert "baz" not in state.__state__
    assert state["empty"] == []
    with pytest.raises(KeyError):
        state["baz"]
    # Iteration skips empty states.
    assert sorted(state) == ["bar", "foo"]
    assert state.has_changed is True
    assert state.states == ["foo", "bar", "empty"]
    assert len(state["foo"]) == 3
    assert isinstance(state["foo"][0], state.item_state_class)
    assert state["foo"][0].kwargs == {"1": 1}
    assert state["foo"][0].state == state
    assert state["foo"][0].state_type == "foo"
def test_state_all_states():
    """All `state_*` methods are discovered when `states` is not overridden."""

    class ContextualState(State):

        def state_foo(self, **kwargs):
            for x in [1, 2, 3]:
                yield {str(x): x}

        def state_bar(self, **kwargs):
            for x in [4, 5, 6, 7, 8, 9]:
                yield {str(x): x}

        def state_baz(self, **kwargs):
            for x in [10, 11, 12]:
                yield {str(x): x}

        def state_empty(self, **kwargs):
            return []

    context = DummyContext()
    state = ContextualState(context)
    # With auto-discovery the report order is alphabetical.
    assert str(state) == (
        "<ContextualState(<DummyContext object>): bar: 6, baz: 3, foo: 3>")
    assert state.context == context
    assert sorted(state.__state__.keys()) == ['bar', 'baz', 'empty', 'foo']
    assert "baz" in state.__state__
    assert sorted(state) == ["bar", "baz", "foo"]
    assert state.has_changed is True
    assert state.states == ['bar', 'baz', 'empty', 'foo']
    assert len(state["baz"]) == 3
def test_state_properties():
    """`state_*` members may be properties; kwargs are read via self.kwargs."""

    class ContextualState(State):

        @property
        def state_foo(self):
            for x in self.kwargs["baz"]:
                yield {"foo%s" % x: x}

        @property
        def state_bar(self):
            for x in self.kwargs["baz"]:
                yield {"bar%s" % x: x}

    context = DummyContext()
    state = ContextualState(context, baz=[1, 2])
    assert state["foo"][0].kwargs.items() == [("foo1", 1)]
    assert state["foo"][1].kwargs.items() == [("foo2", 2)]
    assert state["bar"][0].kwargs.items() == [("bar1", 1)]
    assert state["bar"][1].kwargs.items() == [("bar2", 2)]
def test_state_item_kwargs():
    """Yielded kwargs become attributes on the resulting ItemState objects."""

    class ContextualState(State):

        @property
        def state_foo(self):
            for x in self.kwargs["baz"]:
                yield {"foo%s" % x: x}

        def state_bar(self, **kwargs):
            for x in self.kwargs["baz"]:
                yield {"bar%s" % x: x}

    context = DummyContext()
    state = ContextualState(context, baz=[1, 2])
    # Each item only exposes the keys it was created with.
    assert state["foo"][0].kwargs.items() == [("foo1", 1)]
    assert state["foo"][0].foo1 == 1
    assert not hasattr(state["foo"][0], "foo2")
    assert state["foo"][1].kwargs.items() == [("foo2", 2)]
    assert state["foo"][1].foo2 == 2
    assert not hasattr(state["foo"][1], "foo3")
    assert state["bar"][0].kwargs.items() == [("bar1", 1)]
    assert state["bar"][0].bar1 == 1
    assert not hasattr(state["bar"][0], "bar2")
    assert state["bar"][1].kwargs.items() == [("bar2", 2)]
    assert state["bar"][1].bar2 == 2
    assert not hasattr(state["bar"][1], "bar3")
def test_state_bad():
    """Constructor validation: context is required, `states` must be
    iterable, and state methods must yield dict-like objects."""

    # requires a context
    with pytest.raises(TypeError):
        State()

    class ContextualState(State):
        states = 3

    # context.states must be iterable if set
    with pytest.raises(TypeError):
        ContextualState(DummyContext())

    class ContextualState(State):

        def state_foo(self, **kwargs):
            yield []

    # context.state_* methods should yield dict-like object
    with pytest.raises(TypeError):
        ContextualState(DummyContext())
def test_state_item_instance():
    """An ItemState knows its parent state, its type, and compares by value."""

    # Local shadow of the module-level DummyContext; behaves identically.
    class DummyContext(object):

        def __str__(self):
            return "<DummyContext object>"

    context = DummyContext()
    state = State(context)
    item = ItemState(state, "foo")
    assert item.state == state
    assert item.state_type == "foo"
    assert str(item) == (
        "<ItemState(<DummyContext object>): foo {}>")
    # Equality is by state/type/kwargs, not identity.
    assert item == ItemState(state, "foo")
def test_state_kwargs():
    """Keyword arguments given to State are stored and copied onto items."""

    class ContextualState(State):

        def state_foo(self, **kwargs):
            yield kwargs

    kwargs = dict(kwarg1="kw1", kwarg2="kw2")
    state = ContextualState(DummyContext(), **kwargs)
    assert state.kwargs == kwargs
    assert state["foo"][0].kwargs == kwargs
    # These two comparisons previously lacked `assert` and were no-op
    # expressions, so the attribute check was never actually enforced.
    assert state["foo"][0].kwarg1 == "kw1"
    assert state["foo"][0].kwarg2 == "kw2"
def test_state_item_bad():
    """ItemState requires both a parent state and a state_type."""

    class ContextualState(State):

        def state_foo(self, **kwargs):
            for x in [1, 2, 3]:
                yield {str(x): x}

    # needs state and state_type arg
    with pytest.raises(TypeError):
        ItemState()

    # needs state_type arg
    with pytest.raises(TypeError):
        ItemState(ContextualState(DummyContext()))

    assert ItemState(ContextualState(DummyContext()), "foo")
def test_state_reload():
    """State results are cached until reload(); clear_cache() empties them."""

    class ContextualState(State):

        def state_foo(self, **kwargs):
            yield dict(result=(2 * self.context.base))

    context = DummyContext()
    context.base = 2
    state = ContextualState(context)
    assert state["foo"][0].kwargs["result"] == 4
    # Mutating the context alone does not invalidate the cached result.
    context.base = 3
    assert state["foo"][0].kwargs["result"] == 4
    state.reload()
    assert state["foo"][0].kwargs["result"] == 6
    state.clear_cache()
    assert list(state) == []
|
sccn/SNAP | refs/heads/master | src/pylsl/binaries-python2.6-win32/liblsl.py | 24 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_liblsl', [dirname(__file__)])
except ImportError:
import _liblsl
return _liblsl
if fp is not None:
try:
_mod = imp.load_module('_liblsl', fp, pathname, description)
finally:
fp.close()
return _mod
_liblsl = swig_import_helper()
del swig_import_helper
else:
import _liblsl
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _liblsl.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _liblsl.SwigPyIterator_value(self)
def incr(self, n=1): return _liblsl.SwigPyIterator_incr(self, n)
def decr(self, n=1): return _liblsl.SwigPyIterator_decr(self, n)
def distance(self, *args): return _liblsl.SwigPyIterator_distance(self, *args)
def equal(self, *args): return _liblsl.SwigPyIterator_equal(self, *args)
def copy(self): return _liblsl.SwigPyIterator_copy(self)
def next(self): return _liblsl.SwigPyIterator_next(self)
def __next__(self): return _liblsl.SwigPyIterator___next__(self)
def previous(self): return _liblsl.SwigPyIterator_previous(self)
def advance(self, *args): return _liblsl.SwigPyIterator_advance(self, *args)
def __eq__(self, *args): return _liblsl.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args): return _liblsl.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args): return _liblsl.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args): return _liblsl.SwigPyIterator___isub__(self, *args)
def __add__(self, *args): return _liblsl.SwigPyIterator___add__(self, *args)
def __sub__(self, *args): return _liblsl.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _liblsl.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
cf_float32 = _liblsl.cf_float32
cf_double64 = _liblsl.cf_double64
cf_string = _liblsl.cf_string
cf_int32 = _liblsl.cf_int32
cf_int16 = _liblsl.cf_int16
cf_int8 = _liblsl.cf_int8
cf_int64 = _liblsl.cf_int64
cf_undefined = _liblsl.cf_undefined
def protocol_version():
return _liblsl.protocol_version()
protocol_version = _liblsl.protocol_version
def library_version():
return _liblsl.library_version()
library_version = _liblsl.library_version
def local_clock():
return _liblsl.local_clock()
local_clock = _liblsl.local_clock
class stream_info(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, stream_info, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, stream_info, name)
__repr__ = _swig_repr
def name(self): return _liblsl.stream_info_name(self)
def type(self): return _liblsl.stream_info_type(self)
def channel_count(self): return _liblsl.stream_info_channel_count(self)
def nominal_srate(self): return _liblsl.stream_info_nominal_srate(self)
def channel_format(self): return _liblsl.stream_info_channel_format(self)
def source_id(self): return _liblsl.stream_info_source_id(self)
def version(self): return _liblsl.stream_info_version(self)
def created_at(self): return _liblsl.stream_info_created_at(self)
def uid(self): return _liblsl.stream_info_uid(self)
def session_id(self): return _liblsl.stream_info_session_id(self)
def hostname(self): return _liblsl.stream_info_hostname(self)
def desc(self, *args): return _liblsl.stream_info_desc(self, *args)
def as_xml(self): return _liblsl.stream_info_as_xml(self)
def channel_bytes(self): return _liblsl.stream_info_channel_bytes(self)
def sample_bytes(self): return _liblsl.stream_info_sample_bytes(self)
def impl(self, *args): return _liblsl.stream_info_impl(self, *args)
def __init__(self, *args):
this = _liblsl.new_stream_info(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _liblsl.delete_stream_info
__del__ = lambda self : None;
stream_info_swigregister = _liblsl.stream_info_swigregister
stream_info_swigregister(stream_info)
cvar = _liblsl.cvar
IRREGULAR_RATE = cvar.IRREGULAR_RATE
DEDUCED_TIMESTAMP = cvar.DEDUCED_TIMESTAMP
FOREVER = cvar.FOREVER
class stream_outlet(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, stream_outlet, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, stream_outlet, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _liblsl.new_stream_outlet(*args)
try: self.this.append(this)
except: self.this = this
def push_sample(self, *args): return _liblsl.stream_outlet_push_sample(self, *args)
def push_numeric_raw(self, *args): return _liblsl.stream_outlet_push_numeric_raw(self, *args)
def have_consumers(self): return _liblsl.stream_outlet_have_consumers(self)
def wait_for_consumers(self, *args): return _liblsl.stream_outlet_wait_for_consumers(self, *args)
def info(self): return _liblsl.stream_outlet_info(self)
__swig_destroy__ = _liblsl.delete_stream_outlet
__del__ = lambda self : None;
stream_outlet_swigregister = _liblsl.stream_outlet_swigregister
stream_outlet_swigregister(stream_outlet)
def resolve_streams(wait_time=1.0):
return _liblsl.resolve_streams(wait_time)
resolve_streams = _liblsl.resolve_streams
def resolve_stream(*args):
return _liblsl.resolve_stream(*args)
resolve_stream = _liblsl.resolve_stream
class continuous_resolver(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, continuous_resolver, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, continuous_resolver, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _liblsl.new_continuous_resolver(*args)
try: self.this.append(this)
except: self.this = this
def results(self): return _liblsl.continuous_resolver_results(self)
__swig_destroy__ = _liblsl.delete_continuous_resolver
__del__ = lambda self : None;
continuous_resolver_swigregister = _liblsl.continuous_resolver_swigregister
continuous_resolver_swigregister(continuous_resolver)
class stream_inlet(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, stream_inlet, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, stream_inlet, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _liblsl.new_stream_inlet(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _liblsl.delete_stream_inlet
__del__ = lambda self : None;
def info(self, *args): return _liblsl.stream_inlet_info(self, *args)
def open_stream(self, *args): return _liblsl.stream_inlet_open_stream(self, *args)
def close_stream(self): return _liblsl.stream_inlet_close_stream(self)
def time_correction(self, *args): return _liblsl.stream_inlet_time_correction(self, *args)
def pull_sample(self, *args): return _liblsl.stream_inlet_pull_sample(self, *args)
def pull_numeric_raw(self, *args): return _liblsl.stream_inlet_pull_numeric_raw(self, *args)
def samples_available(self): return _liblsl.stream_inlet_samples_available(self)
stream_inlet_swigregister = _liblsl.stream_inlet_swigregister
stream_inlet_swigregister(stream_inlet)
class xml_element(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, xml_element, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, xml_element, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _liblsl.new_xml_element(*args)
try: self.this.append(this)
except: self.this = this
def first_child(self): return _liblsl.xml_element_first_child(self)
def last_child(self): return _liblsl.xml_element_last_child(self)
def parent(self): return _liblsl.xml_element_parent(self)
def child(self, *args): return _liblsl.xml_element_child(self, *args)
def next_sibling(self, *args): return _liblsl.xml_element_next_sibling(self, *args)
def previous_sibling(self, *args): return _liblsl.xml_element_previous_sibling(self, *args)
def empty(self): return _liblsl.xml_element_empty(self)
def is_text(self): return _liblsl.xml_element_is_text(self)
def name(self): return _liblsl.xml_element_name(self)
def value(self): return _liblsl.xml_element_value(self)
def child_value(self, *args): return _liblsl.xml_element_child_value(self, *args)
def append_child_value(self, *args): return _liblsl.xml_element_append_child_value(self, *args)
def prepend_child_value(self, *args): return _liblsl.xml_element_prepend_child_value(self, *args)
def set_child_value(self, *args): return _liblsl.xml_element_set_child_value(self, *args)
def set_name(self, *args): return _liblsl.xml_element_set_name(self, *args)
def set_value(self, *args): return _liblsl.xml_element_set_value(self, *args)
def append_child(self, *args): return _liblsl.xml_element_append_child(self, *args)
def prepend_child(self, *args): return _liblsl.xml_element_prepend_child(self, *args)
def append_copy(self, *args): return _liblsl.xml_element_append_copy(self, *args)
def prepend_copy(self, *args): return _liblsl.xml_element_prepend_copy(self, *args)
def remove_child(self, *args): return _liblsl.xml_element_remove_child(self, *args)
def ptr(self): return _liblsl.xml_element_ptr(self)
__swig_destroy__ = _liblsl.delete_xml_element
__del__ = lambda self : None;
xml_element_swigregister = _liblsl.xml_element_swigregister
xml_element_swigregister(xml_element)
class lost_error(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, lost_error, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, lost_error, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _liblsl.new_lost_error(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _liblsl.delete_lost_error
__del__ = lambda self : None;
lost_error_swigregister = _liblsl.lost_error_swigregister
lost_error_swigregister(lost_error)
class timeout_error(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, timeout_error, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, timeout_error, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _liblsl.new_timeout_error(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _liblsl.delete_timeout_error
__del__ = lambda self : None;
timeout_error_swigregister = _liblsl.timeout_error_swigregister
timeout_error_swigregister(timeout_error)
# SWIG-generated proxy class (presumably wrapping std::vector<float>, per the
# name — confirm in the liblsl SWIG interface); every method delegates to _liblsl.
class vectorf(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vectorf, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vectorf, name)
    __repr__ = _swig_repr
    def iterator(self): return _liblsl.vectorf_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _liblsl.vectorf___nonzero__(self)
    def __bool__(self): return _liblsl.vectorf___bool__(self)
    def __len__(self): return _liblsl.vectorf___len__(self)
    def pop(self): return _liblsl.vectorf_pop(self)
    def __getslice__(self, *args): return _liblsl.vectorf___getslice__(self, *args)
    def __setslice__(self, *args): return _liblsl.vectorf___setslice__(self, *args)
    def __delslice__(self, *args): return _liblsl.vectorf___delslice__(self, *args)
    def __delitem__(self, *args): return _liblsl.vectorf___delitem__(self, *args)
    def __getitem__(self, *args): return _liblsl.vectorf___getitem__(self, *args)
    def __setitem__(self, *args): return _liblsl.vectorf___setitem__(self, *args)
    def append(self, *args): return _liblsl.vectorf_append(self, *args)
    def empty(self): return _liblsl.vectorf_empty(self)
    def size(self): return _liblsl.vectorf_size(self)
    def clear(self): return _liblsl.vectorf_clear(self)
    def swap(self, *args): return _liblsl.vectorf_swap(self, *args)
    def get_allocator(self): return _liblsl.vectorf_get_allocator(self)
    def begin(self): return _liblsl.vectorf_begin(self)
    def end(self): return _liblsl.vectorf_end(self)
    def rbegin(self): return _liblsl.vectorf_rbegin(self)
    def rend(self): return _liblsl.vectorf_rend(self)
    def pop_back(self): return _liblsl.vectorf_pop_back(self)
    def erase(self, *args): return _liblsl.vectorf_erase(self, *args)
    def __init__(self, *args):
        this = _liblsl.new_vectorf(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _liblsl.vectorf_push_back(self, *args)
    def front(self): return _liblsl.vectorf_front(self)
    def back(self): return _liblsl.vectorf_back(self)
    def assign(self, *args): return _liblsl.vectorf_assign(self, *args)
    def resize(self, *args): return _liblsl.vectorf_resize(self, *args)
    def insert(self, *args): return _liblsl.vectorf_insert(self, *args)
    def reserve(self, *args): return _liblsl.vectorf_reserve(self, *args)
    def capacity(self): return _liblsl.vectorf_capacity(self)
    __swig_destroy__ = _liblsl.delete_vectorf
    __del__ = lambda self : None;
vectorf_swigregister = _liblsl.vectorf_swigregister
vectorf_swigregister(vectorf)
# SWIG-generated proxy class (presumably wrapping std::vector<double>, per the
# name — confirm in the liblsl SWIG interface); every method delegates to _liblsl.
class vectord(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vectord, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vectord, name)
    __repr__ = _swig_repr
    def iterator(self): return _liblsl.vectord_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _liblsl.vectord___nonzero__(self)
    def __bool__(self): return _liblsl.vectord___bool__(self)
    def __len__(self): return _liblsl.vectord___len__(self)
    def pop(self): return _liblsl.vectord_pop(self)
    def __getslice__(self, *args): return _liblsl.vectord___getslice__(self, *args)
    def __setslice__(self, *args): return _liblsl.vectord___setslice__(self, *args)
    def __delslice__(self, *args): return _liblsl.vectord___delslice__(self, *args)
    def __delitem__(self, *args): return _liblsl.vectord___delitem__(self, *args)
    def __getitem__(self, *args): return _liblsl.vectord___getitem__(self, *args)
    def __setitem__(self, *args): return _liblsl.vectord___setitem__(self, *args)
    def append(self, *args): return _liblsl.vectord_append(self, *args)
    def empty(self): return _liblsl.vectord_empty(self)
    def size(self): return _liblsl.vectord_size(self)
    def clear(self): return _liblsl.vectord_clear(self)
    def swap(self, *args): return _liblsl.vectord_swap(self, *args)
    def get_allocator(self): return _liblsl.vectord_get_allocator(self)
    def begin(self): return _liblsl.vectord_begin(self)
    def end(self): return _liblsl.vectord_end(self)
    def rbegin(self): return _liblsl.vectord_rbegin(self)
    def rend(self): return _liblsl.vectord_rend(self)
    def pop_back(self): return _liblsl.vectord_pop_back(self)
    def erase(self, *args): return _liblsl.vectord_erase(self, *args)
    def __init__(self, *args):
        this = _liblsl.new_vectord(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _liblsl.vectord_push_back(self, *args)
    def front(self): return _liblsl.vectord_front(self)
    def back(self): return _liblsl.vectord_back(self)
    def assign(self, *args): return _liblsl.vectord_assign(self, *args)
    def resize(self, *args): return _liblsl.vectord_resize(self, *args)
    def insert(self, *args): return _liblsl.vectord_insert(self, *args)
    def reserve(self, *args): return _liblsl.vectord_reserve(self, *args)
    def capacity(self): return _liblsl.vectord_capacity(self)
    __swig_destroy__ = _liblsl.delete_vectord
    __del__ = lambda self : None;
vectord_swigregister = _liblsl.vectord_swigregister
vectord_swigregister(vectord)
# SWIG-generated proxy class (presumably wrapping std::vector<long>, per the
# name — confirm in the liblsl SWIG interface); every method delegates to _liblsl.
class vectorl(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vectorl, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vectorl, name)
    __repr__ = _swig_repr
    def iterator(self): return _liblsl.vectorl_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _liblsl.vectorl___nonzero__(self)
    def __bool__(self): return _liblsl.vectorl___bool__(self)
    def __len__(self): return _liblsl.vectorl___len__(self)
    def pop(self): return _liblsl.vectorl_pop(self)
    def __getslice__(self, *args): return _liblsl.vectorl___getslice__(self, *args)
    def __setslice__(self, *args): return _liblsl.vectorl___setslice__(self, *args)
    def __delslice__(self, *args): return _liblsl.vectorl___delslice__(self, *args)
    def __delitem__(self, *args): return _liblsl.vectorl___delitem__(self, *args)
    def __getitem__(self, *args): return _liblsl.vectorl___getitem__(self, *args)
    def __setitem__(self, *args): return _liblsl.vectorl___setitem__(self, *args)
    def append(self, *args): return _liblsl.vectorl_append(self, *args)
    def empty(self): return _liblsl.vectorl_empty(self)
    def size(self): return _liblsl.vectorl_size(self)
    def clear(self): return _liblsl.vectorl_clear(self)
    def swap(self, *args): return _liblsl.vectorl_swap(self, *args)
    def get_allocator(self): return _liblsl.vectorl_get_allocator(self)
    def begin(self): return _liblsl.vectorl_begin(self)
    def end(self): return _liblsl.vectorl_end(self)
    def rbegin(self): return _liblsl.vectorl_rbegin(self)
    def rend(self): return _liblsl.vectorl_rend(self)
    def pop_back(self): return _liblsl.vectorl_pop_back(self)
    def erase(self, *args): return _liblsl.vectorl_erase(self, *args)
    def __init__(self, *args):
        this = _liblsl.new_vectorl(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _liblsl.vectorl_push_back(self, *args)
    def front(self): return _liblsl.vectorl_front(self)
    def back(self): return _liblsl.vectorl_back(self)
    def assign(self, *args): return _liblsl.vectorl_assign(self, *args)
    def resize(self, *args): return _liblsl.vectorl_resize(self, *args)
    def insert(self, *args): return _liblsl.vectorl_insert(self, *args)
    def reserve(self, *args): return _liblsl.vectorl_reserve(self, *args)
    def capacity(self): return _liblsl.vectorl_capacity(self)
    __swig_destroy__ = _liblsl.delete_vectorl
    __del__ = lambda self : None;
vectorl_swigregister = _liblsl.vectorl_swigregister
vectorl_swigregister(vectorl)
# SWIG-generated proxy class (presumably wrapping std::vector<int>, per the
# name — confirm in the liblsl SWIG interface); every method delegates to _liblsl.
class vectori(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vectori, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vectori, name)
    __repr__ = _swig_repr
    def iterator(self): return _liblsl.vectori_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _liblsl.vectori___nonzero__(self)
    def __bool__(self): return _liblsl.vectori___bool__(self)
    def __len__(self): return _liblsl.vectori___len__(self)
    def pop(self): return _liblsl.vectori_pop(self)
    def __getslice__(self, *args): return _liblsl.vectori___getslice__(self, *args)
    def __setslice__(self, *args): return _liblsl.vectori___setslice__(self, *args)
    def __delslice__(self, *args): return _liblsl.vectori___delslice__(self, *args)
    def __delitem__(self, *args): return _liblsl.vectori___delitem__(self, *args)
    def __getitem__(self, *args): return _liblsl.vectori___getitem__(self, *args)
    def __setitem__(self, *args): return _liblsl.vectori___setitem__(self, *args)
    def append(self, *args): return _liblsl.vectori_append(self, *args)
    def empty(self): return _liblsl.vectori_empty(self)
    def size(self): return _liblsl.vectori_size(self)
    def clear(self): return _liblsl.vectori_clear(self)
    def swap(self, *args): return _liblsl.vectori_swap(self, *args)
    def get_allocator(self): return _liblsl.vectori_get_allocator(self)
    def begin(self): return _liblsl.vectori_begin(self)
    def end(self): return _liblsl.vectori_end(self)
    def rbegin(self): return _liblsl.vectori_rbegin(self)
    def rend(self): return _liblsl.vectori_rend(self)
    def pop_back(self): return _liblsl.vectori_pop_back(self)
    def erase(self, *args): return _liblsl.vectori_erase(self, *args)
    def __init__(self, *args):
        this = _liblsl.new_vectori(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _liblsl.vectori_push_back(self, *args)
    def front(self): return _liblsl.vectori_front(self)
    def back(self): return _liblsl.vectori_back(self)
    def assign(self, *args): return _liblsl.vectori_assign(self, *args)
    def resize(self, *args): return _liblsl.vectori_resize(self, *args)
    def insert(self, *args): return _liblsl.vectori_insert(self, *args)
    def reserve(self, *args): return _liblsl.vectori_reserve(self, *args)
    def capacity(self): return _liblsl.vectori_capacity(self)
    __swig_destroy__ = _liblsl.delete_vectori
    __del__ = lambda self : None;
vectori_swigregister = _liblsl.vectori_swigregister
vectori_swigregister(vectori)
# SWIG-generated proxy class (element type unclear from the name alone —
# possibly std::vector<short>; confirm in the liblsl SWIG interface);
# every method delegates to _liblsl.
class vectors(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vectors, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vectors, name)
    __repr__ = _swig_repr
    def iterator(self): return _liblsl.vectors_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _liblsl.vectors___nonzero__(self)
    def __bool__(self): return _liblsl.vectors___bool__(self)
    def __len__(self): return _liblsl.vectors___len__(self)
    def pop(self): return _liblsl.vectors_pop(self)
    def __getslice__(self, *args): return _liblsl.vectors___getslice__(self, *args)
    def __setslice__(self, *args): return _liblsl.vectors___setslice__(self, *args)
    def __delslice__(self, *args): return _liblsl.vectors___delslice__(self, *args)
    def __delitem__(self, *args): return _liblsl.vectors___delitem__(self, *args)
    def __getitem__(self, *args): return _liblsl.vectors___getitem__(self, *args)
    def __setitem__(self, *args): return _liblsl.vectors___setitem__(self, *args)
    def append(self, *args): return _liblsl.vectors_append(self, *args)
    def empty(self): return _liblsl.vectors_empty(self)
    def size(self): return _liblsl.vectors_size(self)
    def clear(self): return _liblsl.vectors_clear(self)
    def swap(self, *args): return _liblsl.vectors_swap(self, *args)
    def get_allocator(self): return _liblsl.vectors_get_allocator(self)
    def begin(self): return _liblsl.vectors_begin(self)
    def end(self): return _liblsl.vectors_end(self)
    def rbegin(self): return _liblsl.vectors_rbegin(self)
    def rend(self): return _liblsl.vectors_rend(self)
    def pop_back(self): return _liblsl.vectors_pop_back(self)
    def erase(self, *args): return _liblsl.vectors_erase(self, *args)
    def __init__(self, *args):
        this = _liblsl.new_vectors(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _liblsl.vectors_push_back(self, *args)
    def front(self): return _liblsl.vectors_front(self)
    def back(self): return _liblsl.vectors_back(self)
    def assign(self, *args): return _liblsl.vectors_assign(self, *args)
    def resize(self, *args): return _liblsl.vectors_resize(self, *args)
    def insert(self, *args): return _liblsl.vectors_insert(self, *args)
    def reserve(self, *args): return _liblsl.vectors_reserve(self, *args)
    def capacity(self): return _liblsl.vectors_capacity(self)
    __swig_destroy__ = _liblsl.delete_vectors
    __del__ = lambda self : None;
vectors_swigregister = _liblsl.vectors_swigregister
vectors_swigregister(vectors)
# SWIG-generated proxy class (presumably wrapping std::vector<std::string>, per
# the name — confirm in the liblsl SWIG interface); every method delegates to _liblsl.
class vectorstr(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vectorstr, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vectorstr, name)
    __repr__ = _swig_repr
    def iterator(self): return _liblsl.vectorstr_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _liblsl.vectorstr___nonzero__(self)
    def __bool__(self): return _liblsl.vectorstr___bool__(self)
    def __len__(self): return _liblsl.vectorstr___len__(self)
    def pop(self): return _liblsl.vectorstr_pop(self)
    def __getslice__(self, *args): return _liblsl.vectorstr___getslice__(self, *args)
    def __setslice__(self, *args): return _liblsl.vectorstr___setslice__(self, *args)
    def __delslice__(self, *args): return _liblsl.vectorstr___delslice__(self, *args)
    def __delitem__(self, *args): return _liblsl.vectorstr___delitem__(self, *args)
    def __getitem__(self, *args): return _liblsl.vectorstr___getitem__(self, *args)
    def __setitem__(self, *args): return _liblsl.vectorstr___setitem__(self, *args)
    def append(self, *args): return _liblsl.vectorstr_append(self, *args)
    def empty(self): return _liblsl.vectorstr_empty(self)
    def size(self): return _liblsl.vectorstr_size(self)
    def clear(self): return _liblsl.vectorstr_clear(self)
    def swap(self, *args): return _liblsl.vectorstr_swap(self, *args)
    def get_allocator(self): return _liblsl.vectorstr_get_allocator(self)
    def begin(self): return _liblsl.vectorstr_begin(self)
    def end(self): return _liblsl.vectorstr_end(self)
    def rbegin(self): return _liblsl.vectorstr_rbegin(self)
    def rend(self): return _liblsl.vectorstr_rend(self)
    def pop_back(self): return _liblsl.vectorstr_pop_back(self)
    def erase(self, *args): return _liblsl.vectorstr_erase(self, *args)
    def __init__(self, *args):
        this = _liblsl.new_vectorstr(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _liblsl.vectorstr_push_back(self, *args)
    def front(self): return _liblsl.vectorstr_front(self)
    def back(self): return _liblsl.vectorstr_back(self)
    def assign(self, *args): return _liblsl.vectorstr_assign(self, *args)
    def resize(self, *args): return _liblsl.vectorstr_resize(self, *args)
    def insert(self, *args): return _liblsl.vectorstr_insert(self, *args)
    def reserve(self, *args): return _liblsl.vectorstr_reserve(self, *args)
    def capacity(self): return _liblsl.vectorstr_capacity(self)
    __swig_destroy__ = _liblsl.delete_vectorstr
    __del__ = lambda self : None;
vectorstr_swigregister = _liblsl.vectorstr_swigregister
vectorstr_swigregister(vectorstr)
# SWIG-generated proxy class (presumably a std::vector of LSL stream-info
# objects, per the name — confirm in the liblsl SWIG interface); every method
# delegates to _liblsl.
class vectorinfo(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vectorinfo, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vectorinfo, name)
    __repr__ = _swig_repr
    def iterator(self): return _liblsl.vectorinfo_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _liblsl.vectorinfo___nonzero__(self)
    def __bool__(self): return _liblsl.vectorinfo___bool__(self)
    def __len__(self): return _liblsl.vectorinfo___len__(self)
    def pop(self): return _liblsl.vectorinfo_pop(self)
    def __getslice__(self, *args): return _liblsl.vectorinfo___getslice__(self, *args)
    def __setslice__(self, *args): return _liblsl.vectorinfo___setslice__(self, *args)
    def __delslice__(self, *args): return _liblsl.vectorinfo___delslice__(self, *args)
    def __delitem__(self, *args): return _liblsl.vectorinfo___delitem__(self, *args)
    def __getitem__(self, *args): return _liblsl.vectorinfo___getitem__(self, *args)
    def __setitem__(self, *args): return _liblsl.vectorinfo___setitem__(self, *args)
    def append(self, *args): return _liblsl.vectorinfo_append(self, *args)
    def empty(self): return _liblsl.vectorinfo_empty(self)
    def size(self): return _liblsl.vectorinfo_size(self)
    def clear(self): return _liblsl.vectorinfo_clear(self)
    def swap(self, *args): return _liblsl.vectorinfo_swap(self, *args)
    def get_allocator(self): return _liblsl.vectorinfo_get_allocator(self)
    def begin(self): return _liblsl.vectorinfo_begin(self)
    def end(self): return _liblsl.vectorinfo_end(self)
    def rbegin(self): return _liblsl.vectorinfo_rbegin(self)
    def rend(self): return _liblsl.vectorinfo_rend(self)
    def pop_back(self): return _liblsl.vectorinfo_pop_back(self)
    def erase(self, *args): return _liblsl.vectorinfo_erase(self, *args)
    def __init__(self, *args):
        this = _liblsl.new_vectorinfo(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _liblsl.vectorinfo_push_back(self, *args)
    def front(self): return _liblsl.vectorinfo_front(self)
    def back(self): return _liblsl.vectorinfo_back(self)
    def assign(self, *args): return _liblsl.vectorinfo_assign(self, *args)
    def resize(self, *args): return _liblsl.vectorinfo_resize(self, *args)
    def insert(self, *args): return _liblsl.vectorinfo_insert(self, *args)
    def reserve(self, *args): return _liblsl.vectorinfo_reserve(self, *args)
    def capacity(self): return _liblsl.vectorinfo_capacity(self)
    __swig_destroy__ = _liblsl.delete_vectorinfo
    __del__ = lambda self : None;
vectorinfo_swigregister = _liblsl.vectorinfo_swigregister
vectorinfo_swigregister(vectorinfo)
# This file is compatible with both classic and new-style classes.
|
idncom/odoo | refs/heads/8.0 | addons/l10n_si/__init__.py | 439 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright: (C) 2012 - Mentis d.o.o., Dravograd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard |
legrostdg/django-knowledge | refs/heads/master | tests/mock/tests/utils.py | 5 | from mock.tests.base import TestCase
from knowledge.utils import paginate, get_module
class BasicPaginateTest(TestCase):
def test_paginate_helper(self):
paginator, objects = paginate(range(0,1000), 100, 'xcvb')
self.assertEquals(objects.number, 1) # fall back to first page
paginator, objects = paginate(range(0,1000), 100, 154543)
self.assertEquals(objects.number, 10) # fall back to last page
paginator, objects = paginate(range(0,1000), 100, 1)
self.assertEquals(len(objects.object_list), 100)
self.assertEquals(paginator.count, 1000)
self.assertEquals(paginator.num_pages, 10)
def test_importer_basic(self):
from django.template.defaultfilters import slugify
sluggy = get_module('django.template.defaultfilters.slugify')
self.assertTrue(slugify is sluggy)
def test_importer_fail(self):
self.assertRaises(ImportError, get_module, 'django.notreal.america')
self.assertRaises(ImportError, get_module, 'django.template.defaultfilters.slugbug') |
CINPLA/expipe-dev | refs/heads/master | phy/phy/plot/base.py | 2 | # -*- coding: utf-8 -*-
"""Base VisPy classes."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from collections import defaultdict
import logging
import re
from vispy import gloo
from vispy.app import Canvas
from vispy.util.event import Event
from .transform import TransformChain, Clip
from .utils import _load_shader, _enable_depth_mask
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------------
def indent(text):
    """Return *text* with each line stripped and indented by four spaces."""
    stripped_lines = [line.strip() for line in text.splitlines()]
    return '\n'.join('    ' + line for line in stripped_lines)
#------------------------------------------------------------------------------
# Base spike visual
#------------------------------------------------------------------------------
class BaseVisual(object):
    """A Visual represents one object (or homogeneous set of objects).
    It is rendered with a single pass of a single gloo program with a single
    type of GL primitive.
    """
    # NOTE(review): the bare string below is a statement, not a docstring
    # attached to ``allow_list``; kept as-is.
    """Data variables that can be lists of arrays."""
    allow_list = ()
    def __init__(self):
        # GL primitive name; set later via set_primitive_type().
        self.gl_primitive_type = None
        self.transforms = TransformChain()
        # Collects GLSL snippets to splice into this visual's shaders.
        self.inserter = GLSLInserter()
        self.inserter.insert_vert('uniform vec2 u_window_size;', 'header')
        # The program will be set by the canvas when the visual is
        # added to the canvas.
        self.program = None
        # Default filter: pass the canvas' transforms through unchanged.
        self.set_canvas_transforms_filter(lambda t: t)
    # Visual definition
    # -------------------------------------------------------------------------
    def set_shader(self, name):
        # Load `<name>.vert` and `<name>.frag` shader sources.
        self.vertex_shader = _load_shader(name + '.vert')
        self.fragment_shader = _load_shader(name + '.frag')
    def set_primitive_type(self, primitive_type):
        # GL primitive passed to gloo's draw call (e.g. 'points', 'lines').
        self.gl_primitive_type = primitive_type
    def on_draw(self):
        """Draw the visual."""
        # Skip the drawing if the program hasn't been built yet.
        # The program is built by the interact.
        if self.program:
            # Draw the program.
            self.program.draw(self.gl_primitive_type)
        else:  # pragma: no cover
            logger.debug("Skipping drawing visual `%s` because the program "
                         "has not been built yet.", self)
    def on_resize(self, size):
        # HACK: we check whether u_window_size is used in order to avoid
        # the VisPy warning. We only update it if that uniform is active.
        # (The declaration itself is removed before searching, so only an
        # actual *use* of the uniform counts.)
        s = '\n'.join(self.program.shaders)
        s = s.replace('uniform vec2 u_window_size;', '')
        if 'u_window_size' in s:
            self.program['u_window_size'] = size
    # To override
    # -------------------------------------------------------------------------
    @staticmethod
    def validate(**kwargs):
        """Make consistent the input data for the visual."""
        return kwargs  # pragma: no cover
    @staticmethod
    def vertex_count(**kwargs):
        """Return the number of vertices as a function of the input data."""
        return 0  # pragma: no cover
    def set_data(self):
        """Set data to the program.
        Must be called *after* attach(canvas), because the program is built
        when the visual is attached to the canvas.
        """
        raise NotImplementedError()
    def set_canvas_transforms_filter(self, f):
        """Set a function filtering the canvas' transforms."""
        self.canvas_transforms_filter = f
#------------------------------------------------------------------------------
# Build program with interacts
#------------------------------------------------------------------------------
def _insert_glsl(vertex, fragment, to_insert):
    """Insert snippets in a shader.
    to_insert is a dict `{(shader_type, location): snippet}`.
    Snippets can contain `{{ var }}` placeholders for the transformed variable
    name.

    Returns the (vertex, fragment) pair of rewritten shader sources; the
    inputs are returned unchanged when the vertex shader has no transform
    placeholder.
    """
    # Find the place where to insert the GLSL snippet.
    # This is "gl_Position = transform(data_var_name);" where
    # data_var_name is typically an attribute.
    vs_regex = re.compile(r'gl_Position = transform\(([\S]+)\);')
    r = vs_regex.search(vertex)
    if not r:
        logger.debug("The vertex shader doesn't contain the transform "
                     "placeholder: skipping the transform chain "
                     "GLSL insertion.")
        return vertex, fragment
    assert r
    logger.log(5, "Found transform placeholder in vertex code: `%s`",
               r.group(0))
    # Find the GLSL variable with the data (should be a `vec2`).
    var = r.group(1)
    assert var and var in vertex
    # Headers.
    vertex = to_insert['vert', 'header'] + '\n\n' + vertex
    fragment = to_insert['frag', 'header'] + '\n\n' + fragment
    # Get the pre and post transforms.
    vs_insert = to_insert['vert', 'before_transforms']
    vs_insert += to_insert['vert', 'transforms']
    vs_insert += to_insert['vert', 'after_transforms']
    # Insert the GLSL snippet in the vertex shader.
    vertex = vs_regex.sub(indent(vs_insert), vertex)
    # Now, we make the replacements in the fragment shader.
    fs_regex = re.compile(r'(void main\(\)\s*\{)')
    # NOTE: we add the `void main(){` that was removed by the regex.
    fs_insert = '\\1\n' + to_insert['frag', 'before_transforms']
    fragment = fs_regex.sub(indent(fs_insert), fragment)
    # Replace the transformed variable placeholder by its name.
    vertex = vertex.replace('{{ var }}', var)
    return vertex, fragment
class GLSLInserter(object):
    """Insert GLSL snippets into shader codes."""
    def __init__(self):
        # Maps (shader_type, location) -> list of GLSL snippet strings.
        self._to_insert = defaultdict(list)
        # Boilerplate: copy the data variable into a temp, emit gl_Position
        # after all transforms, and declare the varying shared with the
        # fragment shader (used for clipping).
        self.insert_vert('vec2 temp_pos_tr = {{ var }};',
                         'before_transforms')
        self.insert_vert('gl_Position = vec4(temp_pos_tr, 0., 1.);',
                         'after_transforms')
        self.insert_vert('varying vec2 v_temp_pos_tr;\n', 'header')
        self.insert_frag('varying vec2 v_temp_pos_tr;\n', 'header')
    def _insert(self, shader_type, glsl, location):
        # Guard against typos in the location name.
        assert location in (
            'header',
            'before_transforms',
            'transforms',
            'after_transforms',
        )
        self._to_insert[shader_type, location].append(glsl)
    def insert_vert(self, glsl, location='transforms'):
        """Insert a GLSL snippet into the vertex shader.
        The location can be:
        * `header`: declaration of GLSL variables
        * `before_transforms`: just before the transforms in the vertex shader
        * `transforms`: where the GPU transforms are applied in the vertex
          shader
        * `after_transforms`: just after the GPU transforms
        """
        self._insert('vert', glsl, location)
    def insert_frag(self, glsl, location=None):
        """Insert a GLSL snippet into the fragment shader."""
        self._insert('frag', glsl, location)
    def add_transform_chain(self, tc):
        """Insert the GLSL snippets of a transform chain."""
        # Generate the transforms snippet.
        for t in tc.gpu_transforms:
            if isinstance(t, Clip):
                # Set the varying value in the vertex shader.
                self.insert_vert('v_temp_pos_tr = temp_pos_tr;')
                continue
            self.insert_vert(t.glsl('temp_pos_tr'))
        # Clipping.
        clip = tc.get('Clip')
        if clip:
            self.insert_frag(clip.glsl('v_temp_pos_tr'), 'before_transforms')
    def insert_into_shaders(self, vertex, fragment):
        """Apply the insertions to shader code."""
        # Join each snippet list into a single block before splicing.
        to_insert = defaultdict(str)
        to_insert.update({key: '\n'.join(self._to_insert[key]) + '\n'
                          for key in self._to_insert})
        return _insert_glsl(vertex, fragment, to_insert)
    def __add__(self, inserter):
        """Concatenate two inserters."""
        # NOTE(review): this mutates and returns *self* (deduplicating
        # snippets), rather than building a new object as `+` usually does;
        # callers here use `inserter += other` and rely on this behavior.
        for key, values in self._to_insert.items():
            values.extend([_ for _ in inserter._to_insert[key]
                           if _ not in values])
        return self
#------------------------------------------------------------------------------
# Base canvas
#------------------------------------------------------------------------------
class VisualEvent(Event):
    """Event emitted by BaseCanvas when a visual is added; carries the visual."""
    def __init__(self, type, visual=None):
        super(VisualEvent, self).__init__(type)
        self.visual = visual
class BaseCanvas(Canvas):
    """A blank VisPy canvas with a custom event system that keeps the order."""
    def __init__(self, *args, **kwargs):
        super(BaseCanvas, self).__init__(*args, **kwargs)
        # Canvas-level (interactive) transforms, appended after each
        # visual's own transforms.
        self.transforms = TransformChain()
        self.inserter = GLSLInserter()
        self.visuals = []
        # Register the custom `visual_added` event type.
        self.events.add(visual_added=VisualEvent)
        # Enable transparency.
        _enable_depth_mask()
    def add_visual(self, visual):
        """Add a visual to the canvas, and build its program by the same
        occasion.
        We can't build the visual's program before, because we need the canvas'
        transforms first.
        """
        # Retrieve the visual's GLSL inserter.
        inserter = visual.inserter
        # Add the visual's transforms.
        inserter.add_transform_chain(visual.transforms)
        # Then, add the canvas' transforms.
        canvas_transforms = visual.canvas_transforms_filter(self.transforms)
        inserter.add_transform_chain(canvas_transforms)
        # Also, add the canvas' inserter.
        inserter += self.inserter
        # Now, we insert the transforms GLSL into the shaders.
        vs, fs = visual.vertex_shader, visual.fragment_shader
        vs, fs = inserter.insert_into_shaders(vs, fs)
        # Finally, we create the visual's program.
        visual.program = gloo.Program(vs, fs)
        logger.log(5, "Vertex shader: %s", vs)
        logger.log(5, "Fragment shader: %s", fs)
        # Initialize the size.
        visual.on_resize(self.size)
        # Register the visual in the list of visuals in the canvas.
        self.visuals.append(visual)
        self.events.visual_added(visual=visual)
    def on_resize(self, event):
        """Resize the OpenGL context."""
        self.context.set_viewport(0, 0, event.size[0], event.size[1])
        # Propagate the new size so visuals can refresh u_window_size.
        for visual in self.visuals:
            visual.on_resize(event.size)
        self.update()
    def on_draw(self, e):
        """Draw all visuals."""
        gloo.clear()
        # Visuals are drawn in insertion order.
        for visual in self.visuals:
            logger.log(5, "Draw visual `%s`.", visual)
            visual.on_draw()
#------------------------------------------------------------------------------
# Base interact
#------------------------------------------------------------------------------
class BaseInteract(object):
    """Implement dynamic transforms on a canvas."""
    # The attached canvas; None until attach() is called.
    canvas = None
    def attach(self, canvas):
        """Attach this interact to a canvas."""
        self.canvas = canvas
        # Keep programs of late-added visuals up to date as well.
        @canvas.connect
        def on_visual_added(e):
            self.update_program(e.visual.program)
    def update_program(self, program):
        """Override this method to update programs when `self.update()`
        is called."""
        pass
    def update(self):
        """Update all visuals in the attached canvas."""
        # No-op when not attached to any canvas.
        if not self.canvas:
            return
        for visual in self.canvas.visuals:
            self.update_program(visual.program)
        self.canvas.update()
|
openslack/openslack-crawler | refs/heads/master | crawler/utils/select_result.py | 1 | #!/usr/bin/python
# -*-coding:utf-8-*-
import types
from urlparse import urljoin
from w3lib.html import remove_entities
# Default values regarded as "null" by strip_null().
NULL = [None, 'null']
# Return the first item of a sequence, or None if it is empty/falsy.
list_first_item = lambda x: x[0] if x else None
def strip_null(arg, null=None):
    """
    Strip null items from a list/tuple/set, or entries with a null value
    from a dict.

    @param:
        arg: the container (or any other value) to clean
        null: the values regarded as null; if None, the module-level NULL
              list is used

    Returns a new container of the same kind with null items removed.
    Any non-container argument is returned unchanged.
    """
    if null is None:
        null = NULL
    # isinstance() (rather than `type(x) is ...`) also accepts subclasses
    # and, unlike the removed `types.ListType` checks, works on Python 3.
    if isinstance(arg, list):
        return [i for i in arg if i not in null]
    elif isinstance(arg, tuple):
        return tuple(i for i in arg if i not in null)
    elif isinstance(arg, set):
        return arg.difference(set(null))
    elif isinstance(arg, dict):
        return {key: value for key, value in arg.items() if value not in null}
    return arg
def deduplication(arg):
    """
    Remove duplicate items from a list or tuple.

    @param:
        arg: the variable to deduplicate

    Returns a new list/tuple without duplicates; element order is not
    preserved because the implementation round-trips through a set.
    Any other type is returned unchanged.
    """
    # isinstance() instead of exact-type comparison: accepts subclasses and
    # works on Python 3 (types.ListType/TupleType no longer exist there).
    if isinstance(arg, list):
        return list(set(arg))
    elif isinstance(arg, tuple):
        return tuple(set(arg))
    return arg
def clean_link(link_text):
    """Strip surrounding whitespace and quote punctuation from link text."""
    surrounding_junk = "\t\r\n '\""
    return link_text.strip(surrounding_junk)
# Decode the raw link bytes, strip surrounding junk and HTML entities, then
# resolve it against base_url. NOTE(review): `u.decode(...)` implies links
# arrive as bytes — Python 2 style; verify against the caller.
clean_url = lambda base_url, u, response_encoding: urljoin(base_url,
                                                           remove_entities(clean_link(u.decode(response_encoding))))
"""
remove leading and trailing whitespace and punctuation and entities from the given text.
then join the base_url and the link that extract
"""
|
maxziv/SEApp | refs/heads/master | server/lib/flask/_compat.py | 783 | # -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
# Identity function; used as a no-op where only Python 2 needs a real decorator.
_identity = lambda x: x
if not PY2:
    # Python 3: text is str, dict views are already iterators.
    text_type = str
    string_types = (str,)
    integer_types = (int, )
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    from io import StringIO
    def reraise(tp, value, tb=None):
        # Re-raise `value` with traceback `tb`, preserving chained context.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    implements_to_string = _identity
else:
    # Python 2: separate unicode/bytes types and long integers.
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    from cStringIO import StringIO
    # The py2 three-argument raise syntax is a SyntaxError on py3, so it
    # must be hidden inside exec().
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
    def implements_to_string(cls):
        # Derive a bytes __str__ from the class's unicode __str__.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
def with_metaclass(meta, *bases):
    """Create a base class applying metaclass `meta` to `bases`, compatibly
    with both Python 2 and 3, without leaving a dummy class in the MRO."""
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass.  Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
|
fossoult/odoo | refs/heads/8.0 | addons/website_payment/__openerp__.py | 389 | # -*- coding: utf-8 -*-
# Odoo addon manifest: a bare dict literal read by the module loader.
{
    'name': 'Payment: Website Integration',
    'category': 'Website',
    'summary': 'Payment: Website Integration',
    'version': '1.0',
    'description': """Bridge module for acquirers and website.""",
    'author': 'OpenERP SA',
    'depends': [
        'website',
        'payment',
    ],
    'data': [
        'views/website_payment_templates.xml',
        'views/website_settings_payment.xml',
    ],
    'auto_install': False,
}
|
mitya57/django | refs/heads/master | tests/utils_tests/test_datastructures.py | 9 | """
Tests for stuff in django.utils.datastructures.
"""
import copy
from django.test import SimpleTestCase
from django.utils.datastructures import (
DictWrapper, ImmutableList, MultiValueDict, MultiValueDictKeyError,
OrderedSet,
)
class OrderedSetTests(SimpleTestCase):
    """Sanity checks for django.utils.datastructures.OrderedSet."""

    def test_bool(self):
        # An empty set is falsy and becomes truthy once populated
        # (refs #23664).
        ordered = OrderedSet()
        self.assertFalse(ordered)
        ordered.add(1)
        self.assertTrue(ordered)

    def test_len(self):
        # Duplicate adds must not inflate the length.
        ordered = OrderedSet()
        self.assertEqual(len(ordered), 0)
        for item in (1, 2, 2):
            ordered.add(item)
        self.assertEqual(len(ordered), 2)
class MultiValueDictTests(SimpleTestCase):
    """Tests for MultiValueDict: a dict whose values are lists, with
    last-value semantics for plain item access."""

    def test_multivaluedict(self):
        d = MultiValueDict({'name': ['Adrian', 'Simon'],
                            'position': ['Developer']})
        # Plain access returns the *last* value; getlist() returns all.
        self.assertEqual(d['name'], 'Simon')
        self.assertEqual(d.get('name'), 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
        self.assertEqual(
            sorted(d.items()),
            [('name', 'Simon'), ('position', 'Developer')]
        )
        self.assertEqual(
            sorted(d.lists()),
            [('name', ['Adrian', 'Simon']), ('position', ['Developer'])]
        )
        # Missing keys raise MultiValueDictKeyError on item access...
        with self.assertRaises(MultiValueDictKeyError) as cm:
            d.__getitem__('lastname')
        self.assertEqual(str(cm.exception), "'lastname'")
        # ...but get()/getlist() fall back to None/default/empty list.
        self.assertIsNone(d.get('lastname'))
        self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
        self.assertEqual(d.getlist('lastname'), [])
        self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
                         ['Adrian', 'Simon'])
        d.setlist('lastname', ['Holovaty', 'Willison'])
        self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
        # values() yields the last value of each key.
        self.assertEqual(sorted(d.values()), ['Developer', 'Simon', 'Willison'])

    def test_appendlist(self):
        d = MultiValueDict()
        d.appendlist('name', 'Adrian')
        d.appendlist('name', 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])

    def test_copy(self):
        # copy.copy() and MultiValueDict.copy() must behave identically.
        for copy_func in [copy.copy, lambda d: d.copy()]:
            d1 = MultiValueDict({
                "developers": ["Carl", "Fred"]
            })
            self.assertEqual(d1["developers"], "Fred")
            d2 = copy_func(d1)
            d2.update({"developers": "Groucho"})
            # Updating the copy must not leak into the original.
            self.assertEqual(d2["developers"], "Groucho")
            self.assertEqual(d1["developers"], "Fred")

            d1 = MultiValueDict({
                "key": [[]]
            })
            self.assertEqual(d1["key"], [])
            d2 = copy_func(d1)
            d2["key"].append("Penguin")
            # The copy is shallow: mutating a contained list is visible
            # through both the original and the copy.
            self.assertEqual(d1["key"], ["Penguin"])
            self.assertEqual(d2["key"], ["Penguin"])

    def test_dict_translation(self):
        mvd = MultiValueDict({
            'devs': ['Bob', 'Joe'],
            'pm': ['Rory'],
        })
        # dict() flattens to a plain {key: last value} mapping.
        d = mvd.dict()
        self.assertEqual(sorted(d.keys()), sorted(mvd.keys()))
        for key in mvd.keys():
            self.assertEqual(d[key], mvd[key])
        self.assertEqual({}, MultiValueDict().dict())

    def test_getlist_doesnt_mutate(self):
        x = MultiValueDict({'a': ['1', '2'], 'b': ['3']})
        values = x.getlist('a')
        values += x.getlist('b')
        # getlist() hands out a copy, so += on it leaves x intact.
        self.assertEqual(x.getlist('a'), ['1', '2'])

    def test_internal_getlist_does_mutate(self):
        x = MultiValueDict({'a': ['1', '2'], 'b': ['3']})
        values = x._getlist('a')
        values += x._getlist('b')
        # The private _getlist() returns the internal list itself.
        self.assertEqual(x._getlist('a'), ['1', '2', '3'])

    def test_getlist_default(self):
        x = MultiValueDict({'a': [1]})
        MISSING = object()
        # The default is returned as-is (identity preserved).
        values = x.getlist('b', default=MISSING)
        self.assertIs(values, MISSING)

    def test_getlist_none_empty_values(self):
        # Explicitly stored None/empty values come back unchanged.
        x = MultiValueDict({'a': None, 'b': []})
        self.assertIsNone(x.getlist('a'))
        self.assertEqual(x.getlist('b'), [])
class ImmutableListTests(SimpleTestCase):
    """Mutation attempts on ImmutableList must raise AttributeError."""

    def test_sort(self):
        immutable = ImmutableList(range(10))
        # In-place sort is a mutation, rejected with the default text.
        with self.assertRaisesMessage(AttributeError, 'ImmutableList object is immutable.'):
            immutable.sort()
        # The failed call must leave the contents untouched.
        self.assertEqual(repr(immutable), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')

    def test_custom_warning(self):
        immutable = ImmutableList(range(10), warning="Object is immutable!")
        # Reads still work normally.
        self.assertEqual(immutable[1], 1)
        # Item assignment raises with the caller-supplied warning text.
        with self.assertRaisesMessage(AttributeError, 'Object is immutable!'):
            immutable.__setitem__(1, 'test')
class DictWrapperTests(SimpleTestCase):
    """DictWrapper applies its function to keys carrying the prefix."""

    def test_dictwrapper(self):
        def star(value):
            return "*%s" % value

        wrapped = DictWrapper({'a': 'a'}, star, 'xx_')
        # Plain keys pass through; 'xx_'-prefixed keys are transformed.
        expected = 'Normal: a. Modified: *a'
        self.assertEqual("Normal: %(a)s. Modified: %(xx_a)s" % wrapped, expected)
gracefullife/gerrit | refs/heads/master | tools/gitlog2asciidoc.py | 22 | #!/usr/bin/python
from optparse import OptionParser
import re
import subprocess
import sys
"""
This script generates a release note from the output of git log
between the specified tags.
Options:
--issues Show output the commits with issues associated with them.
--issue-numbers Show outputs issue numbers of the commits with issues
associated with them
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
parser = OptionParser(usage='usage: %prog [options] <since> <until>')
parser.add_option('-i', '--issues', action='store_true',
dest='issues_only', default=False,
help='only output the commits with issues association')
parser.add_option('-n', '--issue-numbers', action='store_true',
dest='issue_numbers_only', default=False,
help='only outputs issue numbers of the commits with \
issues association')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("wrong number of arguments")
issues_only = options.issues_only
issue_numbers_only = options.issue_numbers_only
since_until = args[0] + '..' + args[1]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
is_issue = False
# regex pattern to match following cases such as Bug: 123, Issue Bug: 123,
# Bug: GERRIT-123, Bug: issue 123, Bug issue: 123, issue: 123, issue: bug 123
p = re.compile('bug: GERRIT-|bug(:? issue)?:? |issue(:? bug)?:? ',
re.IGNORECASE)
if issue_numbers_only:
for line in stdout_value.splitlines(True):
if p.match(line):
sys.stdout.write(p.sub('', line))
else:
for line in stdout_value.splitlines(True):
# Move issue number to subject line
if p.match(line):
line = p.sub('issue ', line).replace('\n',' ')
subject = subject[:2] + line + subject[2:]
is_issue = True
elif line.startswith('* '):
# Write change log for a commit
if subject != "":
if (not issues_only or is_issue):
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
is_issue = False
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line):
continue
# Don't add extra blank line if last one is already blank
elif line == '\n' and message and message[-1] != '+\n':
message.append('+\n')
elif line != '\n':
message.append(line)
|
robobrobro/ballin-octo-shame | refs/heads/master | lib/Python-3.4.3/Lib/multiprocessing/reduction.py | 94 | #
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import copyreg
import functools
import io
import os
import pickle
import socket
import sys
from . import context
__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']

# True when open handles/fds can be passed between processes: always on
# Windows; on Unix only if SCM_RIGHTS fd-passing over sendmsg() exists.
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
                    (hasattr(socket, 'CMSG_LEN') and
                     hasattr(socket, 'SCM_RIGHTS') and
                     hasattr(socket.socket, 'sendmsg')))

#
# Pickler subclass
#
class ForkingPickler(pickle.Pickler):
    '''Pickler subclass used by multiprocessing.'''
    _extra_reducers = {}
    _copyreg_dispatch_table = copyreg.dispatch_table

    def __init__(self, *args):
        super().__init__(*args)
        # Start from the global copyreg table, then overlay the
        # multiprocessing-specific reducers registered on this class.
        table = self._copyreg_dispatch_table.copy()
        table.update(self._extra_reducers)
        self.dispatch_table = table

    @classmethod
    def register(cls, type, reduce):
        '''Register a reduce function for a type.'''
        cls._extra_reducers[type] = reduce

    @classmethod
    def dumps(cls, obj, protocol=None):
        '''Pickle *obj* and return the serialized bytes-like buffer.'''
        stream = io.BytesIO()
        cls(stream, protocol).dump(obj)
        return stream.getbuffer()

    # Unpickling needs no customization; plain pickle.loads will do.
    loads = pickle.loads
# Module-level alias so callers can write reduction.register(type, fn).
register = ForkingPickler.register

def dump(obj, file, protocol=None):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    ForkingPickler(file, protocol).dump(obj)

#
# Platform specific definitions
#
# Platform dispatch: Windows duplicates OS HANDLEs between processes,
# Unix passes file descriptors over AF_UNIX sockets via SCM_RIGHTS.
if sys.platform == 'win32':
    # Windows
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    import _winapi

    def duplicate(handle, target_process=None, inheritable=False):
        '''Duplicate a handle. (target_process is a handle not a pid!)'''
        if target_process is None:
            target_process = _winapi.GetCurrentProcess()
        return _winapi.DuplicateHandle(
            _winapi.GetCurrentProcess(), handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            # DUPLICATE_CLOSE_SOURCE closes the handle in the source
            # process, transferring (not sharing) ownership.
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()

    class DupHandle(object):
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            '''Get the handle. This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)
else:
    # Unix
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array

    # On MacOSX we should acknowledge receipt of fds -- see Issue14669
    ACKNOWLEDGE = sys.platform == 'darwin'

    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fds = array.array('i', fds)
        # Single data byte carries len(fds) % 256 as a sanity check for
        # the receiver (see the matching assert in recvfds).
        msg = bytes([len(fds) % 256])
        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')

    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size))
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError('received %d items of ancdata' %
                                   len(ancdata))
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                # Cross-check against the one-byte count sent by sendfds.
                assert len(a) % 256 == msg[0]
                return list(a)
        except (ValueError, IndexError):
            pass
        raise RuntimeError('Invalid data received')

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]

    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        # When spawning, the fd is duplicated for the child via the
        # Popen object; otherwise it goes through the resource sharer.
        popen_obj = context.get_spawning_popen()
        if popen_obj is not None:
            return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
        elif HAVE_SEND_HANDLE:
            from . import resource_sharer
            return resource_sharer.DupFd(fd)
        else:
            raise ValueError('SCM_RIGHTS appears not to be available')
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
# Trivial class used only to obtain the concrete bound-method type so a
# reducer can be registered for it.
class _C:
    def f(self):
        pass
register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
# Method descriptors (list.append) and slot wrappers (int.__add__) both
# get the same getattr-based reduction.
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
# Make functools.partial objects picklable via the reducers above.
register(functools.partial, _reduce_partial)

#
# Make sockets picklable
#
# Sockets are reduced differently per platform: Windows goes through the
# resource sharer's DupSocket; Unix duplicates the underlying fd.
if sys.platform == 'win32':
    def _reduce_socket(s):
        from .resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)
    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)

else:
    def _reduce_socket(s):
        # Only the fd travels; family/type/proto are re-supplied so the
        # receiving process can reconstruct an equivalent socket object.
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)
    def _rebuild_socket(df, family, type, proto):
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)
|
macosforge/ccs-calendarserver | refs/heads/master | txdav/caldav/datastore/scheduling/ischedule/test/test_localservers.py | 1 | ##
# Copyright (c) 2009-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from txweb2.test.test_server import SimpleRequest
from twisted.trial import unittest
from twistedcaldav.stdconfig import config
from txdav.caldav.datastore.scheduling.ischedule.localservers import (
ServersDB, SERVER_SECRET_HEADER
)
import StringIO as StringIO
class ServerTests(unittest.TestCase):
    """Tests for ServersDB: parsing the podding servers.xml data and
    identifying this host, plus IP restrictions and shared secrets."""

    # Two servers: 00001 restricts callers by IP and has a shared
    # secret; 00002 has neither.
    data1 = """<?xml version="1.0" encoding="utf-8"?>
<servers>
  <server>
    <id>00001</id>
    <uri>http://caldav1.example.com:8008</uri>
    <allowed-from>127.0.0.1</allowed-from>
    <shared-secret>foobar</shared-secret>
  </server>
  <server>
    <id>00002</id>
    <uri>https://caldav2.example.com:8843</uri>
  </server>
</servers>
"""

    # Same as data1 but allowed-from is a hostname instead of a
    # literal IP address.
    data2 = """<?xml version="1.0" encoding="utf-8"?>
<servers>
  <server>
    <id>00001</id>
    <uri>http://caldav1.example.com:8008</uri>
    <allowed-from>localhost</allowed-from>
    <shared-secret>foobar</shared-secret>
  </server>
  <server>
    <id>00002</id>
    <uri>https://caldav2.example.com:8843</uri>
  </server>
</servers>
"""

    # Adds the optional per-server v5 attribute; absent means false.
    data_v5 = """<?xml version="1.0" encoding="utf-8"?>
<servers>
  <server>
    <id>00001</id>
    <uri>http://caldav1.example.com:8008</uri>
    <allowed-from>127.0.0.1</allowed-from>
    <shared-secret>foobar</shared-secret>
  </server>
  <server v5='yes'>
    <id>00002</id>
    <uri>https://caldav2.example.com:8843</uri>
  </server>
  <server v5='no'>
    <id>00003</id>
    <uri>https://caldav3.example.com:8943</uri>
  </server>
</servers>
"""

    def _setupServers(self, data=data1):
        """Load *data* into a ServersDB with config patched so that
        server 00001 matches this host."""
        self.patch(config, "ServerHostName", "caldav1.example.com")
        self.patch(config, "HTTPPort", 8008)

        xmlFile = StringIO.StringIO(data)
        servers = ServersDB()
        servers.load(xmlFile, ignoreIPLookupFailures=True)
        return servers

    def test_read_ok(self):
        # All parsed fields (uri, allowed-from IPs, shared secret) match
        # the XML input; absent elements yield empty set / None.
        servers = self._setupServers()

        self.assertTrue(servers.getServerById("00001") is not None)
        self.assertTrue(servers.getServerById("00002") is not None)

        self.assertEqual(servers.getServerById("00001").uri, "http://caldav1.example.com:8008")
        self.assertEqual(servers.getServerById("00002").uri, "https://caldav2.example.com:8843")

        self.assertEqual(servers.getServerById("00001").allowed_from_ips, set(("127.0.0.1",)))
        self.assertEqual(servers.getServerById("00002").allowed_from_ips, set())

        self.assertEqual(servers.getServerById("00001").shared_secret, "foobar")
        self.assertEqual(servers.getServerById("00002").shared_secret, None)

    def test_this_server(self):
        # Whichever entry matches the patched hostname/port becomes
        # "this server"; repatching the config flips it to 00002.
        servers = self._setupServers()

        self.assertTrue(servers.getServerById("00001").thisServer)
        self.assertFalse(servers.getServerById("00002").thisServer)
        self.assertEqual(servers.getThisServer(), servers.getServerById("00001"))

        self.patch(config, "ServerHostName", "caldav2.example.com")
        self.patch(config, "SSLPort", 8443)
        self.patch(config, "BindSSLPorts", [8843])

        xmlFile = StringIO.StringIO(ServerTests.data1)
        servers = ServersDB()
        servers.load(xmlFile, ignoreIPLookupFailures=True)

        self.assertFalse(servers.getServerById("00001").thisServer)
        self.assertTrue(servers.getServerById("00002").thisServer)
        self.assertEqual(servers.getThisServer(), servers.getServerById("00002"))

    def test_all_except_this_server(self):
        # allServersExceptThis() excludes exactly the local server.
        servers = self._setupServers()

        self.assertTrue(servers.getServerById("00001").thisServer)
        self.assertFalse(servers.getServerById("00002").thisServer)
        self.assertEqual(servers.allServersExceptThis(), [servers.getServerById("00002"), ])

        self.patch(config, "ServerHostName", "caldav2.example.com")
        self.patch(config, "SSLPort", 8443)
        self.patch(config, "BindSSLPorts", [8843])

        xmlFile = StringIO.StringIO(ServerTests.data1)
        servers = ServersDB()
        servers.load(xmlFile, ignoreIPLookupFailures=True)

        self.assertFalse(servers.getServerById("00001").thisServer)
        self.assertTrue(servers.getServerById("00002").thisServer)
        self.assertEqual(servers.allServersExceptThis(), [servers.getServerById("00001"), ])

    def test_check_this_ip(self):
        # checkThisIP() tests membership of the server's own IP set.
        servers = self._setupServers()
        servers.getServerById("00001").ips = set(("127.0.0.2",))
        servers.getServerById("00002").ips = set(("127.0.0.3",))

        self.assertTrue(servers.getServerById("00001").checkThisIP("127.0.0.2"))
        self.assertFalse(servers.getServerById("00001").checkThisIP("127.0.0.3"))

    def test_check_allowed_from(self):
        # Both the literal-IP (data1) and hostname (data2) forms must
        # yield the same allowed-from behavior.
        for servers in (self._setupServers(), self._setupServers(data=self.data2),):
            self.assertTrue(servers.getServerById("00001").hasAllowedFromIP())
            self.assertFalse(servers.getServerById("00002").hasAllowedFromIP())

            self.assertTrue(servers.getServerById("00001").checkAllowedFromIP("127.0.0.1"))
            self.assertFalse(servers.getServerById("00001").checkAllowedFromIP("127.0.0.2"))
            self.assertFalse(servers.getServerById("00001").checkAllowedFromIP("127.0.0.3"))
            self.assertFalse(servers.getServerById("00002").checkAllowedFromIP("127.0.0.1"))
            self.assertFalse(servers.getServerById("00002").checkAllowedFromIP("127.0.0.2"))
            self.assertFalse(servers.getServerById("00002").checkAllowedFromIP("127.0.0.3"))

    def test_check_shared_secret(self):
        # The secret header must match exactly when a secret is
        # configured; servers without a secret require its absence.
        servers = self._setupServers()

        request = SimpleRequest(None, "POST", "/ischedule")
        request.headers.addRawHeader(SERVER_SECRET_HEADER, "foobar")
        self.assertTrue(servers.getServerById("00001").checkSharedSecret(request.headers))

        request = SimpleRequest(None, "POST", "/ischedule")
        request.headers.addRawHeader(SERVER_SECRET_HEADER, "foobar1")
        self.assertFalse(servers.getServerById("00001").checkSharedSecret(request.headers))

        request = SimpleRequest(None, "POST", "/ischedule")
        self.assertFalse(servers.getServerById("00001").checkSharedSecret(request.headers))

        request = SimpleRequest(None, "POST", "/ischedule")
        request.headers.addRawHeader(SERVER_SECRET_HEADER, "foobar")
        self.assertFalse(servers.getServerById("00002").checkSharedSecret(request.headers))

        request = SimpleRequest(None, "POST", "/ischedule")
        request.headers.addRawHeader(SERVER_SECRET_HEADER, "foobar1")
        self.assertFalse(servers.getServerById("00002").checkSharedSecret(request.headers))

        request = SimpleRequest(None, "POST", "/ischedule")
        self.assertTrue(servers.getServerById("00002").checkSharedSecret(request.headers))

    def test_urn_uuid(self):
        # NOTE(review): despite the method name, this exercises parsing
        # of the per-server v5 attribute (missing/'no' -> False).
        servers = self._setupServers(self.data_v5)

        self.assertFalse(servers.getServerById("00001").v5)
        self.assertTrue(servers.getServerById("00002").v5)
        self.assertFalse(servers.getServerById("00003").v5)
|
epssy/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/aggregation/tests.py | 59 | from __future__ import absolute_import
import datetime
from decimal import Decimal
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase, Approximate
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["aggregation.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__ge=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
    def test_even_more_aggregate(self):
        """
        Min()/Max() annotations interact correctly with exclude(), ordering
        and values(), and aggregate() works on time/datetime columns.
        """
        # Publishers ordered by the pubdate of their earliest book;
        # publishers with no books (earliest_book IS NULL) are excluded.
        publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
        self.assertEqual(
            list(publishers), [
                {
                    'earliest_book': datetime.date(1991, 10, 15),
                    'num_awards': 9,
                    'id': 4,
                    'name': 'Morgan Kaufmann'
                },
                {
                    'earliest_book': datetime.date(1995, 1, 15),
                    'num_awards': 7,
                    'id': 3,
                    'name': 'Prentice Hall'
                },
                {
                    'earliest_book': datetime.date(2007, 12, 6),
                    'num_awards': 3,
                    'id': 1,
                    'name': 'Apress'
                },
                {
                    'earliest_book': datetime.date(2008, 3, 3),
                    'num_awards': 1,
                    'id': 2,
                    'name': 'Sams'
                }
            ]
        )
        # Max/Min also aggregate over TimeField and DateTimeField columns.
        vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
        self.assertEqual(
            vals,
            {
                "friday_night_closing__max": datetime.time(23, 59, 59),
                "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
            }
        )
    def test_annotate_values_list(self):
        """values_list() cooperates with annotate() in every flavour."""
        # Annotation selected alongside several plain fields.
        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
        self.assertEqual(
            list(books), [
                (1, "159059725", 34.5),
            ]
        )
        # Restricting values_list() to a plain field still works with the
        # annotation applied.
        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
        self.assertEqual(
            list(books), [
                ('159059725',)
            ]
        )
        # The annotation itself can be selected...
        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
        self.assertEqual(
            list(books), [
                (34.5,)
            ]
        )
        # ...including as a flat list.
        books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
        self.assertEqual(list(books), [34.5])
        # values_list() before annotate() groups by the listed field.
        books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
        self.assertEqual(
            list(books), [
                (Decimal("29.69"), 2),
                (Decimal('23.09'), 1),
                (Decimal('30'), 1),
                (Decimal('75'), 1),
                (Decimal('82.8'), 1),
            ]
        )
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
|
PriceChild/ansible | refs/heads/devel | lib/ansible/modules/cloud/rackspace/rax_cdb.py | 70 | #!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_cdb
short_description: create/delete or resize a Rackspace Cloud Databases instance
description:
- creates / deletes or resize a Rackspace Cloud Databases instance
and optionally waits for it to be 'running'. The name option needs to be
unique since it's used to identify the instance.
version_added: "1.8"
options:
name:
description:
- Name of the databases server instance
default: null
flavor:
description:
- flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB)
default: 1
volume:
description:
- Volume size of the database 1-150GB
default: 2
cdb_type:
description:
- type of instance (i.e. MySQL, MariaDB, Percona)
default: MySQL
version_added: "2.0"
aliases: ['type']
cdb_version:
description:
- version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
choices: ['5.1', '5.6', '10']
version_added: "2.0"
aliases: ['version']
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Cloud Databases
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax_cdb
credentials: ~/.raxpub
region: IAD
name: db-server1
flavor: 1
volume: 2
cdb_type: MySQL
cdb_version: 5.6
wait: yes
state: present
register: rax_db_server
'''
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    # Defer the failure: main() reports a clean error message when the
    # optional pyrax dependency is missing instead of crashing on import.
    HAS_PYRAX = False
def find_instance(name):
    """Return the Cloud Databases instance named *name*, or False if none."""
    cdb = pyrax.cloud_databases
    for candidate in (cdb.list() or []):
        if candidate.name == name:
            return candidate
    return False
def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
                  wait_timeout):
    """Create the named instance, or resize an existing one to the requested
    flavor/volume, then exit the module with the result (never returns)."""

    # All of these parameters are effectively mandatory (defaults come from
    # the argument spec in main()).
    for arg, value in dict(name=name, flavor=flavor,
                           volume=volume, type=cdb_type, version=cdb_version
                           ).items():
        if not value:
            module.fail_json(msg='%s is required for the "rax_cdb"'
                                 ' module' % arg)

    if not (volume >= 1 and volume <= 150):
        module.fail_json(msg='volume is required to be between 1 and 150')

    cdb = pyrax.cloud_databases

    # Validate the requested flavor against what the region actually offers.
    flavors = []
    for item in cdb.list_flavors():
        flavors.append(item.id)

    if not (flavor in flavors):
        module.fail_json(msg='unexisting flavor reference "%s"' % str(flavor))

    changed = False

    instance = find_instance(name)

    if not instance:
        action = 'create'
        try:
            instance = cdb.create(name=name, flavor=flavor, volume=volume,
                                  type=cdb_type, version=cdb_version)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    else:
        action = None

        # Volumes can only grow; shrinking is rejected up front.
        if instance.volume.size != volume:
            action = 'resize'
            if instance.volume.size > volume:
                module.fail_json(changed=False, action=action,
                                 msg='The new volume size must be larger than '
                                     'the current volume size',
                                 cdb=rax_to_dict(instance))
            instance.resize_volume(volume)
            changed = True

        if int(instance.flavor.id) != flavor:
            action = 'resize'
            # A flavor resize requires the instance to be ACTIVE first
            # (e.g. after the volume resize just above).
            pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
                                   attempts=wait_timeout)
            instance.resize(flavor)
            changed = True

    if wait:
        pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
                               attempts=wait_timeout)

    if wait and instance.status != 'ACTIVE':
        module.fail_json(changed=changed, action=action,
                         cdb=rax_to_dict(instance),
                         msg='Timeout waiting for "%s" databases instance to '
                             'be created' % name)

    module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
def delete_instance(module, name, wait, wait_timeout):
    """Delete the named Cloud Databases instance, optionally waiting for it
    to reach SHUTDOWN, then exit the module with the result."""
    if not name:
        module.fail_json(msg='name is required for the "rax_cdb" module')

    instance = find_instance(name)
    if not instance:
        # Nothing to do: the instance is already gone.
        module.exit_json(changed=False, action='delete')

    changed = False
    try:
        instance.delete()
    except Exception as e:
        module.fail_json(msg='%s' % e.message)
    else:
        changed = True

    if wait:
        pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
                               attempts=wait_timeout)
        if instance.status != 'SHUTDOWN':
            module.fail_json(changed=changed, action='delete',
                            cdb=rax_to_dict(instance),
                            msg='Timeout waiting for "%s" databases instance to '
                                'be deleted' % name)

    module.exit_json(changed=changed, action='delete',
                     cdb=rax_to_dict(instance))
def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
            wait_timeout):
    """Dispatch to the create/resize or delete handler for *state*."""
    if state == 'absent':
        delete_instance(module, name, wait, wait_timeout)
    elif state == 'present':
        save_instance(module, name, flavor, volume, cdb_type, cdb_version,
                      wait, wait_timeout)
def main():
    """Ansible module entry point: build the argument spec, validate the
    environment, authenticate against Rackspace and apply the state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            flavor=dict(type='int', default=1),
            volume=dict(type='int', default=2),
            cdb_type=dict(type='str', default='MySQL', aliases=['type']),
            cdb_version=dict(type='str', default='5.6', aliases=['version']),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    # Fail early with a clear message if the optional dependency is missing.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    name = module.params.get('name')
    flavor = module.params.get('flavor')
    volume = module.params.get('volume')
    cdb_type = module.params.get('cdb_type')
    cdb_version = module.params.get('cdb_version')
    state = module.params.get('state')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Authenticate against the Rackspace API before doing any work.
    setup_rax_module(module, pyrax)

    rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
if __name__ == '__main__':
main()
|
bwrsandman/OpenUpgrade | refs/heads/8.0 | openerp/conf/__init__.py | 442 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Library-wide configuration variables.
For now, configuration code is in openerp.tools.config. It is in mainly
unprocessed form, e.g. addons_path is a string with commas-separated
paths. The aim is to have code related to configuration (command line
parsing, configuration file loading and saving, ...) in this module
and provide real Python variables, e.g. addons_paths is really a list
of paths.
To initialize properly this module, openerp.tools.config.parse_config()
must be used.
"""
# NOTE: Python 2 implicit relative import; resolves to
# openerp/conf/deprecation.py (imported for its side effects).
import deprecation

# Paths to search for OpenERP addons.
addons_paths = []

# List of server-wide modules to load. Those modules are supposed to provide
# features not necessarily tied to a particular database. This is in contrast
# to modules that are always bound to a specific database when they are
# installed (i.e. the majority of OpenERP addons). This is set with the --load
# command-line option.
server_wide_modules = []

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
alexchamberlain/pacman | refs/heads/master | test/pacman/tests/sync044.py | 28 | self.description = "A dependency induces a replacement"
lp1 = pmpkg("pkg1")
self.addpkg2db("local", lp1);
sp2 = pmpkg("pkg2")
sp2.depends = ["pkg3"]
self.addpkg2db("sync", sp2);
sp3 = pmpkg("pkg3")
sp3.conflicts = ["pkg1"]
self.addpkg2db("sync", sp3);
self.args = "-S pkg2 --ask=4"
self.addrule("PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=pkg1")
self.addrule("PKG_EXIST=pkg2")
self.addrule("PKG_EXIST=pkg3")
self.addrule("PKG_REASON=pkg3|1")
|
closeio/flask-admin | refs/heads/master | flask_admin/contrib/sqla/validators.py | 22 | from sqlalchemy.orm.exc import NoResultFound
from wtforms import ValidationError
try:
from wtforms.validators import InputRequired
except ImportError:
from wtforms.validators import Required as InputRequired
class Unique(object):
    """Checks field value unicity against specified table field.

    :param db_session:
        The SQLAlchemy session used to run the uniqueness query.
        (The previous docstring documented this parameter as
        ``get_session``, which did not match the actual signature.)
    :param model:
        The model to check unicity against.
    :param column:
        The unique column.
    :param message:
        The error message.
    """
    field_flags = ('unique', )

    def __init__(self, db_session, model, column, message=None):
        self.db_session = db_session
        self.model = model
        self.column = column
        self.message = message

    def __call__(self, form, field):
        # databases allow multiple NULL values for unique columns
        if field.data is None:
            return

        try:
            obj = (self.db_session.query(self.model)
                   .filter(self.column == field.data)
                   .one())

            # Editing an object may legitimately keep its own unique value.
            if not hasattr(form, '_obj') or not form._obj == obj:
                # Use a local variable instead of writing the translated
                # default back onto ``self.message``: validator instances are
                # shared and long-lived, so mutating them here cached the
                # first request's translation for all subsequent users.
                message = self.message
                if message is None:
                    message = field.gettext(u'Already exists.')
                raise ValidationError(message)
        except NoResultFound:
            # No row with this value: the field is unique.
            pass
class ItemsRequired(InputRequired):
    """
    A version of the ``InputRequired`` validator that works with relations,
    to require a minimum number of related items.
    """
    def __init__(self, min=1, message=None):
        super(ItemsRequired, self).__init__(message=message)
        self.min = min

    def __call__(self, form, field):
        # Guard clause: enough related items, nothing to report.
        if len(field.data) >= self.min:
            return

        message = self.message
        if message is None:
            message = field.ngettext(
                u"At least %(num)d item is required",
                u"At least %(num)d items are required",
                self.min
            )
        raise ValidationError(message)
|
thedrow/django-rest-framework-1 | refs/heads/master | tests/test_serializer_nested.py | 36 | from django.http import QueryDict
from rest_framework import serializers
class TestNestedSerializer:
    """A required nested serializer validates and serializes its children."""

    def setup(self):
        class NestedSerializer(serializers.Serializer):
            one = serializers.IntegerField(max_value=10)
            two = serializers.IntegerField(max_value=10)

        class TestSerializer(serializers.Serializer):
            nested = NestedSerializer()

        self.Serializer = TestSerializer

    def test_nested_validate(self):
        # String inputs are coerced to ints by the nested IntegerFields.
        input_data = {
            'nested': {
                'one': '1',
                'two': '2',
            }
        }
        expected_data = {
            'nested': {
                'one': 1,
                'two': 2,
            }
        }
        serializer = self.Serializer(data=input_data)
        assert serializer.is_valid()
        assert serializer.validated_data == expected_data

    def test_nested_serialize_empty(self):
        # Serializing without an instance yields None for each nested field.
        expected_data = {
            'nested': {
                'one': None,
                'two': None
            }
        }
        serializer = self.Serializer()
        assert serializer.data == expected_data
class TestNotRequiredNestedSerializer:
    """A nested serializer with required=False may be omitted entirely."""

    def setup(self):
        class NestedSerializer(serializers.Serializer):
            one = serializers.IntegerField(max_value=10)

        class TestSerializer(serializers.Serializer):
            nested = NestedSerializer(required=False)

        self.Serializer = TestSerializer

    def test_json_validate(self):
        # Both the absent and the present nested payload must validate.
        for payload in ({}, {'nested': {'one': '1'}}):
            serializer = self.Serializer(data=payload)
            assert serializer.is_valid()

    def test_multipart_validate(self):
        # Same contract with multipart-style QueryDict input.
        for raw in ('', 'nested[one]=1'):
            serializer = self.Serializer(data=QueryDict(raw))
            assert serializer.is_valid()
class TestNestedSerializerWithMany:
    """Behaviour of many=True nested serializers combined with the
    allow_null and allow_empty options."""

    def setup(self):
        class NestedSerializer(serializers.Serializer):
            example = serializers.IntegerField(max_value=10)

        class TestSerializer(serializers.Serializer):
            allow_null = NestedSerializer(many=True, allow_null=True)
            not_allow_null = NestedSerializer(many=True)
            allow_empty = NestedSerializer(many=True, allow_empty=True)
            not_allow_empty = NestedSerializer(many=True, allow_empty=False)

        self.Serializer = TestSerializer

    def test_null_allowed_if_allow_null_is_set(self):
        input_data = {
            'allow_null': None,
            'not_allow_null': [{'example': '2'}, {'example': '3'}],
            'allow_empty': [{'example': '2'}],
            'not_allow_empty': [{'example': '2'}],
        }
        expected_data = {
            'allow_null': None,
            'not_allow_null': [{'example': 2}, {'example': 3}],
            'allow_empty': [{'example': 2}],
            'not_allow_empty': [{'example': 2}],
        }
        serializer = self.Serializer(data=input_data)
        assert serializer.is_valid(), serializer.errors
        assert serializer.validated_data == expected_data

    def test_null_is_not_allowed_if_allow_null_is_not_set(self):
        input_data = {
            'allow_null': None,
            'not_allow_null': None,
            'allow_empty': [{'example': '2'}],
            'not_allow_empty': [{'example': '2'}],
        }
        serializer = self.Serializer(data=input_data)
        assert not serializer.is_valid()
        # Only the field lacking allow_null=True reports the "null" error.
        expected_errors = {'not_allow_null': [serializer.error_messages['null']]}
        assert serializer.errors == expected_errors

    def test_run_the_field_validation_even_if_the_field_is_null(self):
        # A validate_<field> hook must run even when the value is None.
        class TestSerializer(self.Serializer):
            validation_was_run = False

            def validate_allow_null(self, value):
                TestSerializer.validation_was_run = True
                return value

        input_data = {
            'allow_null': None,
            'not_allow_null': [{'example': 2}],
            'allow_empty': [{'example': 2}],
            'not_allow_empty': [{'example': 2}],
        }
        serializer = TestSerializer(data=input_data)
        assert serializer.is_valid()
        assert serializer.validated_data == input_data
        assert TestSerializer.validation_was_run

    def test_empty_allowed_if_allow_empty_is_set(self):
        input_data = {
            'allow_null': [{'example': '2'}],
            'not_allow_null': [{'example': '2'}],
            'allow_empty': [],
            'not_allow_empty': [{'example': '2'}],
        }
        expected_data = {
            'allow_null': [{'example': 2}],
            'not_allow_null': [{'example': 2}],
            'allow_empty': [],
            'not_allow_empty': [{'example': 2}],
        }
        serializer = self.Serializer(data=input_data)
        assert serializer.is_valid(), serializer.errors
        assert serializer.validated_data == expected_data

    def test_empty_not_allowed_if_allow_empty_is_set_to_false(self):
        input_data = {
            'allow_null': [{'example': '2'}],
            'not_allow_null': [{'example': '2'}],
            'allow_empty': [],
            'not_allow_empty': [],
        }
        serializer = self.Serializer(data=input_data)
        assert not serializer.is_valid()
        # The list-level "empty" error is reported under non_field_errors.
        expected_errors = {'not_allow_empty': {'non_field_errors': [serializers.ListSerializer.default_error_messages['empty']]}}
        assert serializer.errors == expected_errors
|
lordblackfox/aircox | refs/heads/master | aircox/forms.py | 2 | from django import forms
from django.forms import ModelForm
from .models import Comment
class CommentForm(ModelForm):
    """Public comment form: nickname, optional email, and the comment body.

    CSS classes are attached via the widget constructors; the rendered
    widgets (TextInput / EmailInput / Textarea) are the same as before.
    """
    nickname = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'input'}))
    email = forms.EmailField(
        required=False,
        widget=forms.EmailInput(attrs={'class': 'input'}))
    content = forms.CharField(
        widget=forms.Textarea(attrs={'class': 'textarea'}))

    class Meta:
        model = Comment
        fields = ['nickname', 'email', 'content']
|
MKLab-ITI/news-popularity-prediction | refs/heads/master | reveal_fp7_module/reveal-popularity-prediction/reveal_popularity_prediction/features/extraction.py | 1 | __author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
from reveal_popularity_prediction.features import wrappers
def extract_snapshot_features(comment_tree,
                              user_graph,
                              timestamp_list,
                              tweet_timestamp,
                              initial_post,
                              author,
                              platform):
    """Compute every handcrafted feature for one discussion snapshot."""
    snapshot = {"comment_tree": comment_tree,
                "user_graph": user_graph,
                "timestamp_list": timestamp_list,
                "tweet_timestamp": tweet_timestamp,
                "initial_post": initial_post,
                "author": author}

    # Sorting fixes the feature order so it matches make_features_vector().
    names = sorted(get_handcrafted_feature_names(platform))
    functions = [getattr(wrappers, "wrapper_" + name) for name in names]

    return calculate_handcrafted_features(snapshot, names, functions)
def calculate_handcrafted_features(graph_snapshot_input,
                                   feature_names,
                                   handcrafted_function_list):
    """Apply each calculation function to the snapshot and return a
    feature-name -> value mapping (names and functions are zipped)."""
    return {name: calculate(graph_snapshot_input)
            for name, calculate in zip(feature_names,
                                       handcrafted_function_list)}
def get_handcrafted_feature_names(platform):
    """
    Returns a set of feature names to be calculated.

    Input:  - platform: "YouTube" or "Reddit".
    Output: - names: A set of strings, corresponding to the features to be calculated.

    The per-category name lists are now sourced from the single-category
    get_*_feature_names() helpers defined in this module, so the two copies
    of each list can no longer drift apart.
    """
    names = set()

    # Basic discussion tree features.
    names.update(get_comment_tree_feature_names())

    # Branching discussion tree features (no dedicated helper covers these).
    names.update(["comment_tree_hirsch",
                  "comment_tree_wiener",
                  "comment_tree_randic"])

    # User graph features.
    names.update(get_user_graph_feature_names())

    # Temporal features.
    names.update(get_temporal_feature_names())

    # Platform-specific author/channel features; prints a message and raises
    # RuntimeError on an unknown platform, exactly like the previous inline
    # implementation.
    names.update(get_author_feature_names(platform))

    return names
def get_comment_tree_feature_names():
    """Names of the basic discussion-tree features."""
    return {"comment_count",
            "max_depth",
            "avg_depth",
            "max_width",
            "avg_width",
            "max_depth_over_max_width",
            "avg_depth_over_width"}
def get_user_graph_feature_names():
    """Names of the user-graph features."""
    return {"user_count",
            "user_graph_hirsch",
            "user_graph_randic",
            "outdegree_entropy",
            "norm_outdegree_entropy",
            "indegree_entropy",
            "norm_indegree_entropy"}
def get_temporal_feature_names():
    """Names of the temporal (comment-timing) features."""
    return {"avg_time_differences_1st_half",
            "avg_time_differences_2nd_half",
            "time_differences_std",
            "last_comment_lifetime"}
def get_author_feature_names(platform):
    """
    Names of the author/channel features for the given platform.

    Input:  - platform: "YouTube" or "Reddit".
    Output: - names: A set of feature-name strings.
    """
    youtube_names = {"author_privacy_status_youtube",
                     "author_is_linked_youtube",
                     "author_long_uploads_status_youtube",
                     "author_comment_count_youtube",
                     "author_comment_rate_youtube",
                     "author_view_count_youtube",
                     "author_view_rate_youtube",
                     "author_video_upload_count_youtube",
                     "author_video_upload_rate_youtube",
                     "author_subscriber_count_youtube",
                     "author_subscriber_rate_youtube",
                     "author_hidden_subscriber_count_youtube",
                     "author_channel_lifetime_youtube"}
    reddit_names = {"author_has_verified_mail_reddit",
                    "author_account_lifetime_reddit",
                    "author_hide_from_robots_reddit",
                    "author_is_mod_reddit",
                    "author_link_karma_reddit",
                    "author_link_karma_rate_reddit",
                    "author_comment_karma_reddit",
                    "author_comment_karma_rate_reddit",
                    "author_is_gold_reddit"}

    if platform == "YouTube":
        return set(youtube_names)
    if platform == "Reddit":
        return set(reddit_names)

    # Same failure behaviour as the rest of the module.
    print("Invalid platform name.")
    raise RuntimeError
# print(sorted(get_handcrafted_feature_names("YouTube")))
# print(sorted(get_handcrafted_feature_names("Reddit")))
def make_features_vector(features_dict, platform):
    """Return a (1, n_features) float64 row vector with the feature values
    ordered by sorted feature name (matching extract_snapshot_features)."""
    feature_names = sorted(get_handcrafted_feature_names(platform))
    values = [features_dict[feature_name] for feature_name in feature_names]
    return np.asarray(values, dtype=np.float64).reshape(1, -1)
|
piyush82/icclab-rcb-web | refs/heads/master | virtualenv/lib/python2.7/site-packages/django/contrib/auth/admin.py | 29 | from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm,
AdminPasswordChangeForm)
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
# View decorators adapted for use on class methods via method_decorator.
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class GroupAdmin(admin.ModelAdmin):
    """Admin options for django.contrib.auth Group objects."""
    search_fields = ('name',)
    ordering = ('name',)
    filter_horizontal = ('permissions',)

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        """Serve the permissions widget with content types pre-joined."""
        if db_field.name == 'permissions':
            qs = kwargs.get('queryset', db_field.rel.to.objects)
            # Avoid a major performance hit resolving permission names which
            # triggers a content_type load:
            kwargs['queryset'] = qs.select_related('content_type')
        return super(GroupAdmin, self).formfield_for_manytomany(
            db_field, request=request, **kwargs)
class UserAdmin(admin.ModelAdmin):
    """Admin options for the User model: two-step add flow (username and
    password first), a dedicated change-password view, and lookup
    restrictions for password fields."""
    add_form_template = 'admin/auth/user/add_form.html'
    change_user_password_template = None
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    # Reduced fieldsets used only for the initial "add user" step.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'password1', 'password2')}
        ),
    )
    form = UserChangeForm
    add_form = UserCreationForm
    change_password_form = AdminPasswordChangeForm
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
    list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
    search_fields = ('username', 'first_name', 'last_name', 'email')
    ordering = ('username',)
    filter_horizontal = ('groups', 'user_permissions',)

    def get_fieldsets(self, request, obj=None):
        # Creating a new user shows only the reduced add_fieldsets.
        if not obj:
            return self.add_fieldsets
        return super(UserAdmin, self).get_fieldsets(request, obj)

    def get_form(self, request, obj=None, **kwargs):
        """
        Use special form during user creation
        """
        defaults = {}
        if obj is None:
            defaults.update({
                'form': self.add_form,
                'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
            })
        defaults.update(kwargs)
        return super(UserAdmin, self).get_form(request, obj, **defaults)

    def get_urls(self):
        # Prepend the change-password URL so it takes precedence over the
        # default object-detail patterns.
        from django.conf.urls import patterns
        return patterns('',
            (r'^(\d+)/password/$',
             self.admin_site.admin_view(self.user_change_password))
        ) + super(UserAdmin, self).get_urls()

    def lookup_allowed(self, lookup, value):
        # See #20078: we don't want to allow any lookups involving passwords.
        if lookup.startswith('password'):
            return False
        return super(UserAdmin, self).lookup_allowed(lookup, value)

    @sensitive_post_parameters_m
    @csrf_protect_m
    @transaction.atomic
    def add_view(self, request, form_url='', extra_context=None):
        # It's an error for a user to have add permission but NOT change
        # permission for users. If we allowed such users to add users, they
        # could create superusers, which would mean they would essentially have
        # the permission to change users. To avoid the problem entirely, we
        # disallow users from adding users if they don't have change
        # permission.
        if not self.has_change_permission(request):
            if self.has_add_permission(request) and settings.DEBUG:
                # Raise Http404 in debug mode so that the user gets a helpful
                # error message.
                raise Http404(
                    'Your user does not have the "Change user" permission. In '
                    'order to add users, Django requires that your user '
                    'account have both the "Add user" and "Change user" '
                    'permissions set.')
            raise PermissionDenied
        if extra_context is None:
            extra_context = {}
        username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
        defaults = {
            'auto_populated_fields': (),
            'username_help_text': username_field.help_text,
        }
        extra_context.update(defaults)
        return super(UserAdmin, self).add_view(request, form_url,
                                               extra_context)

    @sensitive_post_parameters_m
    def user_change_password(self, request, id, form_url=''):
        """Render and process the admin change-password form for one user."""
        if not self.has_change_permission(request):
            raise PermissionDenied
        user = get_object_or_404(self.get_queryset(request), pk=id)
        if request.method == 'POST':
            form = self.change_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                change_message = self.construct_change_message(request, form, None)
                # NOTE(review): this logs the change against request.user
                # (the admin performing the change) rather than the edited
                # ``user`` — looks like it should be ``user``; confirm
                # against upstream before relying on the admin history.
                self.log_change(request, request.user, change_message)
                msg = ugettext('Password changed successfully.')
                messages.success(request, msg)
                return HttpResponseRedirect('..')
        else:
            form = self.change_password_form(user)
        fieldsets = [(None, {'fields': list(form.base_fields)})]
        adminForm = admin.helpers.AdminForm(form, fieldsets, {})
        context = {
            'title': _('Change password: %s') % escape(user.get_username()),
            'adminForm': adminForm,
            'form_url': form_url,
            'form': form,
            'is_popup': IS_POPUP_VAR in request.REQUEST,
            'add': True,
            'change': False,
            'has_delete_permission': False,
            'has_change_permission': True,
            'has_absolute_url': False,
            'opts': self.model._meta,
            'original': user,
            'save_as': False,
            'show_save': True,
        }
        return TemplateResponse(request,
            self.change_user_password_template or
            'admin/auth/user/change_password.html',
            context, current_app=self.admin_site.name)

    def response_add(self, request, obj, post_url_continue=None):
        """
        Determines the HttpResponse for the add_view stage. It mostly defers to
        its superclass implementation but is customized because the User model
        has a slightly different workflow.
        """
        # We should allow further modification of the user just added i.e. the
        # 'Save' button should behave like the 'Save and continue editing'
        # button except in two scenarios:
        # * The user has pressed the 'Save and add another' button
        # * We are adding a user in a popup
        if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
            request.POST['_continue'] = 1
        return super(UserAdmin, self).response_add(request, obj,
                                                   post_url_continue)
# Wire the default admin site to the stock auth models.
admin.site.register(Group, GroupAdmin)
admin.site.register(User, UserAdmin)
|
dfunckt/django | refs/heads/master | django/template/library.py | 115 | import functools
import warnings
from importlib import import_module
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.html import conditional_escape
from django.utils.inspect import getargspec
from django.utils.itercompat import is_iterable
from .base import Node, Template, token_kwargs
from .exceptions import TemplateSyntaxError
class InvalidTemplateLibrary(Exception):
    """Raised when a template tag module can't be imported or lacks ``register``."""
    pass
class Library(object):
    """
    A class for registering template tags and filters. Compiled filter and
    template tag functions are stored in the filters and tags attributes.
    The filter, simple_tag, and inclusion_tag methods provide a convenient
    way to register callables as tags.
    """
    def __init__(self):
        # Both map the registered name -> the registered callable.
        self.filters = {}
        self.tags = {}

    def tag(self, name=None, compile_function=None):
        """
        Register a compile function as a template tag.

        Supports four call forms (dispatched on which arguments are given):
        ``@register.tag()``, ``@register.tag``, ``@register.tag('name')``,
        and the direct call ``register.tag('name', func)``.
        """
        if name is None and compile_function is None:
            # @register.tag()
            return self.tag_function
        elif name is not None and compile_function is None:
            if callable(name):
                # @register.tag
                return self.tag_function(name)
            else:
                # @register.tag('somename') or @register.tag(name='somename')
                def dec(func):
                    return self.tag(name, func)
                return dec
        elif name is not None and compile_function is not None:
            # register.tag('somename', somefunc)
            self.tags[name] = compile_function
            return compile_function
        else:
            raise ValueError(
                "Unsupported arguments to Library.tag: (%r, %r)" %
                (name, compile_function),
            )

    def tag_function(self, func):
        """Register ``func`` as a tag under its (undecorated) function name."""
        self.tags[getattr(func, "_decorated_function", func).__name__] = func
        return func

    def filter(self, name=None, filter_func=None, **flags):
        """
        Register a callable as a template filter. Example:

            @register.filter
            def lower(value):
                return value.lower()

        Accepts the same four call forms as ``tag()``. Recognized ``flags``
        (``expects_localtime``, ``is_safe``, ``needs_autoescape``) are copied
        onto the filter function itself.
        """
        if name is None and filter_func is None:
            # @register.filter()
            def dec(func):
                return self.filter_function(func, **flags)
            return dec
        elif name is not None and filter_func is None:
            if callable(name):
                # @register.filter
                return self.filter_function(name, **flags)
            else:
                # @register.filter('somename') or @register.filter(name='somename')
                def dec(func):
                    return self.filter(name, func, **flags)
                return dec
        elif name is not None and filter_func is not None:
            # register.filter('somename', somefunc)
            self.filters[name] = filter_func
            for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
                if attr in flags:
                    value = flags[attr]
                    # set the flag on the filter for FilterExpression.resolve
                    setattr(filter_func, attr, value)
                    # set the flag on the innermost decorated function
                    # for decorators that need it, e.g. stringfilter
                    if hasattr(filter_func, "_decorated_function"):
                        setattr(filter_func._decorated_function, attr, value)
            filter_func._filter_name = name
            return filter_func
        else:
            raise ValueError(
                "Unsupported arguments to Library.filter: (%r, %r)" %
                (name, filter_func),
            )

    def filter_function(self, func, **flags):
        """Register ``func`` as a filter under its (undecorated) name."""
        name = getattr(func, "_decorated_function", func).__name__
        return self.filter(name, func, **flags)

    def simple_tag(self, func=None, takes_context=None, name=None):
        """
        Register a callable as a compiled template tag. Example:

            @register.simple_tag
            def hello(*args, **kwargs):
                return 'world'
        """
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)
            function_name = (name or getattr(func, '_decorated_function', func).__name__)

            @functools.wraps(func)
            def compile_func(parser, token):
                bits = token.split_contents()[1:]
                # Support "{% tag ... as var %}" by peeling off a trailing
                # "as <var>" before parsing the remaining arguments.
                target_var = None
                if len(bits) >= 2 and bits[-2] == 'as':
                    target_var = bits[-1]
                    bits = bits[:-2]
                args, kwargs = parse_bits(
                    parser, bits, params, varargs, varkw, defaults,
                    takes_context, function_name
                )
                return SimpleNode(func, takes_context, args, kwargs, target_var)
            self.tag(function_name, compile_func)
            return func

        if func is None:
            # @register.simple_tag(...)
            return dec
        elif callable(func):
            # @register.simple_tag
            return dec(func)
        else:
            raise ValueError("Invalid arguments provided to simple_tag")

    def assignment_tag(self, func=None, takes_context=None, name=None):
        """Deprecated alias for ``simple_tag`` (which now supports "as var")."""
        warnings.warn(
            "assignment_tag() is deprecated. Use simple_tag() instead",
            RemovedInDjango20Warning,
            stacklevel=2,
        )
        return self.simple_tag(func, takes_context, name)

    def inclusion_tag(self, filename, func=None, takes_context=None, name=None):
        """
        Register a callable as an inclusion tag:

            @register.inclusion_tag('results.html')
            def show_results(poll):
                choices = poll.choice_set.all()
                return {'choices': choices}
        """
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)
            function_name = (name or getattr(func, '_decorated_function', func).__name__)

            @functools.wraps(func)
            def compile_func(parser, token):
                bits = token.split_contents()[1:]
                args, kwargs = parse_bits(
                    parser, bits, params, varargs, varkw, defaults,
                    takes_context, function_name,
                )
                return InclusionNode(
                    func, takes_context, args, kwargs, filename,
                )
            self.tag(function_name, compile_func)
            return func
        return dec
class TagHelperNode(Node):
    """
    Base class for tag helper nodes such as SimpleNode and InclusionNode.

    Holds the wrapped callable plus the still-unresolved positional and
    keyword argument expressions, and resolves them against a context on
    demand.
    """
    def __init__(self, func, takes_context, args, kwargs):
        self.takes_context = takes_context
        self.func = func
        self.kwargs = kwargs
        self.args = args

    def get_resolved_arguments(self, context):
        """Resolve the stored argument expressions against ``context``."""
        positional = [expr.resolve(context) for expr in self.args]
        if self.takes_context:
            # The wrapped function was registered with takes_context=True,
            # so the context object itself is passed as the first argument.
            positional.insert(0, context)
        keyword = {key: expr.resolve(context) for key, expr in self.kwargs.items()}
        return positional, keyword
class SimpleNode(TagHelperNode):
    """Node rendered for tags registered via ``Library.simple_tag``."""

    def __init__(self, func, takes_context, args, kwargs, target_var):
        super(SimpleNode, self).__init__(func, takes_context, args, kwargs)
        # Name given after "as" in the template, or None to emit the output.
        self.target_var = target_var

    def render(self, context):
        call_args, call_kwargs = self.get_resolved_arguments(context)
        result = self.func(*call_args, **call_kwargs)
        if self.target_var is not None:
            # "{% tag ... as var %}": store the value, render nothing.
            context[self.target_var] = result
            return ''
        return conditional_escape(result) if context.autoescape else result
class InclusionNode(TagHelperNode):
    """Node rendered for tags registered via ``Library.inclusion_tag``.

    ``filename`` may be a template name, a list/tuple of names, or an
    already-compiled Template object.
    """

    def __init__(self, func, takes_context, args, kwargs, filename):
        super(InclusionNode, self).__init__(func, takes_context, args, kwargs)
        self.filename = filename

    def render(self, context):
        """
        Render the specified template and context. Cache the template object
        in render_context to avoid reparsing and loading when used in a for
        loop.
        """
        resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
        # The decorated function returns the context dict for the included
        # template.
        _dict = self.func(*resolved_args, **resolved_kwargs)

        t = context.render_context.get(self)
        if t is None:
            # Accept, in order: a Template, an object wrapping a Template
            # (backend template), an iterable of candidate names, or a name.
            if isinstance(self.filename, Template):
                t = self.filename
            elif isinstance(getattr(self.filename, 'template', None), Template):
                t = self.filename.template
            elif not isinstance(self.filename, six.string_types) and is_iterable(self.filename):
                t = context.template.engine.select_template(self.filename)
            else:
                t = context.template.engine.get_template(self.filename)
            context.render_context[self] = t
        new_context = context.new(_dict)
        # Copy across the CSRF token, if present, because inclusion tags are
        # often used for forms, and we need instructions for using CSRF
        # protection to be as simple as possible.
        csrf_token = context.get('csrf_token')
        if csrf_token is not None:
            new_context['csrf_token'] = csrf_token
        return t.render(new_context)
def parse_bits(parser, bits, params, varargs, varkw, defaults,
               takes_context, name):
    """
    Parse bits for template tag helpers simple_tag and inclusion_tag, in
    particular by detecting syntax errors and by extracting positional and
    keyword arguments.

    ``bits`` are the raw token pieces after the tag name; ``params``,
    ``varargs``, ``varkw`` and ``defaults`` come from getargspec() of the
    decorated function. Returns (args, kwargs) of compiled FilterExpressions.
    Raises TemplateSyntaxError on any mismatch with the function signature.
    """
    if takes_context:
        # The context parameter is supplied by the node at render time, so it
        # must not be matched against template-provided arguments.
        if params[0] == 'context':
            params = params[1:]
        else:
            raise TemplateSyntaxError(
                "'%s' is decorated with takes_context=True so it must "
                "have a first argument of 'context'" % name)
    args = []
    kwargs = {}
    unhandled_params = list(params)
    for bit in bits:
        # First we try to extract a potential kwarg from the bit
        kwarg = token_kwargs([bit], parser)
        if kwarg:
            # The kwarg was successfully extracted
            param, value = kwarg.popitem()
            if param not in params and varkw is None:
                # An unexpected keyword argument was supplied
                raise TemplateSyntaxError(
                    "'%s' received unexpected keyword argument '%s'" %
                    (name, param))
            elif param in kwargs:
                # The keyword argument has already been supplied once
                raise TemplateSyntaxError(
                    "'%s' received multiple values for keyword argument '%s'" %
                    (name, param))
            else:
                # All good, record the keyword argument
                kwargs[str(param)] = value
                if param in unhandled_params:
                    # If using the keyword syntax for a positional arg, then
                    # consume it.
                    unhandled_params.remove(param)
        else:
            if kwargs:
                raise TemplateSyntaxError(
                    "'%s' received some positional argument(s) after some "
                    "keyword argument(s)" % name)
            else:
                # Record the positional argument
                args.append(parser.compile_filter(bit))
                try:
                    # Consume from the list of expected positional arguments
                    unhandled_params.pop(0)
                except IndexError:
                    if varargs is None:
                        raise TemplateSyntaxError(
                            "'%s' received too many positional arguments" %
                            name)
    if defaults is not None:
        # Consider the last n params handled, where n is the
        # number of defaults.
        unhandled_params = unhandled_params[:-len(defaults)]
    if unhandled_params:
        # Some positional arguments were not supplied
        raise TemplateSyntaxError(
            "'%s' did not receive value(s) for the argument(s): %s" %
            (name, ", ".join("'%s'" % p for p in unhandled_params)))
    return args, kwargs
def import_library(name):
    """
    Import the module at dotted path ``name`` and return its ``register``
    Library instance. Raise InvalidTemplateLibrary if the module cannot be
    imported or does not define ``register``.
    """
    try:
        module = import_module(name)
    except ImportError as e:
        raise InvalidTemplateLibrary(
            "Invalid template library specified. ImportError raised when "
            "trying to load '%s': %s" % (name, e)
        )
    if hasattr(module, 'register'):
        return module.register
    raise InvalidTemplateLibrary(
        "Module %s does not have a variable named 'register'" % name,
    )
|
victoredwardocallaghan/xen | refs/heads/master | tools/python/xen/xend/xenstore/tests/__init__.py | 45382 | |
Hakuba/youtube-dl | refs/heads/master | youtube_dl/extractor/yourupload.py | 142 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class YourUploadIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?
(?:yourupload\.com/watch|
embed\.yourupload\.com|
embed\.yucache\.net
)/(?P<id>[A-Za-z0-9]+)
'''
_TESTS = [
{
'url': 'http://yourupload.com/watch/14i14h',
'md5': '5e2c63385454c557f97c4c4131a393cd',
'info_dict': {
'id': '14i14h',
'ext': 'mp4',
'title': 'BigBuckBunny_320x180.mp4',
'thumbnail': 're:^https?://.*\.jpe?g',
}
},
{
'url': 'http://embed.yourupload.com/14i14h',
'only_matching': True,
},
{
'url': 'http://embed.yucache.net/14i14h?client_file_id=803349',
'only_matching': True,
},
]
    def _real_extract(self, url):
        """Extract the video info dict for a yourupload.com URL."""
        video_id = self._match_id(url)
        # All URL variants resolve through the yucache embed page, which
        # carries the OpenGraph metadata we scrape below.
        embed_url = 'http://embed.yucache.net/{0:}'.format(video_id)
        webpage = self._download_webpage(embed_url, video_id)

        title = self._og_search_title(webpage)
        video_url = self._og_search_video_url(webpage)
        # Thumbnail is optional; default=None avoids a warning when absent.
        thumbnail = self._og_search_thumbnail(webpage, default=None)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            # The CDN checks the Referer header, so the downloader must send
            # the embed page URL with the media request.
            'http_headers': {
                'Referer': embed_url,
            },
        }
|
scottrice/Ice | refs/heads/master | ice/steam_shortcut_synchronizer.py | 1 |
from pysteam import shortcuts
import roms
from consoles import console_roms_directory
from logs import logger
class SteamShortcutSynchronizer(object):
  """Synchronizes a user's Steam shortcuts with the ROMs Ice manages.

  Shortcuts the user added themselves ("unmanaged") are always preserved;
  only shortcuts previously created by Ice are replaced on each sync.
  """

  def __init__(self, config, managed_rom_archive):
    # `config` locates each console's ROMs directory (used only by the legacy
    # guessing heuristic); `managed_rom_archive` persists the app ids of
    # shortcuts Ice created on previous runs.
    self.config = config
    self.managed_rom_archive = managed_rom_archive

  def _guess_whether_shortcut_is_managed_by_ice(self, shortcut, consoles):
    # Helper function which guesses whether the shortcut was added during a
    # previous run of Ice with its console set as `console`. We do this the
    # same way we did before we had the flag tag, we check the console's
    # ROMs directory and see if it shows up in the executable for the shortcut
    def shortcut_is_managed_by_console(console):
      return console_roms_directory(self.config, console) in shortcut.exe
    return any(shortcut_is_managed_by_console(console) for console in consoles)

  def shortcut_is_managed_by_ice(self, managed_ids, shortcut, consoles):
    """Returns True if `shortcut` was created by Ice (and may be replaced)."""
    # LEGACY: At one point I added ICE_FLAG_TAG to every shortcut Ice made.
    # That was a terrible idea, the managed_ids is a much better system. I
    # keep this check around for legacy reasons though.
    if roms.ICE_FLAG_TAG in shortcut.tags:
      return True
    # LEGACY: For most of Ice's life it guessed whether it managed a shortcut
    # or not. This was REALLY bad, as it was very dependent on configuration
    # and caused really strange bugs where moving directories would cause ROMs
    # to get duplicated and all sorts of bad stuff.
    #
    # Luckily, we have a history now and don't have to deal with that crap.
    # Yay! Except that this screws over anyone who used Ice //before// it had
    # a history, as we have no record of what they added before. Shit.
    #
    # To fix this, we provide a migration path for these people. If we have NO
    # history (not an empty history, NO history) then we fall back to our old
    # way of checking whether we manage the shortcut. The next time Ice is run
    # we will have a history to work with and can avoid using this hacky garbage.
    if managed_ids is None:
      return self._guess_whether_shortcut_is_managed_by_ice(shortcut, consoles)
    # We only 'manage' it if we added the shortcut in the last run
    return shortcuts.shortcut_app_id(shortcut) in managed_ids

  def unmanaged_shortcuts(self, managed_ids, shortcuts, consoles):
    """Returns (as a list) the shortcuts that Ice does NOT manage."""
    # List comprehension rather than `filter` so the result is a real list on
    # both Python 2 and Python 3; callers concatenate it with `+` later.
    return [shortcut for shortcut in shortcuts
            if not self.shortcut_is_managed_by_ice(managed_ids, shortcut, consoles)]

  def removed_shortcuts(self, current_shortcuts, new_shortcuts):
    """Returns shortcuts present in `current_shortcuts` but not in `new_shortcuts`."""
    # To get the list of only removed shortcuts we take all of the current
    # shortcuts and filter out any that exist in the new shortcuts
    return [shortcut for shortcut in current_shortcuts if shortcut not in new_shortcuts]

  def added_shortcuts(self, current_shortcuts, new_shortcuts):
    """Returns shortcuts present in `new_shortcuts` but not in `current_shortcuts`."""
    # To get the list of only added shortcuts we take all of the new shortcuts
    # and filter out any that existed in the current shortcuts
    return [shortcut for shortcut in new_shortcuts if shortcut not in current_shortcuts]

  def sync_roms_for_user(self, user, users_roms, consoles, dry_run=False):
    """
    This function takes care of syncing ROMs. After this function exits,
    Steam will contain only non-Ice shortcuts and the ROMs represented
    by `users_roms`.
    """
    # 'Unmanaged' is just the term I am using for shortcuts that the user has
    # added that Ice shouldn't delete. For example, something like a shortcut
    # to Plex would be 'Unmanaged'
    previous_managed_ids = self.managed_rom_archive.previous_managed_ids(user)
    logger.debug("Previous managed ids: %s" % previous_managed_ids)
    current_shortcuts = shortcuts.get_shortcuts(user)
    unmanaged_shortcuts = self.unmanaged_shortcuts(previous_managed_ids, current_shortcuts, consoles)
    logger.debug("Unmanaged shortcuts: %s" % unmanaged_shortcuts)
    current_ice_shortcuts = [shortcut for shortcut in current_shortcuts
                             if shortcut not in unmanaged_shortcuts]
    logger.debug("Current Ice shortcuts: %s" % current_ice_shortcuts)
    # Generate a list of shortcuts out of our list of ROMs
    rom_shortcuts = [roms.rom_to_shortcut(rom) for rom in users_roms]
    # Calculate which ROMs were added and which were removed so we can inform
    # the user. NOTE: explicit loops here — the original used `map` for these
    # side effects, which is lazy on Python 3 and would never log anything.
    for shortcut in self.removed_shortcuts(current_ice_shortcuts, rom_shortcuts):
      logger.info("Removing ROM: `%s`" % shortcut.name)
    for shortcut in self.added_shortcuts(current_ice_shortcuts, rom_shortcuts):
      logger.info("Adding ROM: `%s`" % shortcut.name)
    # Set the updated shortcuts
    updated_shortcuts = unmanaged_shortcuts + rom_shortcuts
    logger.debug("Sync Result: %s" % updated_shortcuts)
    if dry_run:
      logger.debug("Not saving or updating history due to dry run")
      return
    logger.debug("Saving shortcuts")
    shortcuts.set_shortcuts(user, updated_shortcuts)
    # Update the archive
    new_managed_ids = [shortcuts.shortcut_app_id(shortcut) for shortcut in rom_shortcuts]
    logger.debug("Updating archive to ids: %s" % new_managed_ids)
    self.managed_rom_archive.set_managed_ids(user, new_managed_ids)
|
komsas/OpenUpgrade | refs/heads/master | addons/account_test/__init__.py | 441 | import account_test
import report
|
kwailamchan/programming-languages | refs/heads/master | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/core/cache/backends/db.py | 94 | "Database cache backend."
import base64
import time
from datetime import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.db import connections, router, transaction, DatabaseError
from django.utils import timezone
class Options(object):
    """A class that will quack like a Django model _meta class.

    This allows cache operations to be controlled by the router
    """
    def __init__(self, table):
        # Mimic the attributes routers typically inspect on model._meta.
        self.db_table = table
        self.app_label = 'django_cache'
        self.module_name = 'cacheentry'
        self.verbose_name = 'cache entry'
        self.verbose_name_plural = 'cache entries'
        self.object_name = 'CacheEntry'
        self.abstract = False
        self.managed = True
        self.proxy = False
class BaseDatabaseCache(BaseCache):
    """Common setup for database cache backends: remember the table name and
    build a fake model class so database routers can route cache queries."""
    def __init__(self, table, params):
        BaseCache.__init__(self, params)
        self._table = table

        # A throwaway class whose only job is to carry a model-like _meta,
        # so router.db_for_read/db_for_write can be consulted.
        class CacheEntry(object):
            _meta = Options(table)
        self.cache_model_class = CacheEntry
class DatabaseCache(BaseDatabaseCache):
    """Cache backend storing entries in a database table with columns
    (cache_key, value, expires); values are base64-encoded pickles."""

    # This class uses cursors provided by the database connection. This means
    # it reads expiration values as aware or naive datetimes depending on the
    # value of USE_TZ. They must be compared to aware or naive representations
    # of "now" respectively.

    # But it bypasses the ORM for write operations. As a consequence, aware
    # datetimes aren't made naive for databases that don't support time zones.
    # We work around this problem by always using naive datetimes when writing
    # expiration values, in UTC when USE_TZ = True and in local time otherwise.

    def get(self, key, default=None, version=None):
        """Fetch `key`; return `default` on a miss or if the row is expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT cache_key, value, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        row = cursor.fetchone()
        if row is None:
            return default
        now = timezone.now()
        if row[2] < now:
            # Lazily delete the expired row (on the write database) and
            # report a miss.
            db = router.db_for_write(self.cache_model_class)
            cursor = connections[db].cursor()
            cursor.execute("DELETE FROM %s "
                           "WHERE cache_key = %%s" % table, [key])
            transaction.commit_unless_managed(using=db)
            return default
        value = connections[db].ops.process_clob(row[1])
        return pickle.loads(base64.decodestring(value))

    def set(self, key, value, timeout=None, version=None):
        """Unconditionally store `value` under `key`."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        self._base_set('set', key, value, timeout)

    def add(self, key, value, timeout=None, version=None):
        """Store `value` only if `key` is absent or expired; return success."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return self._base_set('add', key, value, timeout)

    def _base_set(self, mode, key, value, timeout=None):
        # Shared implementation of set()/add(); `mode` selects the semantics.
        if timeout is None:
            timeout = self.default_timeout
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("SELECT COUNT(*) FROM %s" % table)
        num = cursor.fetchone()[0]
        now = timezone.now()
        now = now.replace(microsecond=0)
        # Expiration is always written as a naive datetime (see class note).
        if settings.USE_TZ:
            exp = datetime.utcfromtimestamp(time.time() + timeout)
        else:
            exp = datetime.fromtimestamp(time.time() + timeout)
        exp = exp.replace(microsecond=0)
        if num > self._max_entries:
            self._cull(db, cursor, now)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        encoded = base64.encodestring(pickled).strip()
        cursor.execute("SELECT cache_key, expires FROM %s "
                       "WHERE cache_key = %%s" % table, [key])
        try:
            result = cursor.fetchone()
            if result and (mode == 'set' or
                    (mode == 'add' and result[1] < now)):
                cursor.execute("UPDATE %s SET value = %%s, expires = %%s "
                               "WHERE cache_key = %%s" % table,
                               [encoded, connections[db].ops.value_to_db_datetime(exp), key])
            else:
                cursor.execute("INSERT INTO %s (cache_key, value, expires) "
                               "VALUES (%%s, %%s, %%s)" % table,
                               [key, encoded, connections[db].ops.value_to_db_datetime(exp)])
        except DatabaseError:
            # To be threadsafe, updates/inserts are allowed to fail silently
            transaction.rollback_unless_managed(using=db)
            return False
        else:
            transaction.commit_unless_managed(using=db)
            return True

    def delete(self, key, version=None):
        """Remove `key` from the cache (no-op if absent)."""
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
        transaction.commit_unless_managed(using=db)

    def has_key(self, key, version=None):
        """Return True if `key` exists and has not expired."""
        key = self.make_key(key, version=version)
        self.validate_key(key)

        db = router.db_for_read(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()

        if settings.USE_TZ:
            now = datetime.utcnow()
        else:
            now = datetime.now()
        now = now.replace(microsecond=0)
        cursor.execute("SELECT cache_key FROM %s "
                       "WHERE cache_key = %%s and expires > %%s" % table,
                       [key, connections[db].ops.value_to_db_datetime(now)])
        return cursor.fetchone() is not None

    def _cull(self, db, cursor, now):
        # Evict entries: everything when _cull_frequency == 0, otherwise all
        # expired rows plus roughly 1/_cull_frequency of what remains.
        if self._cull_frequency == 0:
            self.clear()
        else:
            # When USE_TZ is True, 'now' will be an aware datetime in UTC.
            now = now.replace(tzinfo=None)
            table = connections[db].ops.quote_name(self._table)
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
                           [connections[db].ops.value_to_db_datetime(now)])
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            if num > self._max_entries:
                # NOTE(review): '/' is integer division only on Python 2;
                # this module predates Python 3 support — confirm before
                # porting.
                cull_num = num / self._cull_frequency
                if connections[db].vendor == 'oracle':
                    # Oracle doesn't support LIMIT + OFFSET
                    cursor.execute("""SELECT cache_key FROM
(SELECT ROW_NUMBER() OVER (ORDER BY cache_key) AS counter, cache_key FROM %s)
WHERE counter > %%s AND COUNTER <= %%s""" % table, [cull_num, cull_num + 1])
                else:
                    # This isn't standard SQL, it's likely to break
                    # with some non officially supported databases
                    cursor.execute("SELECT cache_key FROM %s "
                                   "ORDER BY cache_key "
                                   "LIMIT 1 OFFSET %%s" % table, [cull_num])
                cursor.execute("DELETE FROM %s "
                               "WHERE cache_key < %%s" % table,
                               [cursor.fetchone()[0]])

    def clear(self):
        """Remove every entry from the cache table."""
        db = router.db_for_write(self.cache_model_class)
        table = connections[db].ops.quote_name(self._table)
        cursor = connections[db].cursor()
        cursor.execute('DELETE FROM %s' % table)
# For backwards compatibility
class CacheClass(DatabaseCache):
    # Legacy alias kept so old CACHES settings referencing CacheClass work.
    pass
|
pong3489/TEST_Mission | refs/heads/master | Lib/site-packages/numpy/testing/setupscons.py | 51 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils Configuration for the ``testing`` subpackage."""
    # Imported lazily so merely importing this module doesn't pull in
    # numpy.distutils.
    from numpy.distutils.misc_util import Configuration
    return Configuration('testing', parent_package, top_path)
if __name__ == '__main__':
    # Standalone build entry point: hand the configuration above to
    # numpy.distutils' setup().
    from numpy.distutils.core import setup
    setup(maintainer = "NumPy Developers",
          maintainer_email = "numpy-dev@numpy.org",
          description = "NumPy test module",
          url = "http://www.numpy.org",
          license = "NumPy License (BSD Style)",
          configuration = configuration,
          )
|
yukoba/sympy | refs/heads/master | sympy/physics/tests/test_sho.py | 98 | from sympy.core import symbols, Rational, Function, diff
from sympy.core.compatibility import range
from sympy.physics.sho import R_nl, E_nl
from sympy import simplify
def test_sho_R_nl():
    """Verify R_nl satisfies the radial Schrodinger equation symbolically
    for the 3D harmonic oscillator, for the first few values of n."""
    omega, r = symbols('omega r')
    l = symbols('l', integer=True)
    u = Function('u')

    # check that it obeys the Schrodinger equation
    for n in range(5):
        schreq = ( -diff(u(r), r, 2)/2 + ((l*(l + 1))/(2*r**2)
                    + omega**2*r**2/2 - E_nl(n, l, omega))*u(r) )
        # Substitute u(r) = r * R_nl(...); the residual must simplify to zero.
        result = schreq.subs(u(r), r*R_nl(n, l, omega/2, r))
        assert simplify(result.doit()) == 0
def test_energy():
    """Check the oscillator energy formula E_nl = (2n + l + 3/2) * hw."""
    n, l, hw = symbols('n l hw')
    assert simplify(E_nl(n, l, hw) - (2*n + l + Rational(3, 2))*hw) == 0
|
BuildingLink/sentry | refs/heads/master | src/social_auth/views.py | 8 | """Views
Notes:
* Some views are marked to avoid csrf token check because they rely
on third party providers that (if using POST) won't be sending csrf
token back.
"""
from __future__ import absolute_import
import six
from sudo.utils import is_safe_url
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt
from social_auth.exceptions import AuthException
from social_auth.utils import (
setting, backend_setting, clean_partial_pipeline)
from social_auth.decorators import dsa_view
# Fallback redirect target after auth, the error page for failed
# associations, and the session key that stores partial pipeline state.
DEFAULT_REDIRECT = setting('SOCIAL_AUTH_LOGIN_REDIRECT_URL',
                           setting('LOGIN_REDIRECT_URL'))
ASSOCIATE_ERROR_URL = setting('SOCIAL_AUTH_ASSOCIATE_ERROR_URL')
PIPELINE_KEY = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')
@dsa_view(setting('SOCIAL_AUTH_COMPLETE_URL_NAME', 'socialauth_associate_complete'))
def auth(request, backend):
    """Start authentication with the given social backend.

    Stashes configured fields and a sanitized "next" URL in the session,
    then either redirects to the provider or returns its login HTML.
    """
    data = request.POST if request.method == 'POST' else request.GET

    # Save extra data into session.
    for field_name in setting('SOCIAL_AUTH_FIELDS_STORED_IN_SESSION', []):
        if field_name in data:
            request.session[field_name] = data[field_name]

    # Save any defined next value into session
    if REDIRECT_FIELD_NAME in data:
        # Check and sanitize a user-defined GET/POST next field value
        redirect = data[REDIRECT_FIELD_NAME]

        # NOTE: django-sudo's `is_safe_url` is much better at catching bad
        # redirections to different domains than social_auth's
        # `sanitize_redirect` call.
        if not is_safe_url(redirect, host=request.get_host()):
            redirect = DEFAULT_REDIRECT

        request.session[REDIRECT_FIELD_NAME] = redirect or DEFAULT_REDIRECT

    # Clean any partial pipeline info before starting the process
    clean_partial_pipeline(request)

    if backend.uses_redirect:
        return HttpResponseRedirect(backend.auth_url())
    else:
        return HttpResponse(backend.auth_html(),
                            content_type='text/html;charset=UTF-8')
@csrf_exempt
@login_required
@dsa_view()
def complete(request, backend, *args, **kwargs):
    """Finish the association flow for an already-logged-in user.

    On success flashes a confirmation message; on AuthException flashes the
    error. Always ends in a redirect (or passes through an HttpResponse
    returned by the pipeline). csrf_exempt because the provider POSTs back
    without our CSRF token.
    """
    # pop redirect value before the session is trashed on login()
    redirect_value = request.session.get(REDIRECT_FIELD_NAME, '')

    backend_name = backend.AUTH_BACKEND.name

    try:
        user = auth_complete(request, backend, request.user, *args, **kwargs)
    except AuthException as exc:
        messages.add_message(
            request, messages.ERROR,
            six.text_type(exc)
        )
        user = None
    else:
        messages.add_message(
            request, messages.SUCCESS,
            'You have linked your account with {}.'.format(
                settings.AUTH_PROVIDER_LABELS.get(backend_name, backend_name),
            )
        )

    if not user:
        url = (
            redirect_value or
            ASSOCIATE_ERROR_URL or
            DEFAULT_REDIRECT
        )
    elif isinstance(user, HttpResponse):
        # A pipeline step may short-circuit with a full response.
        return user
    else:
        url = (
            redirect_value or
            backend_setting(backend, 'SOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL') or
            DEFAULT_REDIRECT
        )
    return HttpResponseRedirect(url)
def auth_complete(request, backend, user, *args, **kwargs):
    """Complete auth process. Return authenticated user or None.

    If a partial pipeline was saved in the session, resume it from the
    recorded step (only when it belongs to the same backend); otherwise run
    the backend's normal completion.
    """
    if request.session.get(PIPELINE_KEY):
        data = request.session.pop(PIPELINE_KEY)
        kwargs = kwargs.copy()
        if user:
            kwargs['user'] = user
        idx, xargs, xkwargs = backend.from_session_dict(data, request=request,
                                                        *args, **kwargs)
        if 'backend' in xkwargs and \
           xkwargs['backend'].name == backend.AUTH_BACKEND.name:
            return backend.continue_pipeline(pipeline_index=idx,
                                             *xargs, **xkwargs)
    return backend.auth_complete(user=user, request=request, *args, **kwargs)
|
hsaputra/tensorflow | refs/heads/master | tensorflow/python/debug/cli/offline_analyzer.py | 162 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Offline dump analyzer of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.platform import app
def main(_):
  """Load the tfdbg dump directory given by flags and launch the analyzer UI."""
  if FLAGS.log_usage:
    pass  # No logging for open-source.

  if not FLAGS.dump_dir:
    print("ERROR: dump_dir flag is empty.", file=sys.stderr)
    sys.exit(1)

  print("tfdbg offline: FLAGS.dump_dir = %s" % FLAGS.dump_dir)

  debug_dump = debug_data.DebugDumpDir(
      FLAGS.dump_dir, validate=FLAGS.validate_graph)
  # Ship the standard inf/nan tensor filter so `lt -f has_inf_or_nan` works.
  cli = analyzer_cli.create_analyzer_ui(
      debug_dump,
      tensor_filters={"has_inf_or_nan": debug_data.has_inf_or_nan},
      ui_type=FLAGS.ui_type)

  title = "tfdbg offline @ %s" % FLAGS.dump_dir
  # "lt" (list tensors) is the initial command shown on startup.
  cli.run_ui(title=title, title_color="black_on_white", init_command="lt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--dump_dir", type=str, default="", help="tfdbg dump directory to load")
parser.add_argument(
"--log_usage",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether the usage of this tool is to be logged")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--validate_graph",
nargs="?",
const=True,
type="bool",
default=True,
help="""\
Whether the dumped tensors will be validated against the GraphDefs\
""")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
jasonbot/django | refs/heads/master | tests/auth_tests/test_basic.py | 328 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.test import TestCase, override_settings
from django.test.signals import setting_changed
from django.utils import translation
@receiver(setting_changed)
def user_model_swapped(**kwargs):
    """Reset User's default manager and the app cache whenever a test
    overrides AUTH_USER_MODEL, so swapped-model tests see a clean state."""
    if kwargs['setting'] == 'AUTH_USER_MODEL':
        from django.db.models.manager import ensure_default_manager
        # Reset User manager
        setattr(User, 'objects', User._default_manager)
        ensure_default_manager(User)
        apps.clear_cache()
class BasicTestCase(TestCase):
    """Basic behavior of the auth User model, AnonymousUser, and the
    get_user_model()/AUTH_USER_MODEL swapping machinery."""

    def test_user(self):
        "Check that users can be created and can set their password"
        u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
        self.assertTrue(u.has_usable_password())
        self.assertFalse(u.check_password('bad'))
        self.assertTrue(u.check_password('testpw'))

        # Check we can manually set an unusable password
        u.set_unusable_password()
        u.save()
        self.assertFalse(u.check_password('testpw'))
        self.assertFalse(u.has_usable_password())
        u.set_password('testpw')
        self.assertTrue(u.check_password('testpw'))
        # set_password(None) also marks the password unusable.
        u.set_password(None)
        self.assertFalse(u.has_usable_password())

        # Check username getter
        self.assertEqual(u.get_username(), 'testuser')

        # Check authentication/permissions
        self.assertTrue(u.is_authenticated())
        self.assertFalse(u.is_staff)
        self.assertTrue(u.is_active)
        self.assertFalse(u.is_superuser)

        # Check API-based user creation with no password
        u2 = User.objects.create_user('testuser2', 'test2@example.com')
        self.assertFalse(u2.has_usable_password())

    def test_user_no_email(self):
        "Check that users can be created without an email"
        # Omitted, empty, and None email all normalize to the empty string.
        u = User.objects.create_user('testuser1')
        self.assertEqual(u.email, '')

        u2 = User.objects.create_user('testuser2', email='')
        self.assertEqual(u2.email, '')

        u3 = User.objects.create_user('testuser3', email=None)
        self.assertEqual(u3.email, '')

    def test_anonymous_user(self):
        "Check the properties of the anonymous user"
        a = AnonymousUser()
        self.assertEqual(a.pk, None)
        self.assertEqual(a.username, '')
        self.assertEqual(a.get_username(), '')
        self.assertFalse(a.is_authenticated())
        self.assertFalse(a.is_staff)
        self.assertFalse(a.is_active)
        self.assertFalse(a.is_superuser)
        self.assertEqual(a.groups.all().count(), 0)
        self.assertEqual(a.user_permissions.all().count(), 0)

    def test_superuser(self):
        "Check the creation and properties of a superuser"
        super = User.objects.create_superuser('super', 'super@example.com', 'super')
        self.assertTrue(super.is_superuser)
        self.assertTrue(super.is_active)
        self.assertTrue(super.is_staff)

    def test_get_user_model(self):
        "The current user model can be retrieved"
        self.assertEqual(get_user_model(), User)

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user(self):
        "The current user model can be swapped out for another"
        self.assertEqual(get_user_model(), CustomUser)
        # The replaced default model's manager is no longer usable.
        with self.assertRaises(AttributeError):
            User.objects.all()

    @override_settings(AUTH_USER_MODEL='badsetting')
    def test_swappable_user_bad_setting(self):
        "The alternate user setting must point to something in the format app.model"
        with self.assertRaises(ImproperlyConfigured):
            get_user_model()

    @override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
    def test_swappable_user_nonexistent_model(self):
        "The current user model must point to an installed model"
        with self.assertRaises(ImproperlyConfigured):
            get_user_model()

    def test_user_verbose_names_translatable(self):
        "Default User model verbose names are translatable (#19945)"
        with translation.override('en'):
            self.assertEqual(User._meta.verbose_name, 'user')
            self.assertEqual(User._meta.verbose_name_plural, 'users')
        with translation.override('es'):
            self.assertEqual(User._meta.verbose_name, 'usuario')
            self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
|
istinspring/grablab | refs/heads/master | back/back/wsgi.py | 2 | """
WSGI config for back project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module; setdefault() keeps any value
# already present in the environment. Must run before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "back.settings")

# Module-level WSGI callable (named ``application`` per the module docstring)
# that WSGI servers look up.
application = get_wsgi_application()
|
bottompawn/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/encodings/cp1125.py | 213 | """ Python Character Mapping Codec for CP1125
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1125 codec delegating to the C-level charmap primitives."""

    def encode(self, input, errors='strict'):
        encoded, consumed = codecs.charmap_encode(input, errors, encoding_map)
        return encoded, consumed

    def decode(self, input, errors='strict'):
        decoded, consumed = codecs.charmap_decode(input, errors, decoding_table)
        return decoded, consumed
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input* via the charmap and return only the byte string."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input* via the charmap and return only the text."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; a charmap codec needs no extra state.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; a charmap codec needs no extra state.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registration entry for 'cp1125'."""
    # Codec is stateless, so one instance can supply both bound methods.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1125',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0490, # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
0x00f3: 0x0491, # CYRILLIC SMALL LETTER GHE WITH UPTURN
0x00f4: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f5: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f6: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00f7: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00f8: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f9: 0x0457, # CYRILLIC SMALL LETTER YI
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
'\u0490' # 0x00f2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
'\u0491' # 0x00f3 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
'\u0404' # 0x00f4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u0454' # 0x00f5 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u0406' # 0x00f6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0456' # 0x00f7 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0407' # 0x00f8 -> CYRILLIC CAPITAL LETTER YI
'\u0457' # 0x00f9 -> CYRILLIC SMALL LETTER YI
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u2116' # 0x00fc -> NUMERO SIGN
'\xa4' # 0x00fd -> CURRENCY SIGN
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# The encoding map is the exact inverse of decoding_table (defined above):
# all 256 byte positions decode to distinct code points, so inverting the
# table reproduces the original literal dict — identity for ASCII plus the
# CP1125 high half (Cyrillic letters, box drawing, shades, etc.).
encoding_map = {ord(char): byte for byte, char in enumerate(decoding_table)}
|
simongoffin/website_version | refs/heads/Multi_fonctionnel | addons/project_issue/__init__.py | 433 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_issue
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
o3project/odenos | refs/heads/develop | apps/mininet_examples/multi_network_control/start_mininet.py | 6 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.node import RemoteController, OVSSwitch
def start_of13_switches(controller, switches):
    """Start every OVS switch under *controller* and pin it to OpenFlow 1.3."""
    for switch in switches:
        switch.start([controller])
        cmd = 'ovs-vsctl set bridge %s protocols=OpenFlow13' % switch
        switch.sendCmd(cmd)
if '__main__' == __name__:
    # Topology: three 4-switch "data center" rings (s1-s4, s5-s8, s9-s12),
    # one 6-switch WAN ring (s13-s18), four hosts, and one remote
    # OpenFlow controller per network (ports 6653-6656 on localhost).
    net = Mininet(controller=RemoteController, autoStaticArp=True, switch=OVSSwitch)
    c1 = net.addController('c1', ip='127.0.0.1', port=6653)
    c2 = net.addController('c2', ip='127.0.0.1', port=6654)
    c3 = net.addController('c3', ip='127.0.0.1', port=6655)
    c4 = net.addController('c4', ip='127.0.0.1', port=6656)
    # s1..s18, keyed 1-based so the key matches the switch name suffix.
    switches = {}
    for i in range(1, 19):
        switches[i] = net.addSwitch('s' + str(i))
    # create three dc networks: chain s(4i-3)..s(4i), then close each ring
    for i in range(1, 4):
        for j in range(2, 5):
            n = 4 * (i - 1) + j
            switches[n-1].linkTo(switches[n])
        n = 4 * (i - 1)
        switches[n + 1].linkTo(switches[n + 4])
    # create one wan network: ring s13..s18
    for i in range(14, 19):
        switches[i-1].linkTo(switches[i])
    switches[13].linkTo(switches[18])
    # add hosts, one per dc edge switch
    h1 = net.addHost('h1')
    h2 = net.addHost('h2')
    h3 = net.addHost('h3')
    h4 = net.addHost('h4')
    switches[1].linkTo(h1)
    switches[3].linkTo(h2)
    switches[8].linkTo(h3)
    switches[12].linkTo(h4)
    # connect dc networks with core networks
    switches[4].linkTo(switches[13])
    switches[3].linkTo(switches[14])
    switches[6].linkTo(switches[18])
    switches[7].linkTo(switches[17])
    switches[9].linkTo(switches[16])
    switches[10].linkTo(switches[15])
    net.build()
    c1.start()
    c2.start()
    c3.start()
    c4.start()
    # Hand each controller its slice of switches.
    # NOTE(review): slicing dict.values() is Python 2 only, and it assumes the
    # dict yields s1..s18 in key order, which CPython 2 does not guarantee for
    # dicts in general; iterating sorted(switches) would be deterministic -- confirm.
    start_of13_switches(c1, switches.values()[0:4])
    start_of13_switches(c2, switches.values()[4:8])
    start_of13_switches(c3, switches.values()[8:12])
    start_of13_switches(c4, switches.values()[12:18])
    CLI(net)
    net.stop()
|
gdimitris/ChessPuzzlerBackend | refs/heads/master | Virtual_Environment/lib/python2.7/site-packages/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py | 2057 | try:
# Python 3.2+
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
|
georgefrank/ansible-modules-extras | refs/heads/devel | database/vertica/vertica_facts.py | 148 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_facts
version_added: '2.0'
short_description: Gathers Vertica database facts.
description:
- Gathers Vertica database facts.
options:
cluster:
description:
- Name of the cluster running the schema.
required: false
default: localhost
port:
    description:
      - Database port to connect to.
required: false
default: 5433
db:
description:
- Name of the database running the schema.
required: false
default: null
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: gathering vertica facts
vertica_facts: db=db_name
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
class NotSupportedError(Exception):
    """Module-specific error; caught in main() and reported via fail_json()."""
    pass
# module specific functions
def get_schema_facts(cursor, schema=''):
    """Return facts for non-system schemata, keyed by lowercased schema name.

    Each entry records the schema's name, owner, creation time and the roles
    granted USAGE or CREATE on it.  An empty *schema* pattern matches all.
    """
    def _rows():
        # Drain the cursor's current result set in batches of 100.
        batch = cursor.fetchmany(100)
        while batch:
            for row in batch:
                yield row
            batch = cursor.fetchmany(100)

    facts = {}
    cursor.execute("""
        select schema_name, schema_owner, create_time
        from schemata
        where not is_system_schema and schema_name not in ('public')
        and (? = '' or schema_name ilike ?)
    """, schema, schema)
    for row in _rows():
        facts[row.schema_name.lower()] = {
            'name': row.schema_name,
            'owner': row.schema_owner,
            'create_time': str(row.create_time),
            'usage_roles': [],
            'create_roles': []}
    # Second pass: attach role grants to the schemata found above.
    cursor.execute("""
        select g.object_name as schema_name, r.name as role_name,
        lower(g.privileges_description) privileges_description
        from roles r join grants g
        on g.grantee = r.name and g.object_type='SCHEMA'
        and g.privileges_description like '%USAGE%'
        and g.grantee not in ('public', 'dbadmin')
        and (? = '' or g.object_name ilike ?)
    """, schema, schema)
    for row in _rows():
        bucket = 'create_roles' if 'create' in row.privileges_description else 'usage_roles'
        facts[row.schema_name.lower()][bucket].append(row.role_name)
    return facts
def get_user_facts(cursor, user=''):
    """Return facts for non-superuser accounts, keyed by lowercased user name."""
    facts = {}
    cursor.execute("""
        select u.user_name, u.is_locked, u.lock_time,
        p.password, p.acctexpired as is_expired,
        u.profile_name, u.resource_pool,
        u.all_roles, u.default_roles
        from users u join password_auditor p on p.user_id = u.user_id
        where not u.is_super_user
        and (? = '' or u.user_name ilike ?)
    """, user, user)
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            info = {
                'name': row.user_name,
                'locked': str(row.is_locked),
                'password': row.password,
                'expired': str(row.is_expired),
                'profile': row.profile_name,
                'resource_pool': row.resource_pool,
                'roles': [],
                'default_roles': []}
            if row.is_locked:
                # Only locked accounts carry a meaningful lock timestamp.
                info['locked_time'] = str(row.lock_time)
            if row.all_roles:
                info['roles'] = row.all_roles.replace(' ', '').split(',')
            if row.default_roles:
                info['default_roles'] = row.default_roles.replace(' ', '').split(',')
            facts[row.user_name.lower()] = info
        batch = cursor.fetchmany(100)
    return facts
def get_role_facts(cursor, role=''):
    """Return facts for defined roles, keyed by lowercased role name."""
    facts = {}
    cursor.execute("""
        select r.name, r.assigned_roles
        from roles r
        where (? = '' or r.name ilike ?)
    """, role, role)
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            assigned = []
            if row.assigned_roles:
                assigned = row.assigned_roles.replace(' ', '').split(',')
            facts[row.name.lower()] = {'name': row.name,
                                       'assigned_roles': assigned}
        batch = cursor.fetchmany(100)
    return facts
def get_configuration_facts(cursor, parameter=''):
    """Return cluster-wide configuration parameters keyed by lowercased name."""
    facts = {}
    cursor.execute("""
        select c.parameter_name, c.current_value, c.default_value
        from configuration_parameters c
        where c.node_name = 'ALL'
        and (? = '' or c.parameter_name ilike ?)
    """, parameter, parameter)
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            facts[row.parameter_name.lower()] = {
                'parameter_name': row.parameter_name,
                'current_value': row.current_value,
                'default_value': row.default_value}
        batch = cursor.fetchmany(100)
    return facts
def get_node_facts(cursor, schema=''):
    """Return per-node facts keyed by node address.

    NOTE(review): ``schema`` is accepted for signature parity with the other
    get_*_facts helpers but is not used by the query -- kept so existing
    keyword callers do not break.
    """
    facts = {}
    cursor.execute("""
        select node_name, node_address, export_address, node_state, node_type,
        catalog_path
        from nodes
    """)
    batch = cursor.fetchmany(100)
    while batch:
        for row in batch:
            facts[row.node_address] = {
                'node_name': row.node_name,
                'export_address': row.export_address,
                'node_state': row.node_state,
                'node_type': row.node_type,
                'catalog_path': row.catalog_path}
        batch = cursor.fetchmany(100)
    return facts
# module logic
def main():
    # Entry point: connect to Vertica over ODBC, gather every fact group,
    # and exit via exit_json.  The module is read-only, so check mode is
    # safe and the result is always changed=False.
    module = AnsibleModule(
        argument_spec=dict(
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            db=dict(default=None),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode = True)
    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")
    db = ''
    if module.params['db']:
        db = module.params['db']
    # NOTE(review): 'changed' is never used below; exit_json hardcodes
    # changed=False -- confirm this local can be dropped.
    changed = False
    try:
        # Build the ODBC DSN; ConnectionLoadBalance spreads connections
        # across cluster nodes.
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
            ).format(module.params['cluster'], module.params['port'], db,
                module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception, e:
        # Python 2 'except X, e' syntax -- this module targets py2 Ansible.
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))
    try:
        schema_facts = get_schema_facts(cursor)
        user_facts = get_user_facts(cursor)
        role_facts = get_role_facts(cursor)
        configuration_facts = get_configuration_facts(cursor)
        node_facts = get_node_facts(cursor)
        module.exit_json(changed=False,
            ansible_facts={'vertica_schemas': schema_facts,
                'vertica_users': user_facts,
                'vertica_roles': role_facts,
                'vertica_configuration': configuration_facts,
                'vertica_nodes': node_facts})
    except NotSupportedError, e:
        module.fail_json(msg=str(e))
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception, e:
        # NOTE(review): passes the exception object itself as msg, unlike the
        # str(e) used above -- confirm fail_json serializes it as intended.
        module.fail_json(msg=e)
# import ansible utilities
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
rahul67/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_feedgenerator.py | 104 | from __future__ import unicode_literals
import datetime
from django.utils import feedgenerator, tzinfo, unittest
class FeedgeneratorTest(unittest.TestCase):
    """
    Tests for the low-level syndication feed framework.
    """

    def test_get_tag_uri(self):
        """
        get_tag_uri() builds a TagURI from a URL plus a date.
        """
        tag = feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25))
        self.assertEqual(tag, 'tag:example.org,2004-10-25:/foo/bar/headline')

    def test_get_tag_uri_with_port(self):
        """
        get_tag_uri() handles URLs that carry an explicit port number.
        """
        tag = feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0))
        self.assertEqual(tag, 'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')

    def test_rfc2822_date(self):
        """
        rfc2822_date() formats naive datetime objects.
        """
        stamp = feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0))
        self.assertEqual(stamp, "Fri, 14 Nov 2008 13:37:00 -0000")

    def test_rfc2822_date_with_timezone(self):
        """
        rfc2822_date() renders the UTC offset of aware datetime objects.
        """
        tz = tzinfo.FixedOffset(datetime.timedelta(minutes=60))
        stamp = feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tz))
        self.assertEqual(stamp, "Fri, 14 Nov 2008 13:37:00 +0100")

    def test_rfc2822_date_without_time(self):
        """
        rfc2822_date() accepts plain date objects (midnight assumed).
        """
        stamp = feedgenerator.rfc2822_date(datetime.date(2008, 11, 14))
        self.assertEqual(stamp, "Fri, 14 Nov 2008 00:00:00 -0000")

    def test_rfc3339_date(self):
        """
        rfc3339_date() formats naive datetime objects.
        """
        stamp = feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0))
        self.assertEqual(stamp, "2008-11-14T13:37:00Z")

    def test_rfc3339_date_with_timezone(self):
        """
        rfc3339_date() renders the UTC offset of aware datetime objects.
        """
        tz = tzinfo.FixedOffset(datetime.timedelta(minutes=120))
        stamp = feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=tz))
        self.assertEqual(stamp, "2008-11-14T13:37:00+02:00")

    def test_rfc3339_date_without_time(self):
        """
        rfc3339_date() accepts plain date objects (midnight assumed).
        """
        stamp = feedgenerator.rfc3339_date(datetime.date(2008, 11, 14))
        self.assertEqual(stamp, "2008-11-14T00:00:00Z")

    def test_atom1_mime_type(self):
        """
        Atom feeds advertise a UTF-8 charset in their MIME type.
        """
        atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
        self.assertEqual(atom_feed.mime_type, "application/atom+xml; charset=utf-8")

    def test_rss_mime_type(self):
        """
        RSS feeds advertise a UTF-8 charset in their MIME type.
        """
        rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
        self.assertEqual(rss_feed.mime_type, "application/rss+xml; charset=utf-8")

    # Two regression tests for #14202
    def test_feed_without_feed_url_gets_rendered_without_atom_link(self):
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr')
        self.assertEqual(feed.feed['feed_url'], None)
        feed_content = feed.writeString('utf-8')
        for fragment in ('<atom:link', 'href="/feed/"', 'rel="self"'):
            self.assertNotIn(fragment, feed_content)

    def test_feed_with_feed_url_gets_rendered_with_atom_link(self):
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr', feed_url='/feed/')
        self.assertEqual(feed.feed['feed_url'], '/feed/')
        feed_content = feed.writeString('utf-8')
        for fragment in ('<atom:link', 'href="/feed/"', 'rel="self"'):
            self.assertIn(fragment, feed_content)
|
u-mobo/linux-imx | refs/heads/imx_3.10.17_umobo | tools/perf/scripts/python/netdev-times.py | 11271 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from src to dst converted from nsec to msec."""
    elapsed_nsec = dst - src
    return elapsed_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Honor the "dev=" option: skip packets seen on other devices.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # Columns: device, length, enqueue time, time spent in the qdisc
    # (queue -> driver xmit), time from driver xmit until the skb was freed.
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
# (printf-style templates consumed by print_receive(); the whitespace and
# '|' characters draw the event-tree margin of the report)
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    # All timestamps are printed relative to the first irq entry (base_t).
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed: with "dev=" set, require that at
    # least one of the hunk's irq names matches the requested device
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # First the hardirqs that raised NET_RX, with any netif_rx seen inside.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # Then the NET_RX softirq itself and the events it processed.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            # netif_receive_skb, possibly annotated with how the skb ended
            # (copied to a process, kfree'd, or consumed).
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    # Called by perf before any event: parse the script options
    # (tx / rx / dev=<name> / debug) from sys.argv into module globals.
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # Neither direction requested explicitly: show both.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    # Called by perf after the last event: replay all buffered events in
    # timestamp order, then print the rx and/or tx reports.
    # order all events in time (Python 2 only: cmp-based list.sort)
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                        b[EINFO_IDX_TIME]))
    # process all events: dispatch each tracepoint to its handle_* function
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # with the "debug" option, report buffer occupancy and overflow counts
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Buffer only NET_RX softirq entries; other vectors are ignored.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # Buffer only NET_RX softirq exits.
    # NOTE(review): resolves vec via the *_entry event's symbol table;
    # presumably both events share the same vec mapping -- confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    # Buffer only NET_RX softirq raises.
    # NOTE(review): resolves vec via the *_entry event's symbol table;
    # presumably both events share the same vec mapping -- confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
    irq, irq_name):
    # Buffer every hardirq entry; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        irq, irq_name)
    all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    # Buffer every hardirq exit; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    # Buffer napi poll events; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        napi, dev_name)
    all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
    skblen, dev_name):
    # Buffer received-skb events; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
    skblen, dev_name):
    # Buffer netif_rx events; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
    skbaddr, skblen, dev_name):
    # Buffer qdisc-enqueue events; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
    skbaddr, skblen, rc, dev_name):
    # Buffer driver-transmit events (rc is the NETDEV_TX_* return code).
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
    skbaddr, protocol, location):
    # Buffer skb-free events; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, protocol, location)
    all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    # Buffer skb-consume events; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr)
    all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
    skbaddr, skblen):
    # Buffer copy-to-userspace events; processed in time order by trace_end().
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    # Push a new hardirq record onto this cpu's stack of in-flight irqs.
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
    # Pop the matching hardirq record; keep it on the stack only if it
    # accumulated events (i.e. it raised NET_RX while in flight).
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    # Tag the hardirq currently in flight on this cpu as having raised NET_RX.
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
        or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    # Open a fresh NET_RX receive context for this cpu.
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    # Close the NET_RX context: join this cpu's pending hardirq records with
    # the events seen during the softirq into one displayable "receive hunk".
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Incomplete hunks (no irq or no softirq context) are dropped.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    # Attach a napi_poll event to this cpu's open NET_RX context, if any.
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    # netif_rx fires in hardirq context: attach the packet to the hardirq
    # currently in flight on this cpu.
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
        or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    # Record the received skb both in the open NET_RX context (for display)
    # and in rx_skb_list (for matching against later copy/free events).
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
            'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # Bound the matching buffer; count evictions for the debug report.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    # A packet entered the qdisc layer: start a tx record stamped with
    # the enqueue time.
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    # Bound the matching buffer; count evictions for the debug report.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    # On successful driver transmit, move the matching record from the
    # qdisc list to the in-flight xmit list and stamp the xmit time.
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                # Bound the buffer; count evictions for the debug report.
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    # A freed skb is one of: a tx packet dropped before reaching the driver
    # (forget it), a transmitted packet (completes a tx hunk), or a received
    # packet dropped before reaching a process (annotate its rx record).
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    # Normal completion of a transmitted packet: stamp the free time and
    # close its tx hunk.
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    # The packet payload was copied to user space: annotate the rx record
    # with the consuming process and stop tracking the skb.
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
|
gwu-libraries/sfm-twitter-harvester | refs/heads/master | twitter_rest_warc_iter.py | 1 | #!/usr/bin/env python3
from __future__ import absolute_import
from sfmutils.warc_iter import BaseWarcIter
from dateutil.parser import parse as date_parse
import json
import sys
SEARCH_URL = "https://api.twitter.com/1.1/search/tweets.json"
TIMELINE_URL = "https://api.twitter.com/1.1/statuses/user_timeline.json"
class TwitterRestWarcIter(BaseWarcIter):
    """Iterate tweets captured in WARCs of Twitter REST API harvests
    (search and user-timeline responses)."""

    def __init__(self, filepaths, limit_user_ids=None):
        BaseWarcIter.__init__(self, filepaths)
        # Optional whitelist of user id strings; None/empty means no filtering.
        self.limit_user_ids = limit_user_ids

    def _select_record(self, url):
        # Only search and user-timeline API responses carry tweets.
        return url.startswith((SEARCH_URL, TIMELINE_URL))

    def _item_iter(self, url, json_obj):
        # Skip Twitter API error payloads.
        if isinstance(json_obj, dict) and ('error' in json_obj or 'errors' in json_obj):
            return
        # Search responses wrap tweets in {"statuses": [...]}; timelines
        # are bare lists of tweets.
        if url.startswith(SEARCH_URL):
            statuses = json_obj.get("statuses", [])
        else:
            statuses = json_obj
        for status in statuses:
            yield ("twitter_status", status["id_str"],
                   date_parse(status["created_at"]), status)

    @staticmethod
    def item_types():
        return ["twitter_status"]

    def _select_item(self, item):
        # No filter configured: keep everything.
        if not self.limit_user_ids:
            return True
        # Otherwise keep only tweets authored by a whitelisted user id.
        return item.get("user", {}).get("id_str") in self.limit_user_ids
if __name__ == "__main__":
TwitterRestWarcIter.main(TwitterRestWarcIter)
|
alephdata/ingestors | refs/heads/dependabot/pip/click-8.0.1 | tests/test_audio.py | 1 | # -*- coding: utf-8 -*-
import datetime
from .support import TestCase
class AudioIngestorTest(TestCase):
    """Metadata extraction for audio files, driven by the memo.m4a fixture."""

    def test_audio(self):
        # Run the ingest pipeline on the bundled voice-memo fixture...
        fixture_path, entity = self.fixture("memo.m4a")
        self.manager.ingest(fixture_path, entity)
        # ...then verify the metadata the ingestor mapped onto the entity.
        self.assertEqual(entity.first("processingStatus"), self.manager.STATUS_SUCCESS)
        self.assertEqual(entity.first("title"), "Core Media Audio")
        self.assertEqual(entity.first("generator"), "com.apple.VoiceMemos (iOS 11.4)")
        self.assertEqual(
            entity.first("authoredAt"),
            datetime.datetime(2018, 6, 20, 12, 9, 42).isoformat(),
        )
        # NOTE(review): duration/samplingRate are stored as strings; the
        # duration unit is not evident here -- confirm against the ingestor.
        self.assertEqual(entity.first("duration"), "2808")
        self.assertEqual(entity.first("samplingRate"), "44100")
        self.assertEqual(entity.schema.name, "Audio")
|
jcpowermac/ansible | refs/heads/devel | lib/ansible/modules/system/aix_lvol.py | 73 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Alain Dejoux (@adejoux)
module: aix_lvol
short_description: Configure AIX LVM logical volumes
description:
- This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
version_added: "2.4"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
lv_type:
description:
- The type of the logical volume.
default: jfs2
size:
description:
- The size of the logical volume with one of the [MGT] units.
copies:
description:
- The number of copies of the logical volume. Maximum copies are 3.
default: '1'
policy:
choices: [ maximum, minimum ]
default: maximum
description:
- Sets the interphysical volume allocation policy. C(maximum) allocates logical partitions across the maximum number of physical volumes.
C(minimum) allocates logical partitions across the minimum number of physical volumes.
state:
choices: [ absent, present ]
default: present
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
opts:
description:
- Free-form options to be passed to the mklv command.
pvs:
description:
- Comma separated list of physical volumes e.g. C(hdisk1,hdisk2).
'''
EXAMPLES = r'''
- name: Create a logical volume of 512M
aix_lvol:
vg: testvg
lv: testlv
size: 512M
- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
aix_lvol:
vg: testvg
lv: test2lv
size: 512M
pvs: hdisk1,hdisk2
- name: Create a logical volume of 512M mirrored
aix_lvol:
vg: testvg
lv: test3lv
size: 512M
copies: 2
- name: Create a logical volume of 1G with a minimum placement policy
aix_lvol:
vg: rootvg
lv: test4lv
size: 1G
policy: minimum
- name: Create a logical volume with special options like mirror pool
aix_lvol:
vg: testvg
lv: testlv
size: 512M
opts: -p copy1=poolA -p copy2=poolB
- name: Extend the logical volume to 1200M
aix_lvol:
vg: testvg
lv: test4lv
size: 1200M
- name: Remove the logical volume
aix_lvol:
vg: testvg
lv: testlv
state: absent
'''
RETURN = r'''
msg:
type: string
description: A friendly message describing the task result.
returned: always
sample: Logical volume testlv created.
'''
import re
from ansible.module_utils.basic import AnsibleModule
def convert_size(module, size):
    """Convert a size string with an [MGT] suffix (e.g. '512M', '4G') to megabytes.

    :param module: AnsibleModule instance, used to abort via fail_json().
    :param size: size string; last character is the unit, the rest an integer.
    :return: size expressed in megabytes (int).

    Calls module.fail_json() (which does not return) when the unit suffix is
    missing/unknown or the numeric part is not an integer; the original code
    let a bare ValueError/IndexError escape for those inputs.
    """
    if not size:
        module.fail_json(msg="No valid size unit specified.")
    unit = size[-1].upper()
    units = ['M', 'G', 'T']
    try:
        # M -> 1024**0, G -> 1024**1, T -> 1024**2 (megabyte multiplier).
        multiplier = 1024 ** units.index(unit)
    except ValueError:
        module.fail_json(msg="No valid size unit specified.")
    try:
        amount = int(size[:-1])
    except ValueError:
        module.fail_json(msg="Failed to parse size value from %r." % size)
    return amount * multiplier
def round_ppsize(x, base=16):
    """Round *x* to the next multiple of *base* (the physical-partition size).

    The result is never smaller than *x*: after rounding to the nearest
    multiple, one more *base* is added whenever the result fell short.
    """
    rounded = int(base * round(float(x) / base))
    return rounded + base if rounded < x else rounded
def parse_lv(data):
    """Parse `lslv` output into a dict of name, vg, size (MB) and policy.

    Returns None when no "LOGICAL VOLUME:" header line is found in *data*
    (e.g. the command produced an error instead of a report).
    """
    lv_name = None
    for line in data.splitlines():
        header = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
        if header is not None:
            lv_name, vg_name = header.group(1), header.group(2)
            continue
        lps_match = re.search(r"LPs:\s+(\d+).*PPs", line)
        if lps_match is not None:
            logical_partitions = int(lps_match.group(1))
            continue
        ppsize_match = re.search(r"PP SIZE:\s+(\d+)", line)
        if ppsize_match is not None:
            partition_mb = int(ppsize_match.group(1))
            continue
        policy_match = re.search(r"INTER-POLICY:\s+(\w+)", line)
        if policy_match is not None:
            alloc_policy = policy_match.group(1)
    if not lv_name:
        return None
    # Size in MB = logical partitions * physical partition size.
    return {
        'name': lv_name,
        'vg': vg_name,
        'size': logical_partitions * partition_mb,
        'policy': alloc_policy,
    }
def parse_vg(data):
    """Parse `lsvg` output into a dict of name, size (MB), free (MB) and pp_size.

    Assumes *data* is well-formed `lsvg` output containing all four fields;
    like the rest of this module it will raise if any field is absent.
    """
    for line in data.splitlines():
        m = re.search(r"VOLUME GROUP:\s+(\w+)", line)
        if m is not None:
            vg_name = m.group(1)
            continue
        # The parenthesized value is the megabyte figure, e.g. "542 (4336 megabytes)".
        m = re.search(r"TOTAL PP.*\((\d+)", line)
        if m is not None:
            total_mb = int(m.group(1))
            continue
        m = re.search(r"PP SIZE:\s+(\d+)", line)
        if m is not None:
            pp_mb = int(m.group(1))
            continue
        m = re.search(r"FREE PP.*\((\d+)", line)
        if m is not None:
            free_mb = int(m.group(1))
    return {'name': vg_name, 'size': total_mb, 'free': free_mb, 'pp_size': pp_mb}
def main():
    """Ansible entry point: create, remove, resize or re-policy an AIX logical volume."""
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str', required=True),
            lv_type=dict(type='str', default='jfs2'),
            size=dict(type='str'),
            opts=dict(type='str', default=''),
            copies=dict(type='str', default='1'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
            pvs=dict(type='list', default=list())
        ),
        supports_check_mode=True,
    )
    vg = module.params['vg']
    lv = module.params['lv']
    lv_type = module.params['lv_type']
    size = module.params['size']
    opts = module.params['opts']
    copies = module.params['copies']
    policy = module.params['policy']
    state = module.params['state']
    pvs = module.params['pvs']
    # mklv takes physical volumes as trailing space-separated arguments.
    pv_list = ' '.join(pvs)
    # Map the module policy name onto the -e flag values used by mklv/chlv:
    # 'x' = maximum inter-disk allocation, 'm' = minimum.
    if policy == 'maximum':
        lv_policy = 'x'
    else:
        lv_policy = 'm'
    # Add echo command when running in check-mode so the real command is
    # printed instead of executed.
    if module.check_mode:
        test_opt = 'echo '
    else:
        test_opt = ''
    # check if system commands are available
    lsvg_cmd = module.get_bin_path("lsvg", required=True)
    lslv_cmd = module.get_bin_path("lslv", required=True)
    # Get information on volume group requested
    rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))
    if rc != 0:
        if state == 'absent':
            # Nothing to remove when the whole volume group is missing.
            module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
    this_vg = parse_vg(vg_info)
    if size is not None:
        # Calculate pp size and round it up based on pp size.
        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
    # Get information on logical volume requested
    rc, lv_info, err = module.run_command(
        "%s %s" % (lslv_cmd, lv))
    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
        # NOTE(review): when state == 'present' and lslv fails, lv_info holds
        # the error text; parse_lv() presumably returns None for it and the
        # create path below is taken — confirm intended.
    changed = False  # NOTE(review): assigned but never read below.
    this_lv = parse_lv(lv_info)
    if state == 'present' and not size:
        # Creating a new LV requires an explicit size.
        if this_lv is None:
            module.fail_json(msg="No size given.")
    if this_lv is None:
        if state == 'present':
            if lv_size > this_vg['free']:
                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
            # create LV
            mklv_cmd = module.get_bin_path("mklv", required=True)
            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
            else:
                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
    else:
        if state == 'absent':
            # remove LV
            rmlv_cmd = module.get_bin_path("rmlv", required=True)
            rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
            else:
                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
        else:
            if this_lv['policy'] != policy:
                # change lv allocation policy
                chlv_cmd = module.get_bin_path("chlv", required=True)
                rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
                else:
                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
            if vg != this_lv['vg']:
                module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))
            # from here the last remaining action is to resize it, if no size parameter is passed we do nothing.
            if not size:
                module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv))
            # resize LV based on absolute values
            if int(lv_size) > this_lv['size']:
                extendlv_cmd = module.get_bin_path("extendlv", required=True)
                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
                else:
                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
            elif lv_size < this_lv['size']:
                # Shrinking a logical volume is destructive on AIX, so refuse it.
                module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
            else:
                module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
|
dagmartin/capirca | refs/heads/master | tools/ldpush/cisconx.py | 7 | #!/usr/bin/python
#
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A push implementation for Cisco Nexus(NX-OS) devices.
This module implements the base device interface of base_device.py for
Cisco NX-OS devices.
"""
__author__ = 'mijith@google.com (Mijith)'
import gflags
import paramiko_device
import push_exceptions as exceptions
FLAGS = gflags.FLAGS
# Per-operation timeout knobs for Cisco Nexus devices; None defers to the
# framework-wide defaults.
gflags.DEFINE_float('cisconx_timeout_response', None,
                    'Cisco nexus device response timeout in seconds.')
gflags.DEFINE_float('cisconx_timeout_connect', None,
                    'Cisco nexus device connect timeout in seconds.')
gflags.DEFINE_float('cisconx_timeout_idle', None,
                    'Cisco nexus device idle timeout in seconds.')
gflags.DEFINE_float('cisconx_timeout_disconnect', None,
                    'Cisco nexus device disconnect timeout in seconds.')
gflags.DEFINE_float('cisconx_timeout_act_user', None,
                    'Cisco nexus device user activation timeout in seconds.')
# Trailing line NX-OS prints after a failed (invalid or incomplete) command.
# eg:.
# [ mijith@pulsar: ~ ].
# $ ssh gmonitor@us-mtv-43-fabsw1.mtv 'foo'.
# Syntax error while parsing 'foo'.
#
# Cmd exec error.
INVALID_OUT = 'Cmd exec error.'
class CiscoNexusDevice(paramiko_device.ParamikoDevice):
    """A base device model suitable for Cisco Nexus devices.

    See the base_device.BaseDevice method docstrings.
    """

    def __init__(self, **kwargs):
        self.vendor_name = 'cisconx'
        super(CiscoNexusDevice, self).__init__(**kwargs)

    def _Cmd(self, command, mode=None):
        """Cisco Nexus wrapper for ParamikoDevice._Cmd()."""
        output = super(CiscoNexusDevice, self)._Cmd(command, mode)
        # NX-OS terminates the output of both invalid and incomplete commands
        # with a literal 'Cmd exec error.' line, e.g.:
        #   $ ssh gmonitor@us-mtv-43-fabsw1.mtv 'foo'
        #   Syntax error while parsing 'foo'.
        #   Cmd exec error.
        # Surface that marker as a CmdError instead of returning the error text.
        if output.endswith(INVALID_OUT):
            raise exceptions.CmdError('INVALID COMMAND: %s' % command)
        return output
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.