| repo_name stringlengths 5–100 | ref stringlengths 12–67 | path stringlengths 4–244 | copies stringlengths 1–8 | content stringlengths 0–1.05M ⌀ |
|---|---|---|---|---|
slinderman/pyhsmm_spiketrains
|
refs/heads/master
|
experiments/make_figure8.py
|
1
|
"""
Plot true and inferred place fields for the hippocampal data
"""
import os
import cPickle
import gzip
from collections import namedtuple
import numpy as np
from scipy.io import loadmat
import matplotlib
matplotlib.rcParams.update({'font.sans-serif' : 'Helvetica',
'axes.labelsize': 9,
'xtick.labelsize' : 9,
'ytick.labelsize' : 9,
'axes.titlesize' : 11})
import matplotlib.pyplot as plt
import brewer2mpl
allcolors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
from pyhsmm_spiketrains.internals.utils import split_train_test
Results = namedtuple(
"Results", ["name", "loglikes", "predictive_lls",
"N_used", "alphas", "gammas",
"rates", "obs_hypers",
"samples", "timestamps"])
from hips.plotting.layout import *
from hips.plotting.colormaps import white_to_color_cmap
from hips.distributions.circular_distribution import CircularDistribution
from experiment_helper import load_hipp_data
def plot_place_fields(results, pos, center, radius, data,
figdir='.'):
"""
Plot the observation vector associated with a latent state
"""
model = results.samples
model.relabel_by_usage()
N_used = results.N_used[-1]
lmbdas = model.rates[:N_used,:]
stateseq = model.stateseqs[0]
occupancy = model.state_usages
# Plot a figure for each latent state
N_colors = 9
colors = brewer2mpl.get_map('Set1', 'qualitative', N_colors).mpl_colors
# State distributions
dists = []
for s in xrange(N_used):
cd = CircularDistribution(center, radius)
cd.fit_xy(pos[stateseq==s,0], pos[stateseq==s,1])
dists.append(cd)
# Plot the log likelihood as a function of iteration
fig = create_figure((5,4))
plt.figtext(0.05/5.0, 3.8/4.0, "A")
toplot = [0, 13, 28, 38]
for i,c in enumerate(toplot):
left = 1.25 * i + 0.05
print "Plotting cell ", c
color = colors[np.mod(c,N_colors)]
cmap = white_to_color_cmap(color)
# Compute the inferred place field
inf_place_field = dists[0] * lmbdas[0,c] * occupancy[0]
for s in range(1,N_used):
inf_place_field += dists[s] * lmbdas[s,c] * occupancy[s]
# inf_place_field = sum([d*(l*o) for d,l,o in zip(dists, lmbdas[c,:], occupancy)])
spks = np.array(data[:,c] > 0).ravel()
true_place_field = CircularDistribution(center, radius)
true_place_field.fit_xy(pos[spks, 0], pos[spks, 1])
# Plot the locations of this state
ax = create_axis_at_location(fig, left, 2.65, 1.15, 1.15, transparent=True)
remove_plot_labels(ax)
# Plot the empirical location distribution
inf_place_field.plot(ax=ax, cmap=cmap, plot_data=True, plot_colorbar=False)
ax.set_title('Inf. Place Field %d' % (c+1),
fontdict={'fontsize' : 9})
# Now plot the true place field
ax = create_axis_at_location(fig, left, 1.25, 1.15, 1.15, transparent=True)
remove_plot_labels(ax)
true_place_field.plot(ax=ax, cmap=cmap, plot_data=True, plot_colorbar=False)
ax.set_title('True Place Field %d' % (c+1),
fontdict={'fontsize' : 9})
# Plot the KL divergence histogram
kls = np.zeros(model.N)
tvs = np.zeros(model.N)
for c in xrange(model.N):
# Compute the inferred place field
inf_place_field = dists[0] * lmbdas[0,c] * occupancy[0]
for s in range(1,N_used):
inf_place_field += dists[s] * lmbdas[s,c] * occupancy[s]
# inf_place_field = sum([d*(l*o) for d,l,o in zip(dists, lmbdas[c,:], occupancy)])
spks = np.array(data[:,c] > 0).ravel()
true_place_field = CircularDistribution(center, radius)
true_place_field.fit_xy(pos[spks, 0], pos[spks, 1])
kls[c] = compute_place_field_KL(inf_place_field, true_place_field)
tvs[c] = compute_place_field_TV(inf_place_field, true_place_field)
bin_centers = np.arange(0.006, 0.0141, 0.001)
bin_width = 0.001
bin_edges = np.concatenate((bin_centers - bin_width/2.0,
[bin_centers[-1] + bin_width/2.0]))
ax = create_axis_at_location(fig, 0.5, 0.5, 4., .5, transparent=True)
ax.hist(tvs, bins=bin_edges, facecolor=allcolors[1])
ax.set_xlim(0.005, 0.015)
ax.set_xticks(bin_centers)
ax.set_xticklabels(["{0:.3f}".format(bc) if i % 2 == 0 else ""
for i,bc in enumerate(bin_centers)])
ax.set_xlabel("$TV(p_{inf}, p_{true})$")
ax.set_yticks(np.arange(17, step=4))
ax.set_ylabel("Count")
plt.figtext(0.05/5.0, 1.1/4.0, "B")
print "TVs of plotted cells: "
print tvs[toplot]
# fig.savefig(os.path.join(figdir,'figure8.pdf'))
fig.savefig(os.path.join(figdir,'figure8.png'))
plt.show()
def compute_place_field_KL(dist1, dist2):
"""
Compute KL(p,q) = E_p[log p/q] for p = true dist and q = inf dist
:param dist1:
:param dist2:
:return:
"""
p = dist1.pdf
q = dist2.pdf
a = dist1.areas
kl = (p*a * np.log(p/q)).sum()
return kl
def compute_place_field_TV(dist1, dist2):
"""
Compute a total-variation-style distance, sum |p - q|, between the
densities of the two place field distributions
:param dist1:
:param dist2:
:return:
"""
p = dist1.pdf
q = dist2.pdf
tv = abs(p-q).sum()
return tv
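# Illustrative sketch (not part of the original script): the two comparisons
# above are discretized integrals over the arena bins. These hypothetical
# helpers assume NumPy arrays of per-bin densities (plus per-bin areas for the
# KL case), matching the `pdf`/`areas` attributes used above.
def _discrete_kl_sketch(p_pdf, q_pdf, areas):
    """Discretized KL(p || q) ~ sum_i p_i * a_i * log(p_i / q_i)."""
    return (p_pdf * areas * np.log(p_pdf / q_pdf)).sum()

def _discrete_tv_sketch(p_pdf, q_pdf):
    """Total-variation-style distance sum_i |p_i - q_i|, as used above."""
    return np.abs(p_pdf - q_pdf).sum()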
if __name__ == "__main__":
dataset = "hipp_2dtrack_a"
N, S_train, pos_train, S_test, pos_test, center, radius = \
load_hipp_data(dataname=dataset)
# Load results
runnum = 1
results_dir = os.path.join("results", dataset, "run%03d" % runnum)
results_type = "hdphmm_scale"
results_file = os.path.join(results_dir, results_type + ".pkl.gz")
with gzip.open(results_file, "r") as f:
results = cPickle.load(f)
plot_place_fields(results, pos_train, center, radius, S_train,
figdir=results_dir)
|
codacy/python-codacy-coverage
|
refs/heads/master
|
tests/tests.py
|
1
|
import json
import os
import unittest
import codacy.reporter
HERE = os.path.abspath(os.path.dirname(__file__))
def _file_location(*args):
return os.path.join(HERE, *args)
class ReporterTests(unittest.TestCase):
def compare_parse_result(self, generated, expected_filename):
def file_get_contents(filename):
with open(filename) as f:
return f.read()
def to_utf8(d):
if type(d) is dict:
result = {}
for key, value in d.items():
result[to_utf8(key)] = to_utf8(value)
return result
elif type(d) is unicode:
return d.encode('utf8')
else:
return d
json_content = file_get_contents(expected_filename)
expected = json.loads(json_content)
self.assertEqual(to_utf8(generated), to_utf8(expected))
def test_parser_coverage3(self):
self.maxDiff = None
parsed = codacy.reporter.parse_report_file(
_file_location('coverage3', 'cobertura.xml'), '')
rounded = codacy.reporter.merge_and_round_reports([parsed])
self.compare_parse_result(rounded,
_file_location('coverage3', 'coverage.json'))
def test_parser_coverage4(self):
self.maxDiff = None
parsed = codacy.reporter.parse_report_file(
_file_location('coverage4', 'cobertura.xml'), '')
rounded = codacy.reporter.merge_and_round_reports([parsed])
self.compare_parse_result(rounded,
_file_location('coverage4', 'coverage.json'))
def test_parser_git_filepath(self):
self.maxDiff = None
parsed = codacy.reporter.parse_report_file(
_file_location('filepath', 'cobertura.xml.tpl'), '')
rounded = codacy.reporter.merge_and_round_reports([parsed])
self.compare_parse_result(rounded,
_file_location('filepath', 'coverage.json'))
def test_merge(self):
self.maxDiff = None
generated3 = codacy.reporter.parse_report_file(
_file_location('coverage-merge', 'cobertura.3.xml'), '')
generated4 = codacy.reporter.parse_report_file(
_file_location('coverage-merge', 'cobertura.4.xml'), '')
result = codacy.reporter.merge_and_round_reports([generated3, generated4])
self.compare_parse_result(result, _file_location('coverage-merge', 'coverage-merge.json'))
if __name__ == '__main__':
unittest.main()
|
api0cradle/Empire
|
refs/heads/master
|
lib/modules/python/persistence/multi/crontab.py
|
2
|
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Persistence with crontab',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
'Description': 'This module establishes persistence via crontab',
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ['']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run the module on.',
'Required' : True,
'Value' : ''
},
'Remove' : {
'Description' : 'Remove Persistence. True/False',
'Required' : False,
'Value' : ''
},
'Hourly' : {
'Description' : 'Hourly persistence.',
'Required' : False,
'Value' : ''
},
'Hour' : {
'Description' : 'Hour to callback. 24hr format.',
'Required' : False,
'Value' : ''
},
'FileName' : {
'Description' : 'File name for the launcher.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
Remove = self.options['Remove']['Value']
Hourly = self.options['Hourly']['Value']
Hour = self.options['Hour']['Value']
FileName = self.options['FileName']['Value']
script = """
import subprocess
import sys
Remove = "%s"
Hourly = "%s"
Hour = "%s"
if Remove == "True":
cmd = 'crontab -l | grep -v "%s" | crontab -'
print subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
print subprocess.Popen('crontab -l', shell=True, stdout=subprocess.PIPE).stdout.read()
print "Finished"
else:
if Hourly == "True":
cmd = 'crontab -l | { cat; echo "0 * * * * %s"; } | crontab -'
print subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
print subprocess.Popen('crontab -l', shell=True, stdout=subprocess.PIPE).stdout.read()
print subprocess.Popen('chmod +x %s', shell=True, stdout=subprocess.PIPE).stdout.read()
print "Finished"
elif Hour:
cmd = 'crontab -l | { cat; echo "%s * * * * %s"; } | crontab -'
print subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
print subprocess.Popen('crontab -l', shell=True, stdout=subprocess.PIPE).stdout.read()
print subprocess.Popen('chmod +x %s', shell=True, stdout=subprocess.PIPE).stdout.read()
print "Finished"
""" % (Remove, Hourly, Hour, FileName, FileName, FileName, Hour, FileName, FileName)
return script
|
1tush/sentry
|
refs/heads/master
|
src/sentry/api/serializers/models/project.py
|
15
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMemberType, Project, Team
@register(Project)
class ProjectSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].team.organization
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
result = {}
for project in item_list:
try:
team = team_map[project.team_id]
except KeyError:
access_type = None
else:
access_type = team.access_type
result[project] = {
'access_type': access_type,
}
return result
def serialize(self, obj, attrs, user):
d = {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'isPublic': obj.public,
'dateCreated': obj.date_added,
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
},
}
return d
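# Note (editorial, illustrative): the `<=` comparisons in `serialize` rely on
# OrganizationMemberType being an ordered scale where smaller values mean
# broader access, so an OWNER access_type satisfies both the 'owner' and the
# 'admin' checks, while a less privileged member satisfies neither.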
|
erikrose/pip
|
refs/heads/develop
|
pip/_vendor/requests/packages/urllib3/util/response.py
|
928
|
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
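# Usage sketch (illustrative, not part of urllib3): a plain file object is
# handled via its `closed` attribute; HTTPResponse-like wrappers fall back to
# checking whether their internal `fp` has been released.
if __name__ == "__main__":
    with open(__file__) as fh:
        assert not is_fp_closed(fh)  # still open inside the `with` block
    assert is_fp_closed(fh)          # closed once the block exits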
|
hehongliang/tensorflow
|
refs/heads/master
|
tensorflow/contrib/estimator/python/estimator/replicate_model_fn.py
|
19
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""replicate_model_fn python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import replicate_model_fn
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
replicate_model_fn.__all__ = [
s for s in dir(replicate_model_fn) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.replicate_model_fn import *
|
blrm/openshift-tools
|
refs/heads/stg
|
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/test/test_load_and_handle_cert.py
|
78
|
'''
Unit tests for the load_and_handle_cert method
'''
import datetime
import os
import sys
import pytest
MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
sys.path.insert(1, MODULE_PATH)
# pylint: disable=import-error,wrong-import-position,missing-docstring
# pylint: disable=invalid-name,redefined-outer-name
import openshift_cert_expiry # noqa: E402
# TODO: More testing on the results of the load_and_handle_cert function
# could be implemented here as well, such as verifying subjects
# match up.
@pytest.fixture(params=['OpenSSLCertificate', 'FakeOpenSSLCertificate'])
def loaded_cert(request, valid_cert):
""" parameterized fixture to provide load_and_handle_cert results
for both OpenSSL and FakeOpenSSL parsed certificates
"""
now = datetime.datetime.now()
openshift_cert_expiry.HAS_OPENSSL = request.param == 'OpenSSLCertificate'
# valid_cert['cert_file'] is a `py.path.LocalPath` object and
# provides a read_text() method for reading the file contents.
cert_string = valid_cert['cert_file'].read_text('utf8')
(subject,
expiry_date,
time_remaining,
serial) = openshift_cert_expiry.load_and_handle_cert(cert_string, now)
return {
'now': now,
'subject': subject,
'expiry_date': expiry_date,
'time_remaining': time_remaining,
'serial': serial,
}
def test_serial(loaded_cert, valid_cert):
"""Params:
* `loaded_cert` comes from the `loaded_cert` fixture in this file
* `valid_cert` comes from the 'valid_cert' fixture in conftest.py
"""
valid_cert_serial = valid_cert['cert'].get_serial_number()
assert loaded_cert['serial'] == valid_cert_serial
def test_expiry(loaded_cert):
"""Params:
* `loaded_cert` comes from the `loaded_cert` fixture in this file
"""
expiry_date = loaded_cert['expiry_date']
time_remaining = loaded_cert['time_remaining']
now = loaded_cert['now']
assert expiry_date == now + time_remaining
|
Charlotte-Morgan/inasafe
|
refs/heads/develop
|
safe/gui/tools/test/test_options_dialog.py
|
6
|
# coding=utf-8
"""Test InaSAFE Options Dialog."""
import unittest
import logging
from safe.definitions.constants import INASAFE_TEST
from safe.definitions.default_settings import inasafe_default_settings
from safe.definitions.messages import disclaimer
from safe.gui.tools.options_dialog import OptionsDialog
from safe.test.utilities import get_qgis_app
from safe.common.utilities import temp_dir
from safe.defaults import default_north_arrow_path, supporters_logo_path
from qgis.PyQt.QtCore import QSettings
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)
LOGGER = logging.getLogger('InaSAFE')
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
class TestOptionsDialog(unittest.TestCase):
"""Test Options Dialog."""
def setUp(self):
"""Fixture run before all tests."""
self.qsetting = QSettings(INASAFE_TEST)
self.qsetting.clear()
def tearDown(self):
"""Fixture run after each test."""
# Make sure it's empty
self.qsetting.clear()
def test_setup_dialog(self):
"""Test Setup Options Dialog."""
dialog = OptionsDialog(
parent=PARENT, iface=IFACE, qsetting=INASAFE_TEST)
self.assertIsNotNone(dialog)
# Check default values
self.assertEqual(
dialog.cbxVisibleLayersOnly.isChecked(),
inasafe_default_settings['visibleLayersOnlyFlag'])
self.assertEqual(
dialog.cbxSetLayerNameFromTitle.isChecked(),
inasafe_default_settings['set_layer_from_title_flag'])
self.assertEqual(
dialog.cbxZoomToImpact.isChecked(),
inasafe_default_settings['setZoomToImpactFlag'])
self.assertEqual(
dialog.cbxHideExposure.isChecked(),
inasafe_default_settings['setHideExposureFlag'])
self.assertEqual(
dialog.cbxUseSelectedFeaturesOnly.isChecked(),
inasafe_default_settings['useSelectedFeaturesOnly'])
self.assertEqual(
dialog.leKeywordCachePath.text(),
inasafe_default_settings['keywordCachePath'])
self.assertEqual(
dialog.template_warning_checkbox.isChecked(),
inasafe_default_settings['template_warning_verbose'])
self.assertEqual(
dialog.organisation_on_dock_checkbox.isChecked(),
inasafe_default_settings['showOrganisationLogoInDockFlag'])
self.assertEqual(
dialog.cbxDevMode.isChecked(),
inasafe_default_settings['developer_mode'])
self.assertEqual(
dialog.leNorthArrowPath.text(), default_north_arrow_path())
self.assertEqual(
dialog.organisation_logo_path_line_edit.text(),
supporters_logo_path())
self.assertEqual(dialog.leReportTemplatePath.text(), '')
self.assertEqual(dialog.txtDisclaimer.toPlainText(), disclaimer())
self.assertEqual(
dialog.leUserDirectoryPath.text(), temp_dir('impacts'))
self.assertEqual(
dialog.organisation_line_edit.text(),
inasafe_default_settings['ISO19115_ORGANIZATION'])
self.assertEqual(
dialog.website_line_edit.text(),
inasafe_default_settings['ISO19115_URL'])
self.assertEqual(
dialog.email_line_edit.text(),
inasafe_default_settings['ISO19115_EMAIL'])
self.assertEqual(
dialog.license_line_edit.text(),
inasafe_default_settings['ISO19115_LICENSE'])
def test_update_settings(self):
"""Test update InaSAFE Option works."""
# Create new option dialog
dialog = OptionsDialog(
parent=PARENT, iface=IFACE, qsetting=INASAFE_TEST)
# Update some state
new_state = not inasafe_default_settings['visibleLayersOnlyFlag']
dialog.cbxVisibleLayersOnly.setChecked(new_state)
new_organization = 'Super Organization'
dialog.organisation_line_edit.setText(new_organization)
# Accept the dialog
dialog.accept()
# Check the value in QSettings
# The next lines are a hack because Windows QSettings returns a string
# rather than a bool...TS
value = self.qsetting.value('inasafe/visibleLayersOnlyFlag')
if value == 'false':
value = False
if value == 'true':
value = True
self.assertEqual(
new_state, value)
self.assertEqual(
new_organization,
self.qsetting.value('inasafe/ISO19115_ORGANIZATION'))
# Open the options dialog
dialog = OptionsDialog(
iface=IFACE, parent=PARENT, qsetting=INASAFE_TEST)
# Check the state of the dialog after save the settings
self.assertEqual(new_state, dialog.cbxVisibleLayersOnly.isChecked())
self.assertEqual(
new_organization, dialog.organisation_line_edit.text())
def test_mode(self):
"""Test for checking that the state is correct for the mode.
If this test fails, perhaps one of the following is the cause:
1. You add / remove tab in the options.
2. You rename the tab's name.
3. The function show_welcome_dialog or show_option_dialog is changed
"""
# Welcome mode
dialog = OptionsDialog(parent=PARENT, iface=IFACE)
dialog.show_welcome_dialog()
expected_tabs = [
dialog.welcome_tab,
dialog.organisation_profile_tab,
dialog.preference_tab
]
message = 'Tab count should be %d in welcome dialog.' % len(
expected_tabs)
self.assertEqual(dialog.tabWidget.count(), len(expected_tabs), message)
message = 'Current tab index should be 0.'
self.assertEqual(dialog.tabWidget.currentIndex(), 0, message)
for index, expected_tab in enumerate(expected_tabs):
dialog.tabWidget.setCurrentIndex(index)
message = 'Current tab should be %s.' % expected_tab.objectName()
current_tab = dialog.tabWidget.currentWidget()
self.assertEqual(current_tab, expected_tab, message)
# Usual option mode
dialog = OptionsDialog(parent=PARENT, iface=IFACE)
dialog.show_option_dialog()
expected_tabs = [
dialog.organisation_profile_tab,
dialog.preference_tab,
dialog.gis_environment_tab,
dialog.earthquake_tab,
dialog.template_option_tab,
dialog.demographic_defaults_tab,
dialog.advanced_tab
]
message = 'Tab count should be %d in options dialog.' % len(
expected_tabs)
self.assertEqual(dialog.tabWidget.count(), len(expected_tabs), message)
message = 'Current tab index should be 0.'
self.assertEqual(dialog.tabWidget.currentIndex(), 0, message)
for index, expected_tab in enumerate(expected_tabs):
dialog.tabWidget.setCurrentIndex(index)
message = 'Current tab should be %s.' % expected_tab.objectName()
current_tab = dialog.tabWidget.currentWidget()
self.assertEqual(current_tab, expected_tab, message)
if __name__ == '__main__':
suite = unittest.makeSuite(TestOptionsDialog, 'test')
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
lancekrogers/music-network
|
refs/heads/master
|
cleff/profiles/migrations/0018_auto_20150808_1917.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('profiles', '0017_auto_20150808_0615'),
]
operations = [
migrations.RemoveField(
model_name='comrade',
name='id',
),
migrations.AlterField(
model_name='comrade',
name='musicians',
field=models.OneToOneField(serialize=False, to='profiles.SavedMusician', blank=True, primary_key=True),
),
]
|
CSF-JH/crossbarexamples
|
refs/heads/master
|
sharedregs/python/backend.py
|
9
|
###############################################################################
##
## Copyright (C) 2015, Tavendo GmbH and/or collaborators. All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
##
###############################################################################
import os
import sys
from twisted.python import log
from twisted.internet.defer import inlineCallbacks
from autobahn import wamp
from autobahn.wamp.types import RegisterOptions
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
class MyComponent(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
self._ident = "MyComponent (PID {}, Session {})".format(os.getpid(), details.session)
yield self.register(self.add2, u'com.example.add2', options=RegisterOptions(invoke=u'roundrobin'))
print("MyComponent: add2() registered!")
def add2(self, a, b):
print("add2 called on {}".format(self._ident))
return {'result': a + b, 'ident': self._ident}
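# Illustrative caller sketch (editorial assumption, not part of this example's
# files): with several MyComponent processes registered under
# invoke=u'roundrobin', the router dispatches successive com.example.add2
# calls across the registered backend instances.
class MyCaller(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        res = yield self.call(u'com.example.add2', 2, 3)
        print("add2 answered by {}: {}".format(res['ident'], res['result']))
        yield self.leave()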
if __name__ == '__main__':
log.startLogging(sys.stdout)
runner = ApplicationRunner(url=u"ws://127.0.0.1:8080/ws", realm=u"realm1")
runner.run(MyComponent)
|
40223101/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/atexit.py
|
743
|
"""allow programmer to define multiple exit functions to be executedupon normal program termination.
Two public functions, register and unregister, are defined.
"""
class __loader__(object):
pass
def _clear(*args,**kw):
"""_clear() -> None
Clear the list of previously registered exit functions."""
pass
def _run_exitfuncs(*args,**kw):
"""_run_exitfuncs() -> None
Run all registered exit functions."""
pass
def register(*args,**kw):
"""register(func, *args, **kwargs) -> func
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator."""
pass
def unregister(*args,**kw):
"""unregister(func) -> None
Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered"""
pass
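# Usage sketch (illustrative): in CPython the same API is used either directly
# or as a decorator, since register() returns the function it was given, e.g.
#
#     @register
#     def goodbye():
#         print("normal program termination")
#
#     register(print, "bye")   # extra args are forwarded to the callable at exit
#
# In this Brython stub the functions above are placeholders and do nothing.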
|
QijunPan/ansible
|
refs/heads/devel
|
test/units/mock/generator.py
|
97
|
# Copyright 2016 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Mapping
def make_method(func, args, kwargs):
def test_method(self):
func(self, *args, **kwargs)
# Format the argument string
arg_string = ', '.join(repr(a) for a in args)
kwarg_string = ', '.join('{0}={1}'.format(item[0], repr(item[1])) for item in kwargs.items())
arg_list = []
if arg_string:
arg_list.append(arg_string)
if kwarg_string:
arg_list.append(kwarg_string)
test_method.__name__ = 'test_{0}({1})'.format(func.__name__, ', '.join(arg_list))
return test_method
def add_method(func, *combined_args):
"""
Add a test case via a class decorator.
nose uses generators for this, but that doesn't work with unittest.TestCase
subclasses, so we have to write our own.
The first argument to this decorator is a test function. All subsequent
arguments are the arguments to create each generated test function with in
the following format:
Each set of arguments is a two-tuple. The first element is an iterable of
positional arguments. The second is a dict representing the kwargs.
"""
def wrapper(cls):
for combined_arg in combined_args:
if len(combined_arg) == 2:
args = combined_arg[0]
kwargs = combined_arg[1]
elif isinstance(combined_arg[0], Mapping):
args = []
kwargs = combined_arg[0]
else:
args = combined_arg[0]
kwargs = {}
test_method = make_method(func, args, kwargs)
setattr(cls, test_method.__name__, test_method)
return cls
return wrapper
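# Usage sketch (illustrative; check_sum and TestSumExample are hypothetical
# names, not part of Ansible). Each (args, kwargs) two-tuple passed to the
# decorator produces one generated test_* method on the TestCase subclass.
import unittest

def check_sum(self, a, b, expected=0):
    self.assertEqual(a + b, expected)

@add_method(check_sum,
            ((1, 2), {'expected': 3}),
            ((0, 0), {}))
class TestSumExample(unittest.TestCase):
    pass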
|
spisneha25/django
|
refs/heads/master
|
tests/aggregation_regress/tests.py
|
66
|
from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Q, Avg, Count, Max, StdDev, Sum, Value, Variance,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, SelfRefFK, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = HardbackBook.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15), weight=4.5)
cls.b6 = HardbackBook.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15), weight=3.7)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))),
1
)
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))),
4
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
# Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(
num_authors=Count('authors')).values().get(isbn='013790395')
self.assertEqual(obj, {
'contact_id': self.a8.id,
'id': self.b5.id,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': self.p3.id,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(name="Jonno's House of Books").annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.b1.id, 'id__count': 2},
{'pub': self.b2.id, 'id__count': 1},
{'pub': self.b3.id, 'id__count': 2},
{'pub': self.b4.id, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
# re-aliased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
# Check that there is just one GROUP BY clause (zero commas means at
# most one clause)
self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in values(), so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
# Regression for #10766 - Shouldn't be able to reference an aggregate
# field in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# Test that when a field occurs on the LHS of a HAVING clause it
# appears correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = Book.objects.values_list("publisher__name").annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
).order_by("-publisher__name")
self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
).order_by("num_awards")
self.assertQuerysetEqual(
qs, [
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann"
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The explicitly provided annotation name in this case poses no
        # problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
def test_annotate_joins(self):
"""
        Test that the base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is an exclude() for an
        fk field in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn('id', group_by[0][0])
self.assertNotIn('name', group_by[0][0])
self.assertNotIn('age', group_by[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('age', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('contact', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
        Regression test for #10870: Aggregates with joins ignore extra
        filters provided by setup_joins.
        Tests aggregations with generic reverse relations.
"""
django_book = Book.objects.get(name='Practical Django Projects')
ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(django_book))
ItemTag.objects.create(object_id=django_book.id, tag='django',
content_type=ContentType.objects.get_for_model(django_book))
        # Assign a tag to a model with the same PK as the book above. If the
        # JOIN used in aggregation doesn't have the content type as part of
        # the condition, the annotation will also count the 'hi mom' tag for
        # that book.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
content_type=ContentType.objects.get_for_model(wmpk))
ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(ai_book))
self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
('Practical Django Projects', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Artificial Intelligence: A Modern Approach', 0),
('Python Web Development with Django', 0),
('Sams Teach Yourself Django in 24 Hours', 0),
('The Definitive Guide to Django: Web Development Done Right', 0)
]
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2) | Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Test that aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [1, 4, 5, 6],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Check that splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
Check that an F() object referring to related column works correctly
in group by.
"""
qs = Book.objects.annotate(
acount=Count('authors')
).filter(
acount=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
self.assertEqual(vals, {
'select__sum': 10,
'select__avg': Approximate(1.666, places=2),
})
def test_annotate_on_relation(self):
book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 30)
def test_annotate_distinct_aggregate(self):
# There are three books with rating of 4.0 and two of the books have
# the same price. Hence, the distinct removes one rating of 4.0
# from the results.
vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, the existing join is unpromoted when filtering for an already
        # promoted join.
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable, the first use by annotate will be LOUTER
qs = Charlie.objects.annotate(Count('alfa__name'))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count('contact__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
t1 = SelfRefFK.objects.create(name='t1')
SelfRefFK.objects.create(name='t2', parent=t1)
SelfRefFK.objects.create(name='t3', parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
[('t1', 2), ('t2', 0), ('t3', 0)],
lambda x: (x.name, x.num_children)
)
|
craig5/salt-essentials-utils
|
refs/heads/master
|
example-data/extend-2/5-runner/runner/monitor.py
|
3
|
#!/usr/bin/env python
import salt.client
def procs(num_procs_raw):
'''
Show any minions that are above a certain number of processes.
'''
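    # Hypothetical example invocation from the master (runner name assumed
    # from this module's path, runner/monitor.py):
    #     salt-run monitor.procs 200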
master_client = salt.client.LocalClient()
num_procs = int(num_procs_raw)
all_procs = master_client.cmd('*', 'cmd.run', ['ps -e | wc -l'])
for minion in all_procs.keys():
cur_procs = int(all_procs[minion])
if cur_procs > num_procs:
print 'Minion {0}: {1} > {2}'.format(minion, cur_procs, num_procs)
|
hpcuantwerpen/easybuild-framework
|
refs/heads/develop
|
test/framework/sandbox/easybuild/easyblocks/generic/__init__.py
|
197
|
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
|
antoan2/incubator-mxnet
|
refs/heads/master
|
python/mxnet/contrib/tensorboard.py
|
34
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""TensorBoard functions that can be used to log various status during epoch."""
from __future__ import absolute_import
import logging
class LogMetricsCallback(object):
"""Log metrics periodically in TensorBoard.
    This callback works almost the same as `callback.Speedometer`, but writes a TensorBoard event file
    for visualization. For more usage, please refer to https://github.com/dmlc/tensorboard
Parameters
----------
logging_dir : str
TensorBoard event file directory.
After that, use `tensorboard --logdir=path/to/logs` to launch TensorBoard visualization.
prefix : str
Prefix for a metric name of `scalar` value.
        You might want to use this param to leverage the TensorBoard plot feature,
        where TensorBoard plots different curves in one graph when they have the same `name`.
        The following example shows the usage (how to compare a train and an eval metric in the same graph).
Examples
--------
>>> # log train and eval metrics under different directories.
>>> training_log = 'logs/train'
>>> evaluation_log = 'logs/eval'
    >>> # in this case, each training and evaluation metric pair has the same name,
    >>> # so you can add a prefix to keep them separate.
>>> batch_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(training_log)]
>>> eval_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(evaluation_log)]
>>> # run
>>> model.fit(train,
>>> ...
>>> batch_end_callback = batch_end_callbacks,
>>> eval_end_callback = eval_end_callbacks)
>>> # Then use `tensorboard --logdir=logs/` to launch TensorBoard visualization.
"""
def __init__(self, logging_dir, prefix=None):
self.prefix = prefix
try:
from tensorboard import SummaryWriter
self.summary_writer = SummaryWriter(logging_dir)
except ImportError:
logging.error('You can install tensorboard via `pip install tensorboard`.')
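            # Note: if the import fails, self.summary_writer is never set, so
            # calling this object later will raise an AttributeError.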
def __call__(self, param):
"""Callback to log training speed and metrics in TensorBoard."""
if param.eval_metric is None:
return
name_value = param.eval_metric.get_name_value()
for name, value in name_value:
if self.prefix is not None:
name = '%s-%s' % (self.prefix, name)
self.summary_writer.add_scalar(name, value)
|
gshivani/ansible-modules-extras
|
refs/heads/devel
|
database/mysql/mysql_replication.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage mysql replication
(c) 2013, Balazs Pocze <banyek@gawker.com>
Certain parts are taken from Mark Theunissen's mysqldb module
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: mysql_replication
short_description: Manage MySQL replication
description:
    - Manages MySQL server replication: get slave or master status, change the master host, and start or stop the slave thread.
version_added: "1.3"
author: '"Balazs Pocze (@banyek)" <banyek@gawker.com>'
options:
mode:
description:
- module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE)
required: False
choices:
- getslave
- getmaster
- changemaster
- stopslave
- startslave
default: getslave
login_user:
description:
            - username used to connect to the mysql host; if defined, login_password is also needed.
required: False
login_password:
description:
            - password used to connect to the mysql host; if defined, login_user is also needed.
required: False
login_host:
description:
            - mysql host to connect to
required: False
login_port:
description:
            - Port of the MySQL server. Requires login_host to be defined as something other than localhost if login_port is used
required: False
default: 3306
version_added: "1.9"
login_unix_socket:
description:
            - unix socket used to connect to the mysql server
master_host:
description:
- same as mysql variable
master_user:
description:
- same as mysql variable
master_password:
description:
- same as mysql variable
master_port:
description:
- same as mysql variable
master_connect_retry:
description:
- same as mysql variable
master_log_file:
description:
- same as mysql variable
master_log_pos:
description:
- same as mysql variable
relay_log_file:
description:
- same as mysql variable
relay_log_pos:
description:
- same as mysql variable
master_ssl:
description:
- same as mysql variable
choices: [ 0, 1 ]
master_ssl_ca:
description:
- same as mysql variable
master_ssl_capath:
description:
- same as mysql variable
master_ssl_cert:
description:
- same as mysql variable
master_ssl_key:
description:
- same as mysql variable
master_ssl_cipher:
description:
- same as mysql variable
master_auto_position:
description:
            - whether the host uses GTID-based replication or not
required: false
default: null
version_added: "2.0"
'''
EXAMPLES = '''
# Stop mysql slave thread
- mysql_replication: mode=stopslave
# Get master binlog file name and binlog position
- mysql_replication: mode=getmaster
# Change master to master server 192.168.1.1 and use binary log 'mysql-bin.000009' with position 4578
- mysql_replication: mode=changemaster master_host=192.168.1.1 master_log_file=mysql-bin.000009 master_log_pos=4578
# Check slave status using port 3308
- mysql_replication: mode=getslave login_host=ansible.example.com login_port=3308
'''
import ConfigParser
import getpass
import os
import warnings
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
def get_master_status(cursor):
cursor.execute("SHOW MASTER STATUS")
masterstatus = cursor.fetchone()
return masterstatus
def get_slave_status(cursor):
cursor.execute("SHOW SLAVE STATUS")
slavestatus = cursor.fetchone()
return slavestatus
def stop_slave(cursor):
try:
cursor.execute("STOP SLAVE")
stopped = True
except:
stopped = False
return stopped
def start_slave(cursor):
try:
cursor.execute("START SLAVE")
started = True
except:
started = False
return started
def changemaster(cursor, chm, chm_params):
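    # chm is a list of "CLAUSE=%(name)s" fragments and chm_params maps each
    # name to its value, so the CHANGE MASTER TO statement is built from the
    # joined clauses and executed with parameter substitution.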
sql_param = ",".join(chm)
query = 'CHANGE MASTER TO %s' % sql_param
cursor.execute(query, chm_params)
def strip_quotes(s):
""" Remove surrounding single or double quotes
>>> print strip_quotes('hello')
hello
>>> print strip_quotes('"hello"')
hello
>>> print strip_quotes("'hello'")
hello
>>> print strip_quotes("'hello")
'hello
"""
single_quote = "'"
double_quote = '"'
if s.startswith(single_quote) and s.endswith(single_quote):
s = s.strip(single_quote)
elif s.startswith(double_quote) and s.endswith(double_quote):
s = s.strip(double_quote)
return s
def config_get(config, section, option):
""" Calls ConfigParser.get and strips quotes
See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
"""
return strip_quotes(config.get(section, option))
def load_mycnf():
config = ConfigParser.RawConfigParser()
mycnf = os.path.expanduser('~/.my.cnf')
if not os.path.exists(mycnf):
return False
try:
config.readfp(open(mycnf))
except (IOError):
return False
# We support two forms of passwords in .my.cnf, both pass= and password=,
# as these are both supported by MySQL.
try:
passwd = config_get(config, 'client', 'password')
except (ConfigParser.NoOptionError):
try:
passwd = config_get(config, 'client', 'pass')
except (ConfigParser.NoOptionError):
return False
# If .my.cnf doesn't specify a user, default to user login name
try:
user = config_get(config, 'client', 'user')
except (ConfigParser.NoOptionError):
user = getpass.getuser()
creds = dict(user=user, passwd=passwd)
return creds
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]),
master_auto_position=dict(default=False, type='bool'),
master_host=dict(default=None),
master_user=dict(default=None),
master_password=dict(default=None),
master_port=dict(default=None, type='int'),
master_connect_retry=dict(default=None, type='int'),
master_log_file=dict(default=None),
master_log_pos=dict(default=None, type='int'),
relay_log_file=dict(default=None),
relay_log_pos=dict(default=None, type='int'),
master_ssl=dict(default=False, type='bool'),
master_ssl_ca=dict(default=None),
master_ssl_capath=dict(default=None),
master_ssl_cert=dict(default=None),
master_ssl_key=dict(default=None),
master_ssl_cipher=dict(default=None),
)
)
user = module.params["login_user"]
password = module.params["login_password"]
host = module.params["login_host"]
port = module.params["login_port"]
mode = module.params["mode"]
master_host = module.params["master_host"]
master_user = module.params["master_user"]
master_password = module.params["master_password"]
master_port = module.params["master_port"]
master_connect_retry = module.params["master_connect_retry"]
master_log_file = module.params["master_log_file"]
master_log_pos = module.params["master_log_pos"]
relay_log_file = module.params["relay_log_file"]
relay_log_pos = module.params["relay_log_pos"]
master_ssl = module.params["master_ssl"]
master_ssl_ca = module.params["master_ssl_ca"]
master_ssl_capath = module.params["master_ssl_capath"]
master_ssl_cert = module.params["master_ssl_cert"]
master_ssl_key = module.params["master_ssl_key"]
master_ssl_cipher = module.params["master_ssl_cipher"]
master_auto_position = module.params["master_auto_position"]
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
else:
warnings.filterwarnings('error', category=MySQLdb.Warning)
# Either the caller passes both a username and password with which to connect to
# mysql, or they pass neither and allow this module to read the credentials from
# ~/.my.cnf.
login_password = module.params["login_password"]
login_user = module.params["login_user"]
if login_user is None and login_password is None:
mycnf_creds = load_mycnf()
if mycnf_creds is False:
login_user = "root"
login_password = ""
else:
login_user = mycnf_creds["user"]
login_password = mycnf_creds["passwd"]
elif login_password is None or login_user is None:
module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
try:
if module.params["login_unix_socket"]:
db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password)
elif module.params["login_port"] != 3306 and module.params["login_host"] == "localhost":
module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined")
else:
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password)
except Exception, e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
try:
cursor = db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
except Exception, e:
module.fail_json(msg="Trouble getting DictCursor from db_connection: %s" % e)
if mode in "getmaster":
masterstatus = get_master_status(cursor)
try:
module.exit_json( **masterstatus )
except TypeError:
module.fail_json(msg="Server is not configured as mysql master")
elif mode in "getslave":
slavestatus = get_slave_status(cursor)
try:
module.exit_json( **slavestatus )
except TypeError:
module.fail_json(msg="Server is not configured as mysql slave")
elif mode in "changemaster":
chm=[]
chm_params = {}
if master_host:
chm.append("MASTER_HOST=%(master_host)s")
chm_params['master_host'] = master_host
if master_user:
chm.append("MASTER_USER=%(master_user)s")
chm_params['master_user'] = master_user
if master_password:
chm.append("MASTER_PASSWORD=%(master_password)s")
chm_params['master_password'] = master_password
if master_port is not None:
chm.append("MASTER_PORT=%(master_port)s")
chm_params['master_port'] = master_port
if master_connect_retry is not None:
chm.append("MASTER_CONNECT_RETRY=%(master_connect_retry)s")
chm_params['master_connect_retry'] = master_connect_retry
if master_log_file:
chm.append("MASTER_LOG_FILE=%(master_log_file)s")
chm_params['master_log_file'] = master_log_file
if master_log_pos is not None:
chm.append("MASTER_LOG_POS=%(master_log_pos)s")
chm_params['master_log_pos'] = master_log_pos
if relay_log_file:
chm.append("RELAY_LOG_FILE=%(relay_log_file)s")
chm_params['relay_log_file'] = relay_log_file
if relay_log_pos is not None:
chm.append("RELAY_LOG_POS=%(relay_log_pos)s")
chm_params['relay_log_pos'] = relay_log_pos
if master_ssl:
chm.append("MASTER_SSL=1")
if master_ssl_ca:
chm.append("MASTER_SSL_CA=%(master_ssl_ca)s")
chm_params['master_ssl_ca'] = master_ssl_ca
if master_ssl_capath:
chm.append("MASTER_SSL_CAPATH=%(master_ssl_capath)s")
chm_params['master_ssl_capath'] = master_ssl_capath
if master_ssl_cert:
chm.append("MASTER_SSL_CERT=%(master_ssl_cert)s")
chm_params['master_ssl_cert'] = master_ssl_cert
if master_ssl_key:
chm.append("MASTER_SSL_KEY=%(master_ssl_key)s")
chm_params['master_ssl_key'] = master_ssl_key
if master_ssl_cipher:
chm.append("MASTER_SSL_CIPHER=%(master_ssl_cipher)s")
chm_params['master_ssl_cipher'] = master_ssl_cipher
if master_auto_position:
chm.append("MASTER_AUTO_POSITION = 1")
changemaster(cursor, chm, chm_params)
module.exit_json(changed=True)
elif mode in "startslave":
started = start_slave(cursor)
if started is True:
module.exit_json(msg="Slave started ", changed=True)
else:
module.exit_json(msg="Slave already started (Or cannot be started)", changed=False)
elif mode in "stopslave":
stopped = stop_slave(cursor)
if stopped is True:
module.exit_json(msg="Slave stopped", changed=True)
else:
module.exit_json(msg="Slave already stopped", changed=False)
# import module snippets
from ansible.module_utils.basic import *
main()
warnings.simplefilter("ignore")
|
camptocamp/odoo
|
refs/heads/master
|
addons/resource/tests/test_resource.py
|
243
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.resource.tests.common import TestResourceCommon
class TestResource(TestResourceCommon):
def test_00_intervals(self):
intervals = [
(
datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S')
)
]
# Test: interval cleaning
cleaned_intervals = self.resource_calendar.interval_clean(intervals)
self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning')
# First interval: 03, unchanged
self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Second interval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12
self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Third interval: 04, 17-21, 18-19 being inside 17-21
self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Test: disjoint removal
working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'))
result = self.resource_calendar.interval_remove_leaves(working_interval, intervals)
self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval')
# First interval: 04, 14-17
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval: 03, 8-10 untouched
self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval: 04, 08-11:30
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals, backwards
cleaned_intervals.reverse()
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval: 04, 17-21 untouched
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval: 04, 12:30-14
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
def test_10_calendar_basics(self):
""" Testing basic method of resource.calendar """
cr, uid = self.cr, self.uid
# --------------------------------------------------
# Test1: get_next_day
# --------------------------------------------------
# Test: next day: next day after day1 is day4
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4+1 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day1-1 is day1
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing')
# --------------------------------------------------
# Test2: get_previous_day
# --------------------------------------------------
# Test: previous day: previous day before day1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4 is day1
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4+1 is day4
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day1-1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# --------------------------------------------------
# Test3: misc
# --------------------------------------------------
weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id)
self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing')
attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5])
self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]),
'resource_calendar: wrong attendances filtering by weekdays computing')
def test_20_calendar_working_intervals(self):
""" Testing working intervals computing method of resource.calendar """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day0 without leaves: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day3 without leaves: 2 intervals
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves outside range: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day0 with leaves: 2 intervals because of a leave between 9 and 12, ending at 15:45:30
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=8) + relativedelta(days=7),
end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7),
compute_leaves=True)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals')
def test_30_calendar_working_days(self):
""" Testing calendar hours computation on a working day """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day1, beginning at 10:30 -> work from 10:30 (arrival) until 16:00
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: hour computation for same interval, should give 5.5
wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing')
# Test: day1+7 on leave, without leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7)
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+7 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7),
compute_leaves=True
)
# Result: day1 (08->09 + 12->16)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with resource leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True,
resource_id=self.resource1_id
)
# Result: nothing, because on leave
self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing')
def test_40_calendar_hours_scheduling(self):
""" Testing calendar hours scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test0: schedule hours backwards (old interval_min_get)
# Done without calendar
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, None, self.date1, 40, resource=False)
# res: (datetime.datetime(2013, 2, 7, 9, 8, 7), datetime.datetime(2013, 2, 12, 9, 8, 7))
# --------------------------------------------------
# Test1: schedule hours backwards (old interval_min_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, self.calendar_id, self.date1, 40, resource=False)
# (datetime.datetime(2013, 1, 29, 9, 0), datetime.datetime(2013, 1, 29, 16, 0))
# (datetime.datetime(2013, 2, 1, 8, 0), datetime.datetime(2013, 2, 1, 13, 0))
# (datetime.datetime(2013, 2, 1, 16, 0), datetime.datetime(2013, 2, 1, 23, 0))
# (datetime.datetime(2013, 2, 5, 8, 0), datetime.datetime(2013, 2, 5, 16, 0))
# (datetime.datetime(2013, 2, 8, 8, 0), datetime.datetime(2013, 2, 8, 13, 0))
# (datetime.datetime(2013, 2, 8, 16, 0), datetime.datetime(2013, 2, 8, 23, 0))
# (datetime.datetime(2013, 2, 12, 8, 0), datetime.datetime(2013, 2, 12, 9, 0))
res = self.resource_calendar.schedule_hours(cr, uid, self.calendar_id, -40, day_dt=self.date1.replace(minute=0, second=0))
# current day, limited at 09:00 because of day_dt specified -> 1 hour
self.assertEqual(res[-1][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-1][1], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
# previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours
self.assertEqual(res[-2][0], datetime.strptime('2013-02-08 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-2][1], datetime.strptime('2013-02-08 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][0], datetime.strptime('2013-02-08 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][1], datetime.strptime('2013-02-08 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][0], datetime.strptime('2013-02-05 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][1], datetime.strptime('2013-02-05 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][0], datetime.strptime('2013-02-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][1], datetime.strptime('2013-02-01 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][0], datetime.strptime('2013-02-01 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][1], datetime.strptime('2013-02-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
# 7 hours remaining
self.assertEqual(res[-7][0], datetime.strptime('2013-01-29 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-7][1], datetime.strptime('2013-01-29 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
# Compute scheduled hours
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test2: schedule hours forward (old interval_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=False, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 2, 22, 8, 0), datetime.datetime(2013, 2, 22, 13, 0))
# (datetime.datetime(2013, 2, 22, 16, 0), datetime.datetime(2013, 2, 22, 23, 0))
# (datetime.datetime(2013, 2, 26, 8, 0), datetime.datetime(2013, 2, 26, 16, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0)
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-22 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-26 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=self.resource1_id, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 13, 0))
# (datetime.datetime(2013, 3, 1, 16, 0), datetime.datetime(2013, 3, 1, 23, 0))
# (datetime.datetime(2013, 3, 5, 8, 0), datetime.datetime(2013, 3, 5, 16, 0))
# (datetime.datetime(2013, 3, 8, 8, 0), datetime.datetime(2013, 3, 8, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0),
compute_leaves=True,
resource_id=self.resource1_id
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][0], datetime.strptime('2013-03-01 11:30:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][1], datetime.strptime('2013-03-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][0], datetime.strptime('2013-03-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][1], datetime.strptime('2013-03-01 22:30:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test3: working hours (old _interval_hours_get)
# --------------------------------------------------
# old API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=True)
self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=False, resource_id=self.resource1_id)
self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation')
# old API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=False)
self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation')
# --------------------------------------------------
# Test4: misc
# --------------------------------------------------
# Test without calendar and default_interval
res = self.resource_calendar.get_working_hours(
cr, uid, None,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0),
compute_leaves=True, resource_id=self.resource1_id,
default_interval=(8, 16))
self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation')
def test_50_calendar_schedule_days(self):
""" Testing calendar days scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test1: with calendar
# --------------------------------------------------
res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, 5, day_date=self.date1)
self.assertEqual(res.date(), datetime.strptime('2013-02-26 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
res = self.resource_calendar.schedule_days_get_date(
cr, uid, self.calendar_id, 5, day_date=self.date1,
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res.date(), datetime.strptime('2013-03-01 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
# --------------------------------------------------
# Test2: misc
# --------------------------------------------------
# Without a calendar only calendar days are counted: 12 -> 16, i.e. 5 days with the default (8, 16) interval
res = self.resource_calendar.schedule_days_get_date(cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16))
self.assertEqual(res, datetime.strptime('2013-02-16 16:00:00', _format), 'resource_calendar: wrong days scheduling')
def seconds(td):
assert isinstance(td, timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
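# Illustrative check (added for clarity, not part of the original test module):
# seconds() above mirrors timedelta.total_seconds(), available since Python 2.7.
if __name__ == '__main__':
    _example_td = timedelta(days=1, hours=2, seconds=30)
    assert seconds(_example_td) == _example_td.total_seconds() == 93630.0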
|
spulec/moto
|
refs/heads/master
|
moto/ec2/responses/vpn_connections.py
|
2
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
class VPNConnections(BaseResponse):
def create_vpn_connection(self):
type = self._get_param("Type")
cgw_id = self._get_param("CustomerGatewayId")
vgw_id = self._get_param("VpnGatewayId")
static_routes = self._get_param("StaticRoutesOnly")
vpn_connection = self.ec2_backend.create_vpn_connection(
type, cgw_id, vgw_id, static_routes_only=static_routes
)
template = self.response_template(CREATE_VPN_CONNECTION_RESPONSE)
return template.render(vpn_connection=vpn_connection)
def delete_vpn_connection(self):
vpn_connection_id = self._get_param("VpnConnectionId")
vpn_connection = self.ec2_backend.delete_vpn_connection(vpn_connection_id)
template = self.response_template(DELETE_VPN_CONNECTION_RESPONSE)
return template.render(vpn_connection=vpn_connection)
def describe_vpn_connections(self):
vpn_connection_ids = self._get_multi_param("VpnConnectionId")
filters = filters_from_querystring(self.querystring)
vpn_connections = self.ec2_backend.get_all_vpn_connections(
vpn_connection_ids=vpn_connection_ids, filters=filters
)
template = self.response_template(DESCRIBE_VPN_CONNECTION_RESPONSE)
return template.render(vpn_connections=vpn_connections)
CREATE_VPN_CONNECTION_RESPONSE = """
<CreateVpnConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpnConnection>
<vpnConnectionId>{{ vpn_connection.id }}</vpnConnectionId>
<state>pending</state>
<customerGatewayConfiguration>
<vpn_connection id="{{ vpn_connection.id }}">
<customer_gateway_id>{{ vpn_connection.customer_gateway_id }}</customer_gateway_id>
<vpn_gateway_id>{{ vpn_connection.vpn_gateway_id }}</vpn_gateway_id>
<vpn_connection_type>ipsec.1</vpn_connection_type>
<ipsec_tunnel>
<customer_gateway>
<tunnel_outside_address>
<ip_address>12.1.2.3</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.42</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>65000</asn>
<hold_time>30</hold_time>
</bgp>
</customer_gateway>
<vpn_gateway>
<tunnel_outside_address>
<ip_address>52.2.144.13</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.41</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>7224</asn>
<hold_time>30</hold_time>
</bgp>
</vpn_gateway>
<ike>
<authentication_protocol>sha1</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>28800</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>main</mode>
<pre_shared_key>Iw2IAN9XUsQeYUrkMGP3kP59ugFDkfHg</pre_shared_key>
</ike>
<ipsec>
<protocol>esp</protocol>
<authentication_protocol>hmac-sha1-96</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>3600</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>tunnel</mode>
<clear_df_bit>true</clear_df_bit>
<fragmentation_before_encryption>true</fragmentation_before_encryption>
<tcp_mss_adjustment>1387</tcp_mss_adjustment>
<dead_peer_detection>
<interval>10</interval>
<retries>3</retries>
</dead_peer_detection>
</ipsec>
</ipsec_tunnel>
<ipsec_tunnel>
<customer_gateway>
<tunnel_outside_address>
<ip_address>12.1.2.3</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.42</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>65000</asn>
<hold_time>30</hold_time>
</bgp>
</customer_gateway>
<vpn_gateway>
<tunnel_outside_address>
<ip_address>52.2.144.13</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.41</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>7224</asn>
<hold_time>30</hold_time>
</bgp>
</vpn_gateway>
<ike>
<authentication_protocol>sha1</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>28800</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>main</mode>
<pre_shared_key>Iw2IAN9XUsQeYUrkMGP3kP59ugFDkfHg</pre_shared_key>
</ike>
<ipsec>
<protocol>esp</protocol>
<authentication_protocol>hmac-sha1-96</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>3600</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>tunnel</mode>
<clear_df_bit>true</clear_df_bit>
<fragmentation_before_encryption>true</fragmentation_before_encryption>
<tcp_mss_adjustment>1387</tcp_mss_adjustment>
<dead_peer_detection>
<interval>10</interval>
<retries>3</retries>
</dead_peer_detection>
</ipsec>
</ipsec_tunnel>
</vpn_connection>
</customerGatewayConfiguration>
<type>ipsec.1</type>
<customerGatewayId>{{ vpn_connection.customer_gateway_id }}</customerGatewayId>
<vpnGatewayId>{{ vpn_connection.vpn_gateway_id }}</vpnGatewayId>
<tagSet>
{% for tag in vpn_connection.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</vpnConnection>
</CreateVpnConnectionResponse>"""
CREATE_VPN_CONNECTION_ROUTE_RESPONSE = """
<CreateVpnConnectionRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>4f35a1b2-c2c3-4093-b51f-abb9d7311990</requestId>
<return>true</return>
</CreateVpnConnectionRouteResponse>"""
DELETE_VPN_CONNECTION_RESPONSE = """
<DeleteVpnConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpnConnectionResponse>"""
DELETE_VPN_CONNECTION_ROUTE_RESPONSE = """
<DeleteVpnConnectionRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>4f35a1b2-c2c3-4093-b51f-abb9d7311990</requestId>
<return>true</return>
</DeleteVpnConnectionRouteResponse>"""
DESCRIBE_VPN_CONNECTION_RESPONSE = """
<DescribeVpnConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpnConnectionSet>
{% for vpn_connection in vpn_connections %}
<item>
<vpnConnectionId>{{ vpn_connection.id }}</vpnConnectionId>
<state>available</state>
<customerGatewayConfiguration>
<vpn_connection id="{{ vpn_connection.id }}">
<customer_gateway_id>{{ vpn_connection.customer_gateway_id }}</customer_gateway_id>
<vpn_gateway_id>{{ vpn_connection.vpn_gateway_id }}</vpn_gateway_id>
<vpn_connection_type>ipsec.1</vpn_connection_type>
<ipsec_tunnel>
<customer_gateway>
<tunnel_outside_address>
<ip_address>12.1.2.3</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.42</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>65000</asn>
<hold_time>30</hold_time>
</bgp>
</customer_gateway>
<vpn_gateway>
<tunnel_outside_address>
<ip_address>52.2.144.13</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.41</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>7224</asn>
<hold_time>30</hold_time>
</bgp>
</vpn_gateway>
<ike>
<authentication_protocol>sha1</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>28800</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>main</mode>
<pre_shared_key>Iw2IAN9XUsQeYUrkMGP3kP59ugFDkfHg</pre_shared_key>
</ike>
<ipsec>
<protocol>esp</protocol>
<authentication_protocol>hmac-sha1-96</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>3600</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>tunnel</mode>
<clear_df_bit>true</clear_df_bit>
<fragmentation_before_encryption>true</fragmentation_before_encryption>
<tcp_mss_adjustment>1387</tcp_mss_adjustment>
<dead_peer_detection>
<interval>10</interval>
<retries>3</retries>
</dead_peer_detection>
</ipsec>
</ipsec_tunnel>
<ipsec_tunnel>
<customer_gateway>
<tunnel_outside_address>
<ip_address>12.1.2.3</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.42</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>65000</asn>
<hold_time>30</hold_time>
</bgp>
</customer_gateway>
<vpn_gateway>
<tunnel_outside_address>
<ip_address>52.2.144.13</ip_address>
</tunnel_outside_address>
<tunnel_inside_address>
<ip_address>169.254.44.41</ip_address>
<network_mask>255.255.255.252</network_mask>
<network_cidr>30</network_cidr>
</tunnel_inside_address>
<bgp>
<asn>7224</asn>
<hold_time>30</hold_time>
</bgp>
</vpn_gateway>
<ike>
<authentication_protocol>sha1</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>28800</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>main</mode>
<pre_shared_key>Iw2IAN9XUsQeYUrkMGP3kP59ugFDkfHg</pre_shared_key>
</ike>
<ipsec>
<protocol>esp</protocol>
<authentication_protocol>hmac-sha1-96</authentication_protocol>
<encryption_protocol>aes-128-cbc</encryption_protocol>
<lifetime>3600</lifetime>
<perfect_forward_secrecy>group2</perfect_forward_secrecy>
<mode>tunnel</mode>
<clear_df_bit>true</clear_df_bit>
<fragmentation_before_encryption>true</fragmentation_before_encryption>
<tcp_mss_adjustment>1387</tcp_mss_adjustment>
<dead_peer_detection>
<interval>10</interval>
<retries>3</retries>
</dead_peer_detection>
</ipsec>
</ipsec_tunnel>
</vpn_connection>
</customerGatewayConfiguration>
<type>ipsec.1</type>
<customerGatewayId>{{ vpn_connection.customer_gateway_id }}</customerGatewayId>
<vpnGatewayId>{{ vpn_connection.vpn_gateway_id }}</vpnGatewayId>
<tagSet>
{% for tag in vpn_connection.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</vpnConnectionSet>
</DescribeVpnConnectionsResponse>"""
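# Illustrative usage sketch (comment only, added for exposition; assumes moto's
# mock_ec2 decorator and a boto3 client, which are the usual entry points for
# exercising these responses -- not part of this module):
#
#   from moto import mock_ec2
#   import boto3
#
#   @mock_ec2
#   def demo():
#       ec2 = boto3.client("ec2", region_name="us-east-1")
#       cgw = ec2.create_customer_gateway(Type="ipsec.1", PublicIp="12.1.2.3", BgpAsn=65000)
#       vgw = ec2.create_vpn_gateway(Type="ipsec.1")
#       vpn = ec2.create_vpn_connection(
#           Type="ipsec.1",
#           CustomerGatewayId=cgw["CustomerGateway"]["CustomerGatewayId"],
#           VpnGatewayId=vgw["VpnGateway"]["VpnGatewayId"])
#       # the template above reports a freshly created connection as "pending"
#       assert vpn["VpnConnection"]["State"] == "pending"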
|
gojira/tensorflow
|
refs/heads/master
|
tensorflow/contrib/signal/python/ops/util_ops.py
|
71
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility ops shared across tf.contrib.signal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fractions
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def gcd(a, b, name=None):
"""Returns the greatest common divisor via Euclid's algorithm.
Args:
a: The dividend. A scalar integer `Tensor`.
b: The divisor. A scalar integer `Tensor`.
name: An optional name for the operation.
Returns:
A scalar `Tensor` representing the greatest common divisor between `a` and
`b`.
Raises:
ValueError: If `a` or `b` are not scalar integers.
"""
with ops.name_scope(name, 'gcd', [a, b]):
a = ops.convert_to_tensor(a)
b = ops.convert_to_tensor(b)
a.shape.assert_has_rank(0)
b.shape.assert_has_rank(0)
if not a.dtype.is_integer:
raise ValueError('a must be an integer type. Got: %s' % a.dtype)
if not b.dtype.is_integer:
raise ValueError('b must be an integer type. Got: %s' % b.dtype)
# TPU requires static shape inference. GCD is used for subframe size
# computation, so we should prefer static computation where possible.
const_a = tensor_util.constant_value(a)
const_b = tensor_util.constant_value(b)
if const_a is not None and const_b is not None:
return ops.convert_to_tensor(fractions.gcd(const_a, const_b))
cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
body = lambda a, b: [b, math_ops.mod(a, b)]
a, b = control_flow_ops.while_loop(cond, body, [a, b], back_prop=False)
return a
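# Illustrative reference (added for exposition, not part of the TensorFlow API):
# the while_loop above performs the same iteration as this plain-Python
# Euclidean algorithm, mapping (a, b) -> (b, a mod b) until b == 0.
def _gcd_reference(a, b):
  """Plain-Python sketch of the graph-mode loop above (assumed example only)."""
  while b > 0:
    a, b = b, a % b
  return a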
|
xsynergy510x/android_external_chromium_org
|
refs/heads/cm-12.1
|
tools/lsan/PRESUBMIT.py
|
32
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
import re
def CheckChange(input_api, output_api):
errors = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('suppressions.txt'):
continue
for line_num, line in enumerate(f.NewContents()):
line = line.strip()
if line.startswith('#') or not line:
continue
if not line.startswith('leak:'):
errors.append('"%s" should be "leak:..." in %s line %d' %
(line, f.LocalPath(), line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
def GetPreferredTryMasters(project, change):
return {
'tryserver.chromium.linux': {
'linux_asan': set(['compile']),
},
'tryserver.chromium.mac': {
'mac_asan': set(['compile']),
}
}
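# Illustrative note (not part of the original presubmit script): the check above
# accepts comment lines, blank lines, and LeakSanitizer suppressions of the form
# "leak:<pattern>". A hypothetical suppressions.txt fragment that would pass:
#
#   # Suppress a known leak in a third-party library.
#   leak:third_party/libfoo/foo.cc
#   leak:FooClass::LeakyMethod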
|
ESOedX/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/lms/admin.py
|
2
|
"""
Pages object for the Django's /admin/ views.
"""
from __future__ import absolute_import
from bok_choy.page_object import PageObject
from common.test.acceptance.pages.lms import BASE_URL
class ChangeUserAdminPage(PageObject):
"""
Change user page in Django's admin.
"""
def __init__(self, browser, user_pk):
super(ChangeUserAdminPage, self).__init__(browser)
self.user_pk = user_pk
@property
def url(self):
"""
Returns the page URL for the page based on self.user_pk.
"""
return u'{base}/admin/auth/user/{user_pk}/'.format(
base=BASE_URL,
user_pk=self.user_pk,
)
@property
def username(self):
"""
Reads the read-only username.
"""
return self.q(css='.field-username .readonly').text[0]
@property
def first_name_element(self):
"""
Selects the first name element.
"""
return self.q(css='[name="first_name"]')
@property
def first_name(self):
"""
Reads the first name value from the input field.
"""
return self.first_name_element.attrs('value')[0]
@property
def submit_element(self):
"""
Gets the "Save" submit element.
Note that there are multiple submit elements in the change view.
"""
return self.q(css='input.default[type="submit"]')
def submit(self):
"""
Submits the form.
"""
self.submit_element.click()
def change_first_name(self, first_name):
"""
Changes the first name and submits the form.
Args:
first_name: The first name as unicode.
"""
self.first_name_element.fill(first_name)
self.submit()
def is_browser_on_page(self):
"""
Returns True if the browser is currently on the right page.
"""
return self.q(css='#user_form').present
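# Illustrative usage sketch (comment only, added for clarity; "browser" and the
# user primary key come from the surrounding bok_choy test harness and are
# assumptions, not part of this module):
#
#   page = ChangeUserAdminPage(browser, user_pk=42)
#   page.visit()                      # provided by bok_choy's PageObject
#   page.change_first_name(u'Ada')    # fills the input and submits the form
#   assert page.first_name == u'Ada'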
|
boland1992/seissuite_iran
|
refs/heads/master
|
build/lib/seissuite/ant/pscrosscorr (copy).py
|
2
|
#!/usr/bin/env python
"""
Module that contains classes holding cross-correlations and related
processing, such as frequency-time analysis (FTAN) to measure
dispersion curves.
"""
from seissuite.ant import pserrors, psstation, psutils, pstomo
import obspy.signal
import obspy.xseed
import obspy.signal.cross_correlation
import obspy.signal.filter
from obspy.core import AttribDict, read, UTCDateTime, Stream
from obspy.signal.invsim import cosTaper
import numpy as np
from numpy.fft import rfft, irfft, fft, ifft, fftfreq
from scipy import integrate
from scipy.interpolate import RectBivariateSpline, interp1d
from scipy.optimize import minimize
import itertools as it
import os
import shutil
import glob
import pickle
import copy
from collections import OrderedDict
import datetime as dt
from calendar import monthrange
import scipy
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import gridspec
plt.ioff() # turning off interactive mode
# ====================================================
# parsing configuration file to import some parameters
# ====================================================
from seissuite.ant.psconfig import (CROSSCORR_DIR, FTAN_DIR, PERIOD_BANDS,
CROSSCORR_TMAX, PERIOD_RESAMPLE,
CROSSCORR_SKIPLOCS, MINFILL, FREQMIN,
FREQMAX, CORNERS, ZEROPHASE,ONEBIT_NORM,
FREQMIN_EARTHQUAKE, FREQMAX_EARTHQUAKE,
WINDOW_TIME, WINDOW_FREQ,
SIGNAL_WINDOW_VMIN, SIGNAL_WINDOW_VMAX,
SIGNAL2NOISE_TRAIL, NOISE_WINDOW_SIZE,
RAWFTAN_PERIODS, CLEANFTAN_PERIODS,
FTAN_VELOCITIES, FTAN_ALPHA,
STRENGTH_SMOOTHING, USE_INSTANTANEOUS_FREQ,
MAX_RELDIFF_INST_NOMINAL_PERIOD,
MIN_INST_PERIOD, HALFWINDOW_MEDIAN_PERIOD,
MAX_RELDIFF_INST_MEDIAN_PERIOD, BBOX_LARGE,
BBOX_SMALL)
# ========================
# Constants and parameters
# ========================
EPS = 1.0e-5
ONESEC = dt.timedelta(seconds=1)
class MonthYear:
"""
Hashable class holding a month of a year
"""
def __init__(self, *args, **kwargs):
"""
Usage: MonthYear(3, 2012) or MonthYear(month=3, year=2012) or
MonthYear(date[time](2012, 3, 12))
"""
if len(args) == 2 and not kwargs:
month, year = args
elif not args and set(kwargs.keys()) == {'month', 'year'}:
month, year = kwargs['month'], kwargs['year']
elif len(args) == 1 and not kwargs:
month, year = args[0].month, args[0].year
else:
s = ("Usage: MonthYear(3, 2012) or MonthYear(month=3, year=2012) or "
"MonthYear(date[time](2012, 3, 12))")
raise Exception(s)
self.m = month
self.y = year
def __str__(self):
"""
E.g., 03-2012
"""
return '{:02d}-{}'.format(self.m, self.y)
def __repr__(self):
"""
E.g., <03-2012>
"""
return '<{}>'.format(str(self))
def __eq__(self, other):
"""
Comparison with other, which can be a MonthYear object,
or a sequence of int (month, year)
@type other: L{MonthYear} or (int, int)
"""
try:
return self.m == other.m and self.y == other.y
except:
try:
return (self.m, self.y) == tuple(other)
except:
return False
def __hash__(self):
return hash(self.m) ^ hash(self.y)
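# Illustrative examples (comment only, added for exposition) of the constructor
# signatures and equality semantics defined above:
#
#   MonthYear(3, 2012) == MonthYear(month=3, year=2012)   # -> True
#   MonthYear(dt.date(2012, 3, 12)) == (3, 2012)          # -> True
#   str(MonthYear(3, 2012))                                # -> '03-2012'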
class MonthCrossCorrelation:
"""
Class holding cross-correlation over a single month
"""
def __init__(self, month, ndata):
"""
@type month: L{MonthYear}
@type ndata: int
"""
# attaching month and year
self.month = month
# initializing stats
self.nday = 0
# data array of month cross-correlation
self.dataarray = np.zeros(ndata)
def monthfill(self):
"""
Returns the relative month fill (between 0-1)
"""
return float(self.nday) / monthrange(year=self.month.y, month=self.month.m)[1]
def __repr__(self):
s = '<cross-correlation over single month {}: {} days>'
return s.format(self.month, self.nday)
class CrossCorrelation:
"""
Cross-correlation class, which contains:
- a pair of stations
- a pair of sets of locations (from trace.location)
- a pair of sets of ids (from trace.id)
- start day, end day and nb of days of cross-correlation
- distance between stations
- a time array and a (cross-correlation) data array
"""
def __init__(self, station1, station2, xcorr_dt=PERIOD_RESAMPLE,
xcorr_tmax=CROSSCORR_TMAX):
"""
@type station1: L{pysismo.psstation.Station}
@type station2: L{pysismo.psstation.Station}
@type xcorr_dt: float
@type xcorr_tmax: float
"""
# pair of stations
self.station1 = station1
self.station2 = station2
# locations and trace ids of stations
self.locs1 = set()
self.locs2 = set()
self.ids1 = set()
self.ids2 = set()
# initializing stats
self.startday = None
self.endday = None
self.nday = 0
# initializing time and data arrays of cross-correlation
nmax = int(xcorr_tmax / xcorr_dt)
self.timearray = np.arange(-nmax * xcorr_dt, (nmax + 1)*xcorr_dt, xcorr_dt)
self.dataarray = np.zeros(2 * nmax + 1)
# separate zero-initialized arrays for the phase stack and the
# phase-weighted stack (aliasing self.dataarray would make the
# three stacks silently overwrite each other)
self.phasearray = np.zeros(2 * nmax + 1)
self.pws = np.zeros(2 * nmax + 1)
# has cross-corr been symmetrized? whitened?
self.symmetrized = False
self.whitened = False
# initializing list of cross-correlations over a single month
self.monthxcs = []
def __repr__(self):
s = '<cross-correlation between stations {0}-{1}: avg {2} stacks>'
return s.format(self.station1.name, self.station2.name, self.nday)
def __str__(self):
"""
E.g., 'Cross-correlation between stations SPB['10'] - ITAB['00','10']:
365 stacks from 2002-01-01 to 2002-12-01'
"""
locs1 = ','.join(sorted("'{}'".format(loc) for loc in self.locs1))
locs2 = ','.join(sorted("'{}'".format(loc) for loc in self.locs2))
s = ('Cross-correlation between stations '
'{sta1}[{locs1}]-{sta2}[{locs2}]: '
'{nday} stacks from {start} to {end}')
return s.format(sta1=self.station1.name, locs1=locs1,
sta2=self.station2.name, locs2=locs2, nday=self.nday,
start=self.startday, end=self.endday)
def dist(self):
"""
Geodesic distance (in km) between stations, using the
WGS-84 ellipsoidal model of the Earth
"""
return self.station1.dist(self.station2)
def copy(self):
"""
Makes a copy of self
"""
# shallow copy
result = copy.copy(self)
# copy of month cross-correlations
result.monthxcs = [copy.copy(mxc) for mxc in self.monthxcs]
return result
def phase_stack(self, tr1, tr2, xcorr=None):
"""
This function is used when the input parameter stack_type=='phase'
and applies the technique of Schimmel et al. (1997) and stacks
cross-correlation waveforms based on their instantaneous phases. The
technique is known as phase stacking (PS).
"""
# cross-correlation
if xcorr is None:
# calculating cross-corr using obspy, if not already provided
xcorr = obspy.signal.cross_correlation.xcorr(
tr1, tr2, shift_len=self._get_xcorr_nmax(), full_xcorr=True)[2]
xcorr = scipy.signal.correlate(tr1, tr2, mode='same')
# verifying that we don't have NaN
if np.any(np.isnan(xcorr)):
s = u"Got NaN in cross-correlation between traces:\n{tr1}\n{tr2}"
raise pserrors.NaNError(s.format(tr1=tr1, tr2=tr2))
xcorr = xcorr / np.max(xcorr)
#xcorr = xcorr - np.mean(xcorr)
inst_phase = np.arctan2(xcorr, range(0,len(xcorr)))
# phase-stacking cross-corr
self.phasearray += np.real(np.exp(1j*inst_phase))
# reduce stack about zero
#self.phasearray = self.phasearray - np.mean(self.phasearray)
# normalise about 0, max amplitude 1
def phase_weighted_stack(self, power_v=2):
"""
This function applies the technique of Schimmel et al. (1997)
and stacks cross-correlation waveforms based on their instantaneous
phases. The technique is known as phase-weighted stacking (PWS).
It combines the phase stack, the linear stack and the number
of stacks performed. The power_v variable is described in Schimmel et al.
and is almost always set to 2 for successful stacking.
"""
# Note: this function only produces the most recent PWS; it does NOT stack
# each iteration on top of the previous one. Note also that nday is the
# number of iterations performed, NOT the number of days passed.
N = self.nday
linear_comp = (self.dataarray - np.mean(self.dataarray))
phase_comp = (np.abs(self.phasearray) - \
np.mean(np.abs(self.phasearray)))
self.pws = linear_comp * (phase_comp ** power_v)
self.pws = self.pws / np.max(self.pws)
def add(self, tr1, tr2, xcorr=None):
"""
Stacks cross-correlation between 2 traces
@type tr1: L{obspy.core.trace.Trace}
@type tr2: L{obspy.core.trace.Trace}
"""
# verifying sampling rates
#try:
# assert 1.0 / tr1.stats.sampling_rate == self._get_xcorr_dt()
# assert 1.0 / tr2.stats.sampling_rate == self._get_xcorr_dt()
#except AssertionError:
# s = 'Sampling rates of traces are not equal:\n{tr1}\n{tr2}'
# raise Exception(s.format(tr1=tr1, tr2=tr2))
# cross-correlation
if xcorr is None:
# calculating cross-corr using obspy, if not already provided
xcorr = obspy.signal.cross_correlation.xcorr(
tr1, tr2, shift_len=self._get_xcorr_nmax(), full_xcorr=True)[2]
# verifying that we don't have NaN
if np.any(np.isnan(xcorr)):
s = u"Got NaN in cross-correlation between traces:\n{tr1}\n{tr2}"
raise pserrors.NaNError(s.format(tr1=tr1, tr2=tr2))
#/ np.max(xcorr)
self.dataarray += xcorr #/ (self.nday + 1)
# reduce stack about zero
#self.dataarray = self.dataarray - np.mean(self.dataarray)
# normalise about 0, max amplitude 1
#self.dataarray = self.dataarray / np.max(self.dataarray)
# updating stats: 1st day, last day, nb of days of cross-corr
startday = (tr1.stats.starttime + ONESEC)
self.startday = min(self.startday, startday) if self.startday else startday
endday = (tr1.stats.endtime - ONESEC)
self.endday = max(self.endday, endday) if self.endday else endday
self.nday += 1
# stacking cross-corr over single month
month = MonthYear((tr1.stats.starttime + ONESEC).date)
try:
monthxc = next(monthxc for monthxc in self.monthxcs
if monthxc.month == month)
except StopIteration:
# appending new month xc
monthxc = MonthCrossCorrelation(month=month, ndata=len(self.timearray))
self.monthxcs.append(monthxc)
monthxc.dataarray += xcorr
monthxc.nday += 1
# updating (adding) locs and ids
self.locs1.add(tr1.stats.location)
self.locs2.add(tr2.stats.location)
self.ids1.add(tr1.id)
self.ids2.add(tr2.id)
def symmetrize(self, inplace=False):
"""
Symmetric component of cross-correlation (including
the list of cross-corr over a single month).
Returns self if already symmetrized or inPlace=True
@rtype: CrossCorrelation
"""
if self.symmetrized:
# already symmetrized
return self
# symmetrizing on self or copy of self
xcout = self if inplace else self.copy()
n = len(xcout.timearray)
mid = (n - 1) / 2
# verifying that time array is symmetric wrt 0
if n % 2 != 1:
raise Exception('Cross-correlation cannot be symmetrized')
if not np.alltrue(xcout.timearray[mid:] + xcout.timearray[mid::-1] < EPS):
raise Exception('Cross-correlation cannot be symmetrized')
# calculating symmetric component of cross-correlation
xcout.timearray = xcout.timearray[mid:]
for obj in [xcout] + (xcout.monthxcs if hasattr(xcout, 'monthxcs') else []):
a = obj.dataarray
obj.dataarray = (a[mid:] + a[mid::-1]) / 2.0
xcout.symmetrized = True
return xcout
def whiten(self, inplace=False, window_freq=0.004,
bandpass_tmin=7.0, bandpass_tmax=150):
"""
Spectral whitening of cross-correlation (including
the list of cross-corr over a single month).
@rtype: CrossCorrelation
"""
if hasattr(self, 'whitened') and self.whitened:
# already whitened
return self
# whitening on self or copy of self
xcout = self if inplace else self.copy()
# frequency step
npts = len(xcout.timearray)
sampling_rate = 1.0 / xcout._get_xcorr_dt()
deltaf = sampling_rate / npts
# loop over cross-corr and one-month stacks
for obj in [xcout] + (xcout.monthxcs if hasattr(xcout, 'monthxcs') else []):
a = obj.dataarray
# Fourier transform
ffta = rfft(a)
# smoothing amplitude spectrum
halfwindow = int(round(window_freq / deltaf / 2.0))
weight = psutils.moving_avg(abs(ffta), halfwindow=halfwindow)
a[:] = irfft(ffta / weight, n=npts)
# bandpass to avoid low/high freq noise
obj.dataarray = psutils.bandpass_butterworth(data=a,
dt=xcout._get_xcorr_dt(),
periodmin=bandpass_tmin,
periodmax=bandpass_tmax)
xcout.whitened = True
return xcout
def signal_noise_windows(self, vmin, vmax, signal2noise_trail, noise_window_size):
"""
Returns the signal window and the noise window.
The signal window is defined by *vmin* and *vmax*:
dist/*vmax* < t < dist/*vmin*
The noise window starts *signal2noise_trail* after the
signal window and has a size of *noise_window_size*:
t > dist/*vmin* + *signal2noise_trail*
t < dist/*vmin* + *signal2noise_trail* + *noise_window_size*
If the noise window hits the time limit of the cross-correlation,
we try to extend it to the left until it hits the signal
window.
@rtype: (float, float), (float, float)
"""
# signal window
tmin_signal = self.dist() / vmax
tmax_signal = self.dist() / vmin
# noise window
tmin_noise = tmax_signal + signal2noise_trail
tmax_noise = tmin_noise + noise_window_size
if tmax_noise > self.timearray.max():
# the noise window hits the rightmost limit:
# let's shift it to the left without crossing
# the signal window
delta = min(tmax_noise - self.timearray.max(), tmin_noise - tmax_signal)
tmin_noise -= delta
tmax_noise -= delta
return (tmin_signal, tmax_signal), (tmin_noise, tmax_noise)
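# Worked example (comment only, hypothetical numbers added for clarity):
# for dist = 100 km, vmin = 2.5 km/s, vmax = 5 km/s, signal2noise_trail = 500 s
# and noise_window_size = 300 s, the signal window is 20 s < t < 40 s and the
# noise window is 540 s < t < 840 s (shifted left only if it exceeds the
# cross-correlation's time limit).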
def SNR(self, periodbands=None,
centerperiods_and_alpha=None,
whiten=False, months=None,
vmin=SIGNAL_WINDOW_VMIN,
vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
[spectral] signal-to-noise ratio, calculated as the peak
of the absolute amplitude in the signal window divided by
the standard deviation in the noise window.
If period bands are given (in *periodbands*, as a list of
(periodmin, periodmax)), then for each band the SNR is
calculated after band-passing the cross-correlation using
a butterworth filter.
If center periods and alpha are given (in *centerperiods_and_alpha*,
as a list of (center period, alpha)), then for each center
period and alpha the SNR is calculated after band-passing
the cross-correlation using a Gaussian filter
The signal window is defined by *vmin* and *vmax*:
dist/*vmax* < t < dist/*vmin*
The noise window starts *signal2noise_trail* after the
signal window and has a size of *noise_window_size*:
t > dist/*vmin* + *signal2noise_trail*
t < dist/*vmin* + *signal2noise_trail* + *noise_window_size*
If the noise window hits the time limit of the cross-correlation,
we try to extend it to the left until it hits the signal
window.
@type periodbands: (list of (float, float))
@type whiten: bool
@type vmin: float
@type vmax: float
@type signal2noise_trail: float
@type noise_window_size: float
@type months: list of (L{MonthYear} or (int, int))
@rtype: L{numpy.ndarray}
"""
# symmetric part of cross-corr
xcout = self.symmetrize(inplace=False)
# spectral whitening
if whiten:
xcout = xcout.whiten(inplace=False)
# cross-corr of desired months
xcdata = xcout._get_monthyears_xcdataarray(months=months)
# filter type and associated arguments
if periodbands:
filtertype = 'Butterworth'
kwargslist = [{'periodmin': band[0], 'periodmax': band[1]}
for band in periodbands]
elif centerperiods_and_alpha:
filtertype = 'Gaussian'
kwargslist = [{'period': period, 'alpha': alpha}
for period, alpha in centerperiods_and_alpha]
else:
filtertype = None
kwargslist = [{}]
SNR = []
for filterkwargs in kwargslist:
if not filtertype:
dataarray = xcdata
else:
# bandpass filtering data before calculating SNR
dataarray = psutils.bandpass(data=xcdata,
dt=xcout._get_xcorr_dt(),
filtertype=filtertype,
**filterkwargs)
# signal and noise windows
tsignal, tnoise = xcout.signal_noise_windows(
vmin, vmax, signal2noise_trail, noise_window_size)
signal_window = (xcout.timearray >= tsignal[0]) & \
(xcout.timearray <= tsignal[1])
noise_window = (xcout.timearray >= tnoise[0]) & \
(xcout.timearray <= tnoise[1])
peak = np.abs(dataarray[signal_window]).max()
noise = dataarray[noise_window].std()
# appending SNR
SNR.append(peak / noise)
# returning 1d array if spectral SNR, 0d array if normal SNR
return np.array(SNR) if len(SNR) > 1 else np.array(SNR[0])
def plot(self, whiten=False, sym=False, vmin=SIGNAL_WINDOW_VMIN,
vmax=SIGNAL_WINDOW_VMAX, months=None):
"""
Plots cross-correlation and its spectrum
"""
xcout = self.symmetrize(inplace=False) if sym else self
if whiten:
xcout = xcout.whiten(inplace=False)
# cross-corr of desired months
xcdata = xcout._get_monthyears_xcdataarray(months=months)
# cross-correlation plot ===
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(xcout.timearray, xcdata)
plt.xlabel('Time (s)')
plt.ylabel('Cross-correlation')
plt.grid()
# vmin, vmax
vkwargs = {
'fontsize': 8,
'horizontalalignment': 'center',
'bbox': dict(color='k', facecolor='white')}
if vmin:
ylim = plt.ylim()
plt.plot(2 * [xcout.dist() / vmin], ylim, color='grey')
xy = (xcout.dist() / vmin, plt.ylim()[0])
plt.annotate('{0} km/s'.format(vmin), xy=xy, xytext=xy, **vkwargs)
plt.ylim(ylim)
if vmax:
ylim = plt.ylim()
plt.plot(2 * [xcout.dist() / vmax], ylim, color='grey')
xy = (xcout.dist() / vmax, plt.ylim()[0])
plt.annotate('{0} km/s'.format(vmax), xy=xy, xytext=xy, **vkwargs)
plt.ylim(ylim)
# title
plt.title(xcout._plottitle(months=months))
# spectrum plot ===
plt.subplot(2, 1, 2)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
plt.grid()
# frequency and amplitude arrays
npts = len(xcdata)
nfreq = npts / 2 + 1 if npts % 2 == 0 else (npts + 1) / 2
sampling_rate = 1.0 / xcout._get_xcorr_dt()
freqarray = np.arange(nfreq) * sampling_rate / npts
amplarray = np.abs(rfft(xcdata))
plt.plot(freqarray, amplarray)
plt.xlim((0.0, 0.2))
plt.show()
def plot_by_period_band(self, axlist=None, bands=PERIOD_BANDS,
plot_title=True, whiten=False, tmax=None,
vmin=SIGNAL_WINDOW_VMIN,
vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE,
months=None, outfile=None):
"""
Plots cross-correlation for various bands of periods
The signal window:
dist / vmax < t < dist / vmin,
and the noise window:
t > dist / vmin + signal2noise_trail
t < dist / vmin + signal2noise_trail + noise_window_size,
serve to estimate the SNRs and are highlighted on the plot.
If *tmax* is not given, default is to show times up to the noise
window (plus 5 %). The y-scale is adapted to fit the min and max
cross-correlation AFTER the beginning of the signal window.
@type axlist: list of L{matplotlib.axes.AxesSubplot}
"""
# one plot per band + plot of original xcorr
nplot = len(bands) + 1
# limits of time axis
if not tmax:
# default is to show time up to the noise window (plus 5 %)
tmax = self.dist() / vmin + signal2noise_trail + noise_window_size
tmax = min(1.05 * tmax, self.timearray.max())
xlim = (0, tmax)
# creating figure if not given as input
fig = None
if not axlist:
fig = plt.figure()
axlist = [fig.add_subplot(nplot, 1, i) for i in range(1, nplot + 1)]
for ax in axlist:
# smaller y tick label
ax.tick_params(axis='y', labelsize=9)
axlist[0].get_figure().subplots_adjust(hspace=0)
# symmetrization
xcout = self.symmetrize(inplace=False)
# spectral whitening
if whiten:
xcout = xcout.whiten(inplace=False)
# cross-corr of desired months
xcdata = xcout._get_monthyears_xcdataarray(months=months)
# limits of y-axis = min/max of the cross-correlation
# AFTER the beginning of the signal window
mask = (xcout.timearray >= min(self.dist() / vmax, xlim[1])) & \
(xcout.timearray <= xlim[1])
ylim = (xcdata[mask].min(), xcdata[mask].max())
# signal and noise windows
tsignal, tnoise = xcout.signal_noise_windows(
vmin, vmax, signal2noise_trail, noise_window_size)
# plotting original cross-correlation
axlist[0].plot(xcout.timearray, xcdata)
# title
if plot_title:
title = xcout._plottitle(prefix='Cross-corr. ', months=months)
axlist[0].set_title(title)
# signal window
for t, v, align in zip(tsignal, [vmax, vmin], ['right', 'left']):
axlist[0].plot(2 * [t], ylim, color='k', lw=1.5)
xy = (t, ylim[0] + 0.1 * (ylim[1] - ylim[0]))
axlist[0].annotate(s='{} km/s'.format(v), xy=xy, xytext=xy,
horizontalalignment=align, fontsize=8,
bbox={'color': 'k', 'facecolor': 'white'})
# noise window
axlist[0].fill_between(x=tnoise, y1=[ylim[1], ylim[1]],
y2=[ylim[0], ylim[0]], color='k', alpha=0.2)
# inserting text, e.g., "Original data, SNR = 10.1"
SNR = xcout.SNR(vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
axlist[0].text(x=xlim[1],
y=ylim[0] + 0.85 * (ylim[1] - ylim[0]),
s="Original data, SNR = {:.1f}".format(float(SNR)),
fontsize=9,
horizontalalignment='right',
bbox={'color': 'k', 'facecolor': 'white'})
# formatting axes
axlist[0].set_xlim(xlim)
axlist[0].set_ylim(ylim)
axlist[0].grid(True)
# formatting labels
axlist[0].set_xticklabels([])
axlist[0].get_figure().canvas.draw()
labels = [l.get_text() for l in axlist[0].get_yticklabels()]
labels[0] = labels[-1] = ''
labels[2:-2] = [''] * (len(labels) - 4)
axlist[0].set_yticklabels(labels)
# plotting band-filtered cross-correlation
for ax, (tmin, tmax) in zip(axlist[1:], bands):
lastplot = ax is axlist[-1]
dataarray = psutils.bandpass_butterworth(data=xcdata,
dt=xcout._get_xcorr_dt(),
periodmin=tmin,
periodmax=tmax)
# limits of y-axis = min/max of the cross-correlation
# AFTER the beginning of the signal window
mask = (xcout.timearray >= min(self.dist() / vmax, xlim[1])) & \
(xcout.timearray <= xlim[1])
ylim = (dataarray[mask].min(), dataarray[mask].max())
ax.plot(xcout.timearray, dataarray)
# signal window
for t in tsignal:
ax.plot(2 * [t], ylim, color='k', lw=2)
# noise window
ax.fill_between(x=tnoise, y1=[ylim[1], ylim[1]],
y2=[ylim[0], ylim[0]], color='k', alpha=0.2)
# inserting text, e.g., "10 - 20 s, SNR = 10.1"
SNR = float(xcout.SNR(periodbands=[(tmin, tmax)],
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size))
ax.text(x=xlim[1],
y=ylim[0] + 0.85 * (ylim[1] - ylim[0]),
s="{} - {} s, SNR = {:.1f}".format(tmin, tmax, SNR),
fontsize=9,
horizontalalignment='right',
bbox={'color': 'k', 'facecolor': 'white'})
if lastplot:
# adding label to signal windows
ax.text(x=self.dist() * (1.0 / vmin + 1.0 / vmax) / 2.0,
y=ylim[0] + 0.1 * (ylim[1] - ylim[0]),
s="Signal window",
horizontalalignment='center',
fontsize=8,
bbox={'color': 'k', 'facecolor': 'white'})
# adding label to noise windows
ax.text(x=sum(tnoise) / 2,
y=ylim[0] + 0.1 * (ylim[1] - ylim[0]),
s="Noise window",
horizontalalignment='center',
fontsize=8,
bbox={'color': 'k', 'facecolor': 'white'})
# formatting axes
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.grid(True)
if lastplot:
ax.set_xlabel('Time (s)')
# formatting labels
if not lastplot:
ax.set_xticklabels([])
ax.get_figure().canvas.draw()
labels = [l.get_text() for l in ax.get_yticklabels()]
labels[0] = labels[-1] = ''
labels[2:-2] = [''] * (len(labels) - 4)
ax.set_yticklabels(labels)
if outfile:
axlist[0].get_figure().savefig(outfile, dpi=300, transparent=True)
if fig:
fig.show()
def FTAN(self, whiten=False, phase_corr=None, months=None, vgarray_init=None,
optimize_curve=None, strength_smoothing=STRENGTH_SMOOTHING,
use_inst_freq=USE_INSTANTANEOUS_FREQ, vg_at_nominal_freq=None,
debug=False):
"""
Frequency-time analysis of a cross-correlation function.
Calculates the Fourier transform of the cross-correlation,
calculates the analytic signal in the frequency domain,
applies Gaussian bandpass filters centered around given
center periods, calculates the filtered analytic
signal back in time domain and extracts the group velocity
dispersion curve.
Options:
- set *whiten*=True to whiten the spectrum of the cross-corr.
- provide a function of frequency in *phase_corr* to include a
phase correction.
- provide a list of (int, int) in *months* to restrict the FTAN
to a subset of month-year
- provide an initial guess of dispersion curve (in *vgarray_init*)
to accelerate the group velocity curve extraction
- set *optimize_curve*=True to further optimize the dispersion
curve, i.e., find the curve that really minimizes the penalty
function (which seeks to maximize the traversed amplitude while
penalizing jumps) -- but the result does not necessarily ride through
local maxima any more. Default is True for the raw FTAN (no phase
corr provided), False for the clean FTAN (phase corr provided)
- set the strength of the smoothing term of the dispersion curve
in *strength_smoothing*
- set *use_inst_freq*=True to replace the nominal frequency with
the instantaneous frequency in the dispersion curve.
- if an array is provided in *vg_at_nominal_freq*, then it is filled
with the vg curve BEFORE the nominal freqs are replaced with
instantaneous freqs
Returns (1) the amplitude matrix A(T0,v), (2) the phase matrix
phi(T0,v) (that is, the amplitude and phase function of velocity
v of the analytic signal filtered around period T0) and (3) the
group velocity dispersion curve extracted from the amplitude
matrix.
Raises CannotCalculateInstFreq if the calculation of instantaneous
frequencies only gives bad values.
FTAN periods in variable *RAWFTAN_PERIODS* and *CLEANFTAN_PERIODS*
FTAN velocities in variable *FTAN_VELOCITIES*
See. e.g., Levshin & Ritzwoller, "Automated detection,
extraction, and measurement of regional surface waves",
Pure Appl. Geoph. (2001) and Bensen et al., "Processing
seismic ambient noise data to obtain reliable broad-band
surface wave dispersion measurements", Geophys. J. Int. (2007).
@type whiten: bool
@type phase_corr: L{scipy.interpolate.interpolate.interp1d}
@type months: list of (L{MonthYear} or (int, int))
@type vgarray_init: L{numpy.ndarray}
@type vg_at_nominal_freq: L{numpy.ndarray}
@rtype: (L{numpy.ndarray}, L{numpy.ndarray}, L{DispersionCurve})
"""
# no phase correction given <=> raw FTAN
raw_ftan = phase_corr is None
if optimize_curve is None:
optimize_curve = raw_ftan
ftan_periods = RAWFTAN_PERIODS if raw_ftan else CLEANFTAN_PERIODS
# getting the symmetrized cross-correlation
xcout = self.symmetrize(inplace=False)
# whitening cross-correlation
if whiten:
xcout = xcout.whiten(inplace=False)
# cross-corr of desired months
xcdata = xcout._get_monthyears_xcdataarray(months=months)
if xcdata is None:
raise Exception('No data to perform FTAN in selected months')
# FTAN analysis: amplitute and phase function of
# center periods T0 and time t
ampl, phase = FTAN(x=xcdata,
dt=xcout._get_xcorr_dt(),
periods=ftan_periods,
alpha=FTAN_ALPHA,
phase_corr=phase_corr)
# re-interpolating amplitude and phase as functions
# of center periods T0 and velocities v
tne0 = xcout.timearray != 0.0
x = ftan_periods # x = periods
y = (self.dist() / xcout.timearray[tne0])[::-1] # y = velocities
zampl = ampl[:, tne0][:, ::-1] # z = amplitudes
zphase = phase[:, tne0][:, ::-1] # z = phases
# spline interpolation
ampl_interp_func = RectBivariateSpline(x, y, zampl)
phase_interp_func = RectBivariateSpline(x, y, zphase)
# re-sampling over periods and velocities
ampl_resampled = ampl_interp_func(ftan_periods, FTAN_VELOCITIES)
phase_resampled = phase_interp_func(ftan_periods, FTAN_VELOCITIES)
# extracting the group velocity curve from the amplitude matrix,
# that is, the velocity curve that maximizes amplitude and best
# avoids jumps
vgarray = extract_dispcurve(amplmatrix=ampl_resampled,
velocities=FTAN_VELOCITIES,
varray_init=vgarray_init,
optimizecurve=optimize_curve,
strength_smoothing=strength_smoothing)
if vg_at_nominal_freq is not None:
# filling array with group velocities before replacing
# nominal freqs with instantaneous freqs
vg_at_nominal_freq[...] = vgarray
# if *use_inst_freq*=True, we replace nominal freq with instantaneous
# freq, i.e., we consider that ampl[iT, :], phase[iT, :] and vgarray[iT]
# actually correspond to period 2.pi/|dphi/dt|(t=arrival time), with
# phi(.) = phase[iT, :] and arrival time = dist / vgarray[iT],
# and we re-interpolate them along periods of *ftan_periods*
nom2inst_periods = None
if use_inst_freq:
# array of arrival times
tarray = xcout.dist() / vgarray
# indices of arrival times in time array
it = xcout.timearray.searchsorted(tarray)
it = np.minimum(len(xcout.timearray) - 1, np.maximum(1, it))
# instantaneous freq: omega = |dphi/dt|(t=arrival time),
# with phi = phase of FTAN
dt = xcout.timearray[it] - xcout.timearray[it-1]
nT = phase.shape[0]
omega = np.abs((phase[range(nT), it] - phase[range(nT), it-1]) / dt)
# -> instantaneous period = 2.pi/omega
inst_periods = 2.0 * np.pi / omega
assert isinstance(inst_periods, np.ndarray) # just to enable autocompletion
if debug:
plt.plot(ftan_periods, inst_periods)
# removing outliers (inst periods too small or too different from nominal)
reldiffs = np.abs((inst_periods - ftan_periods) / ftan_periods)
discard = (inst_periods < MIN_INST_PERIOD) | \
(reldiffs > MAX_RELDIFF_INST_NOMINAL_PERIOD)
inst_periods = np.where(discard, np.nan, inst_periods)
# despiking curve of inst freqs (by removing values too
# different from the running median)
n = np.size(inst_periods)
median_periods = []
for i in range(n):
sl = slice(max(i - HALFWINDOW_MEDIAN_PERIOD, 0),
min(i + HALFWINDOW_MEDIAN_PERIOD + 1, n))
mask = ~np.isnan(inst_periods[sl])
if np.any(mask):
med = np.median(inst_periods[sl][mask])
median_periods.append(med)
else:
median_periods.append(np.nan)
reldiffs = np.abs((inst_periods - np.array(median_periods)) / inst_periods)
mask = ~np.isnan(reldiffs)
inst_periods[mask] = np.where(reldiffs[mask] > MAX_RELDIFF_INST_MEDIAN_PERIOD,
np.nan,
inst_periods[mask])
# filling holes by linear interpolation
masknan = np.isnan(inst_periods)
if masknan.all():
# not a single correct value of inst period!
s = "Not a single correct value of instantaneous period!"
raise pserrors.CannotCalculateInstFreq(s)
if masknan.any():
inst_periods[masknan] = np.interp(x=masknan.nonzero()[0],
xp=(~masknan).nonzero()[0],
fp=inst_periods[~masknan])
# looking for the increasing curve that best-fits
# calculated instantaneous periods
def fun(periods):
# misfit wrt calculated instantaneous periods
return np.sum((periods - inst_periods)**2)
# constraints = positive increments
constraints = [{'type': 'ineq', 'fun': lambda p, i=i: p[i+1] - p[i]}
for i in range(len(inst_periods) - 1)]
res = minimize(fun, x0=ftan_periods, method='SLSQP', constraints=constraints)
inst_periods = res['x']
if debug:
plt.plot(ftan_periods, inst_periods)
plt.show()
# re-interpolating amplitude, phase and dispersion curve
# along periods of array *ftan_periods* -- assuming that
# they are currently evaluated along *inst_periods*
vgarray = np.interp(x=ftan_periods,
xp=inst_periods,
fp=vgarray,
left=np.nan,
right=np.nan)
for iv in range(len(FTAN_VELOCITIES)):
ampl_resampled[:, iv] = np.interp(x=ftan_periods,
xp=inst_periods,
fp=ampl_resampled[:, iv],
left=np.nan,
right=np.nan)
phase_resampled[:, iv] = np.interp(x=ftan_periods,
xp=inst_periods,
fp=phase_resampled[:, iv],
left=np.nan,
right=np.nan)
# list of (nominal period, inst period)
nom2inst_periods = zip(ftan_periods, inst_periods)
vgcurve = pstomo.DispersionCurve(periods=ftan_periods,
v=vgarray,
station1=self.station1,
station2=self.station2,
nom2inst_periods=nom2inst_periods)
return ampl_resampled, phase_resampled, vgcurve
def FTAN_complete(self, whiten=False, months=None, add_SNRs=True,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE,
optimize_curve=None,
strength_smoothing=STRENGTH_SMOOTHING,
use_inst_freq=USE_INSTANTANEOUS_FREQ,
**kwargs):
"""
Frequency-time analysis including phase-matched filter and
seasonal variability:
(1) Performs a FTAN of the raw cross-correlation signal,
(2) Uses the raw group velocities to calculate the phase corr.
(3) Performs a FTAN with the phase correction
("phase matched filter")
(4) Repeats the procedure for all 12 trimesters if no
list of months is given
Optionally, adds spectral SNRs at the periods of the clean
vg curve. In this case, parameters *vmin*, *vmax*,
*signal2noise_trail*, *noise_window_size* control the location
of the signal window and the noise window
(see function xc.SNR()).
Options:
- set *whiten*=True to whiten the spectrum of the cross-corr.
- provide a list of (int, int) in *months* to restrict the FTAN
to a subset of month-year
- set *add_SNRs* to calculate the SNR function of period associated
with the dispersion curves
- adjust the signal window and the noise window of the SNR through
*vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
- set *optimize_curve*=True to further optimize the dispersion
curve, i.e., find the curve that really minimizes the penalty
function (which seeks to maximize the traversed amplitude while
preserving smoothness) -- but the result does not necessarily ride through
local maxima. Default is True for the raw FTAN, False for the
clean FTAN
- set the strength of the smoothing term of the dispersion curve
in *strength_smoothing*
- other *kwargs* sent to CrossCorrelation.FTAN()
Returns raw ampl, raw vg, cleaned ampl, cleaned vg.
See. e.g., Levshin & Ritzwoller, "Automated detection,
extraction, and measurement of regional surface waves",
Pure Appl. Geoph. (2001) and Bensen et al., "Processing
seismic ambient noise data to obtain reliable broad-band
surface wave dispersion measurements", Geophys. J. Int. (2007).
@type whiten: bool
@type months: list of (L{MonthYear} or (int, int))
@type add_SNRs: bool
@rtype: (L{numpy.ndarray}, L{numpy.ndarray},
L{numpy.ndarray}, L{DispersionCurve})
"""
# symmetrized, whitened cross-corr
xc = self.symmetrize(inplace=False)
if whiten:
xc = xc.whiten(inplace=False)
# raw FTAN (no need to whiten any more)
rawvg_init = np.zeros_like(RAWFTAN_PERIODS)
try:
rawampl, _, rawvg = xc.FTAN(whiten=False,
months=months,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
vg_at_nominal_freq=rawvg_init,
**kwargs)
except pserrors.CannotCalculateInstFreq:
# problem with instantaneous frequencies: returning NaNs
print "Warning: could not calculate instantaneous frequencies in raw FTAN!"
rawampl = np.nan * np.zeros((len(RAWFTAN_PERIODS), len(FTAN_VELOCITIES)))
cleanampl = np.nan * np.zeros((len(CLEANFTAN_PERIODS), len(FTAN_VELOCITIES)))
rawvg = pstomo.DispersionCurve(periods=RAWFTAN_PERIODS,
v=np.nan * np.zeros(len(RAWFTAN_PERIODS)),
station1=self.station1,
station2=self.station2)
cleanvg = pstomo.DispersionCurve(periods=CLEANFTAN_PERIODS,
v=np.nan * np.zeros(len(CLEANFTAN_PERIODS)),
station1=self.station1,
station2=self.station2)
return rawampl, rawvg, cleanampl, cleanvg
# phase function from raw vg curve
phase_corr = xc.phase_func(vgcurve=rawvg)
# clean FTAN
cleanvg_init = np.zeros_like(CLEANFTAN_PERIODS)
try:
cleanampl, _, cleanvg = xc.FTAN(whiten=False,
phase_corr=phase_corr,
months=months,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
vg_at_nominal_freq=cleanvg_init,
**kwargs)
except pserrors.CannotCalculateInstFreq:
# problem with instantaneous frequencies: returning NaNs
print "Warning: could not calculate instantaneous frequencies in clean FTAN!"
cleanampl = np.nan * np.zeros((len(CLEANFTAN_PERIODS), len(FTAN_VELOCITIES)))
cleanvg = pstomo.DispersionCurve(periods=CLEANFTAN_PERIODS,
v=np.nan * np.zeros(len(CLEANFTAN_PERIODS)),
station1=self.station1,
station2=self.station2)
return rawampl, rawvg, cleanampl, cleanvg
# adding spectral SNRs associated with the periods of the
# clean vg curve
if add_SNRs:
cleanvg.add_SNRs(xc, months=months,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
if months is None:
# set of available months (without year)
available_months = set(mxc.month.m for mxc in xc.monthxcs)
# extracting clean vg curves for all 12 trimesters:
# Jan-Feb-March, Feb-March-Apr ... Dec-Jan-Feb
for trimester_start in range(1, 13):
# months of trimester, e.g. [1, 2, 3], [2, 3, 4] ... [12, 1, 2]
trimester_months = [(trimester_start + i - 1) % 12 + 1
for i in range(3)]
# do we have data in all months?
if any(month not in available_months for month in trimester_months):
continue
# list of month-year whose month belong to current trimester
months_of_xc = [mxc.month for mxc in xc.monthxcs
if mxc.month.m in trimester_months]
# raw-clean FTAN on trimester data, using the vg curve
# extracted from all data as initial guess
try:
_, _, rawvg_trimester = xc.FTAN(
whiten=False,
months=months_of_xc,
vgarray_init=rawvg_init,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
**kwargs)
phase_corr_trimester = xc.phase_func(vgcurve=rawvg_trimester)
_, _, cleanvg_trimester = xc.FTAN(
whiten=False,
phase_corr=phase_corr_trimester,
months=months_of_xc,
vgarray_init=cleanvg_init,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
**kwargs)
except pserrors.CannotCalculateInstFreq:
# skipping trimester in case of problems with instantaneous frequencies
continue
# adding spectral SNRs associated with the periods of the
# clean trimester vg curve
if add_SNRs:
cleanvg_trimester.add_SNRs(xc, months=months_of_xc,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
# adding trimester vg curve
cleanvg.add_trimester(trimester_start, cleanvg_trimester)
return rawampl, rawvg, cleanampl, cleanvg
def phase_func(self, vgcurve):
"""
Calculates the phase from the group velocity obtained
using method self.FTAN, following the relationship:
k(f) = 2.pi.integral[ 1/vg(f'), f'=f0..f ]
phase(f) = distance.k(f)
Returns the function phase: freq -> phase(freq)
@param vgcurve: group velocity curve
@type vgcurve: L{DispersionCurve}
@rtype: L{scipy.interpolate.interpolate.interp1d}
"""
freqarray = 1.0 / vgcurve.periods[::-1]
vgarray = vgcurve.v[::-1]
mask = ~np.isnan(vgarray)
# array k[f]
k = np.zeros_like(freqarray[mask])
k[0] = 0.0
k[1:] = 2 * np.pi * integrate.cumtrapz(y=1.0 / vgarray[mask], x=freqarray[mask])
# array phi[f]
phi = k * self.dist()
# phase function of f
return interp1d(x=freqarray[mask], y=phi)
def plot_FTAN(self, rawampl=None, rawvg=None, cleanampl=None, cleanvg=None,
whiten=False, months=None, showplot=True, normalize_ampl=True,
logscale=True, bbox=BBOX_SMALL, figsize=(16, 5), outfile=None,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE,
**kwargs):
"""
Plots 4 panels related to frequency-time analysis:
- 1st panel contains the cross-correlation (original, and bandpass
filtered: see method self.plot_by_period_band)
- 2nd panel contains an image of log(ampl^2) (or ampl) as a function of period
T and group velocity vg, where ampl is the amplitude of the
raw FTAN (basically, the amplitude of the envelope of the
cross-correlation at time t = dist / vg, after applying a Gaussian
bandpass filter centered at period T). The raw and clean dispersion
curves (group velocity function of period) are also shown.
- 3rd panel shows the same image, but for the clean FTAN (wherein the
phase of the cross-correlation is corrected thanks to the raw
dispersion curve). Also shown are the clean dispersion curve,
the 3-month dispersion curves, the standard deviation of the
group velocity calculated from these 3-month dispersion curves
and the SNR function of period.
Only the velocities passing the default selection criteria
(defined in the configuration file) are plotted.
- 4th panel shows a small map with the pair of stations, with
bounding box *bbox* = (min lon, max lon, min lat, max lat),
and, if applicable, a plot of instantaneous vs nominal period
The raw amplitude, raw dispersion curve, clean amplitude and clean
dispersion curve of the FTAN are given in *rawampl*, *rawvg*,
*cleanampl*, *cleanvg* (normally from method self.FTAN_complete).
If not given, the FTAN is performed by calling self.FTAN_complete().
Options:
- Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
control the location of the signal window and the noise window
(see function self.SNR()).
- Set whiten=True to whiten the spectrum of the cross-correlation.
- Set normalize_ampl=True to normalize the plotted amplitude (so
that the max amplitude = 1 at each period).
- Set logscale=True to plot log(ampl^2) instead of ampl.
- Give a list of months in parameter *months* to perform the FTAN
for a particular subset of months.
- additional kwargs sent to *self.FTAN_complete*
The method returns the plot figure.
@param rawampl: 2D array containing the amplitude of the raw FTAN
@type rawampl: L{numpy.ndarray}
@param rawvg: raw dispersion curve
@type rawvg: L{DispersionCurve}
@param cleanampl: 2D array containing the amplitude of the clean FTAN
@type cleanampl: L{numpy.ndarray}
@param cleanvg: clean dispersion curve
@type cleanvg: L{DispersionCurve}
@type showplot: bool
@param whiten: set to True to whiten the spectrum of the cross-correlation
@type whiten: bool
@param normalize_ampl: set to True to normalize amplitude
@type normalize_ampl: bool
@param months: list of months on which perform the FTAN (set to None to
perform the FTAN on all months)
@type months: list of (L{MonthYear} or (int, int))
@param logscale: set to True to plot log(ampl^2), to False to plot ampl
@type logscale: bool
@rtype: L{matplotlib.figure.Figure}
"""
# performing FTAN analysis if needed
if any(obj is None for obj in [rawampl, rawvg, cleanampl, cleanvg]):
rawampl, rawvg, cleanampl, cleanvg = self.FTAN_complete(
whiten=whiten, months=months, add_SNRs=True,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size,
**kwargs)
if normalize_ampl:
# normalizing amplitude at each period before plotting it
# (so that the max = 1)
for a in rawampl:
a[...] /= a.max()
for a in cleanampl:
a[...] /= a.max()
# preparing figure
fig = plt.figure(figsize=figsize)
# =======================================================
# 1st panel: cross-correlation (original and band-passed)
# =======================================================
gs1 = gridspec.GridSpec(len(PERIOD_BANDS) + 1, 1, wspace=0.0, hspace=0.0)
axlist = [fig.add_subplot(ss) for ss in gs1]
self.plot_by_period_band(axlist=axlist, plot_title=False,
whiten=whiten, months=months,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
# ===================
# 2nd panel: raw FTAN
# ===================
gs2 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0)
ax = fig.add_subplot(gs2[0, 0])
extent = (min(RAWFTAN_PERIODS), max(RAWFTAN_PERIODS),
min(FTAN_VELOCITIES), max(FTAN_VELOCITIES))
m = np.log10(rawampl.transpose() ** 2) if logscale else rawampl.transpose()
ax.imshow(m, aspect='auto', origin='lower', extent=extent)
# Period is instantaneous if and only if a list of
# (nominal period, inst period) is associated with the dispersion curve
periodlabel = 'Instantaneous period (sec)' if rawvg.nom2inst_periods \
else 'Nominal period (sec)'
ax.set_xlabel(periodlabel)
ax.set_ylabel("Velocity (km/sec)")
# saving limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# raw & clean vg curves
fmt = '--' if (~np.isnan(rawvg.v)).sum() > 1 else 'o'
ax.plot(rawvg.periods, rawvg.v, fmt, color='blue',
lw=2, label='raw disp curve')
fmt = '-' if (~np.isnan(cleanvg.v)).sum() > 1 else 'o'
ax.plot(cleanvg.periods, cleanvg.v, fmt, color='black',
lw=2, label='clean disp curve')
# plotting cut-off period
cutoffperiod = self.dist() / 12.0
ax.plot([cutoffperiod, cutoffperiod], ylim, color='grey')
# setting legend and initial extent
ax.legend(fontsize=11, loc='upper right')
x = (xlim[0] + xlim[1]) / 2.0
y = ylim[0] + 0.05 * (ylim[1] - ylim[0])
ax.text(x, y, "Raw FTAN", fontsize=12,
bbox={'color': 'k', 'facecolor': 'white', 'lw': 0.5},
horizontalalignment='center',
verticalalignment='center')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# ===========================
# 3rd panel: clean FTAN + SNR
# ===========================
gs3 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0)
ax = fig.add_subplot(gs3[0, 0])
extent = (min(CLEANFTAN_PERIODS), max(CLEANFTAN_PERIODS),
min(FTAN_VELOCITIES), max(FTAN_VELOCITIES))
m = np.log10(cleanampl.transpose() ** 2) if logscale else cleanampl.transpose()
ax.imshow(m, aspect='auto', origin='lower', extent=extent)
# Period is instantaneous if and only if a list of
# (nominal period, inst period) is associated with the dispersion curve
periodlabel = 'Instantaneous period (sec)' if cleanvg.nom2inst_periods \
else 'Nominal period (sec)'
ax.set_xlabel(periodlabel)
ax.set_ylabel("Velocity (km/sec)")
# saving limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# adding SNR function of period (on a separate y-axis)
ax2 = ax.twinx()
ax2.plot(cleanvg.periods, cleanvg.get_SNRs(xc=self), color='green', lw=2)
# fake plot for SNR to appear in legend
ax.plot([-1, 0], [0, 0], lw=2, color='green', label='SNR')
ax2.set_ylabel('SNR', color='green')
for tl in ax2.get_yticklabels():
tl.set_color('green')
# trimester vg curves
ntrimester = len(cleanvg.v_trimesters)
for i, vg_trimester in enumerate(cleanvg.filtered_trimester_vels()):
label = '3-month disp curves (n={})'.format(ntrimester) if i == 0 else None
ax.plot(cleanvg.periods, vg_trimester, color='gray', label=label)
# clean vg curve + error bars
vels, sdevs = cleanvg.filtered_vels_sdevs()
fmt = '-' if (~np.isnan(vels)).sum() > 1 else 'o'
ax.errorbar(x=cleanvg.periods, y=vels, yerr=sdevs, fmt=fmt, color='black',
lw=2, label='clean disp curve')
# legend
ax.legend(fontsize=11, loc='upper right')
x = (xlim[0] + xlim[1]) / 2.0
y = ylim[0] + 0.05 * (ylim[1] - ylim[0])
ax.text(x, y, "Clean FTAN", fontsize=12,
bbox={'color': 'k', 'facecolor': 'white', 'lw': 0.5},
horizontalalignment='center',
verticalalignment='center')
# plotting cut-off period
cutoffperiod = self.dist() / 12.0
ax.plot([cutoffperiod, cutoffperiod], ylim, color='grey')
# setting initial extent
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# ===========================================
# 4th panel: tectonic provinces + pair (top),
# instantaneous vs nominal period (bottom)
# ===========================================
# tectonic provinces and pairs
gs4 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0.0)
ax = fig.add_subplot(gs4[0, 0])
psutils.basemap(ax, labels=False, axeslabels=False)
x = (self.station1.coord[0], self.station2.coord[0])
y = (self.station1.coord[1], self.station2.coord[1])
s = (self.station1.name, self.station2.name)
ax.plot(x, y, '^-', color='k', ms=10, mfc='w', mew=1)
for lon, lat, label in zip(x, y, s):
ax.text(lon, lat, label, ha='center', va='bottom', fontsize=7, weight='bold')
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
# instantaneous vs nominal period (if applicable)
gs5 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0.0)
if rawvg.nom2inst_periods or cleanvg.nom2inst_periods:
ax = fig.add_subplot(gs5[0, 0])
if rawvg.nom2inst_periods:
nomperiods, instperiods = zip(*rawvg.nom2inst_periods)
ax.plot(nomperiods, instperiods, '-', label='raw FTAN')
if cleanvg.nom2inst_periods:
nomperiods, instperiods = zip(*cleanvg.nom2inst_periods)
ax.plot(nomperiods, instperiods, '-', label='clean FTAN')
ax.set_xlabel('Nominal period (s)')
ax.set_ylabel('Instantaneous period (s)')
ax.legend(fontsize=9, loc='lower right')
ax.grid(True)
# adjusting sizes
gs1.update(left=0.03, right=0.25)
gs2.update(left=0.30, right=0.535)
gs3.update(left=0.585, right=0.81)
gs4.update(left=0.85, right=0.98, bottom=0.51)
gs5.update(left=0.87, right=0.98, top=0.48)
# figure title, e.g., 'BL.GNSB-IU.RCBR, dist=1781 km, ndays=208'
title = self._FTANplot_title(months=months)
fig.suptitle(title, fontsize=14)
# exporting to file
if outfile:
fig.savefig(outfile, dpi=300, transparent=True)
if showplot:
plt.show()
return fig
def _plottitle(self, prefix='', months=None):
"""
E.g., 'SPB-ITAB (365 days from 2002-01-01 to 2002-12-01)'
or 'SPB-ITAB (90 days in months 01-2002, 02-2002)'
"""
s = '{pref}{sta1}-{sta2} '
s = s.format(pref=prefix, sta1=self.station1.name, sta2=self.station2.name)
if not months:
nday = self.nday
s += '({} days from {} to {})'.format(
nday, self.startday.strftime('%d/%m/%Y'),
self.endday.strftime('%d/%m/%Y'))
else:
monthxcs = [mxc for mxc in self.monthxcs if mxc.month in months]
nday = sum(monthxc.nday for monthxc in monthxcs)
strmonths = ', '.join(str(m.month) for m in monthxcs)
s += '{} days in months {}'.format(nday, strmonths)
return s
def _FTANplot_title(self, months=None):
"""
E.g., 'BL.GNSB-IU.RCBR, dist=1781 km, ndays=208'
"""
if not months:
nday = self.nday
else:
nday = sum(monthxc.nday for monthxc in self.monthxcs
if monthxc.month in months)
title = u"{}-{}, dist={:.0f} km, ndays={}"
title = title.format(self.station1.network + '.' + self.station1.name,
self.station2.network + '.' + self.station2.name,
self.dist(), nday)
return title
def _get_xcorr_dt(self):
"""
Returns the interval of the time array.
Warning: no check is made to ensure that that interval is constant.
@rtype: float
"""
return self.timearray[1] - self.timearray[0]
def _get_xcorr_nmax(self):
"""
Returns the max index of time array:
- self.timearray = [-t[nmax] ... t[0] ... t[nmax]] if not symmetrized
- = [t[0] ... t[nmax-1] t[nmax]] if symmetrized
@rtype: int
"""
nt = len(self.timearray)
return (nt - 1) // 2 if not self.symmetrized else nt - 1
def _get_monthyears_xcdataarray(self, months=None):
"""
Returns the sum of cross-corr data arrays of given
list of (month,year) -- or the whole cross-corr if
monthyears is None.
@type months: list of (L{MonthYear} or (int, int))
@rtype: L{numpy.ndarray}
"""
if not months:
return self.dataarray
else:
monthxcs = [mxc for mxc in self.monthxcs if mxc.month in months]
if monthxcs:
return sum(monthxc.dataarray for monthxc in monthxcs)
else:
return None
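# --- Illustrative sketch (added; not part of the original module) ------------
# phase_func() above reconstructs the phase spectrum from a group velocity
# curve via k(f) = 2.pi.integral[1/vg(f'), f'=f0..f] and phi(f) = dist * k(f).
# The hedged helper below reproduces that construction on a synthetic,
# dispersionless curve (constant vg); it assumes the numpy/scipy imports
# (np, integrate, interp1d) already made at the top of this module, and all
# names/values are hypothetical examples.
def _phase_from_vg_sketch(dist_km=1000.0, vg_kms=3.0,
                          periods=np.arange(5.0, 50.0, 5.0)):
    """Hedged demo of the k(f) = 2.pi.cumtrapz(1/vg) construction."""
    freqs = 1.0 / periods[::-1]          # increasing frequencies
    vg = vg_kms * np.ones_like(freqs)    # constant group velocity
    k = np.zeros_like(freqs)
    k[1:] = 2.0 * np.pi * integrate.cumtrapz(y=1.0 / vg, x=freqs)
    phi = dist_km * k
    # for constant vg, phi(f) grows linearly: phi = 2.pi.dist.(f - f0)/vg
    return interp1d(x=freqs, y=phi)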
class CrossCorrelationCollection(AttribDict):
"""
Collection of cross-correlations
= AttribDict{station1.name: AttribDict {station2.name: instance of CrossCorrelation}}
AttribDict is a dict (defined in obspy.core) whose keys are also
attributes. This means that a cross-correlation between a pair
of stations STA01-STA02 can be accessed both ways:
- self['STA01']['STA02'] (easier in regular code)
- self.STA01.STA02 (easier in an interactive session)
"""
def __init__(self):
"""
Initializing object as AttribDict
"""
AttribDict.__init__(self)
def __repr__(self):
npair = len(self.pairs())
s = '(AttribDict)<Collection of cross-correlation between {0} pairs>'
return s.format(npair)
def pairs(self, sort=False, minday=1, minSNR=None, mindist=None,
withnets=None, onlywithnets=None, pairs_subset=None,
**kwargs):
"""
Returns pairs of stations of cross-correlation collection
verifying conditions.
Additional arguments in *kwargs* are sent to xc.SNR().
@type sort: bool
@type minday: int
@type minSNR: float
@type mindist: float
@type withnets: list of str
@type onlywithnets: list of str
@type pairs_subset: list of (str, str)
@rtype: list of (str, str)
"""
pairs = [(s1, s2) for s1 in self for s2 in self[s1]]
if sort:
pairs.sort()
# filtering subset of pairs
if pairs_subset:
pairs_subset = [set(pair) for pair in pairs_subset]
pairs = [pair for pair in pairs if set(pair) in pairs_subset]
# filtering by nb of days
pairs = [(s1, s2) for (s1, s2) in pairs
if self[s1][s2].nday >= minday]
# filtering by min SNR
if minSNR:
pairs = [(s1, s2) for (s1, s2) in pairs
if self[s1][s2].SNR(**kwargs) >= minSNR]
# filtering by distance
if mindist:
pairs = [(s1, s2) for (s1, s2) in pairs
if self[s1][s2].dist() >= mindist]
# filtering by network
if withnets:
# one of the station of the pair must belong to networks
pairs = [(s1, s2) for (s1, s2) in pairs if
self[s1][s2].station1.network in withnets or
self[s1][s2].station2.network in withnets]
if onlywithnets:
# both stations of the pair must belong to networks
pairs = [(s1, s2) for (s1, s2) in pairs if
self[s1][s2].station1.network in onlywithnets and
self[s1][s2].station2.network in onlywithnets]
return pairs
def pairs_and_SNRarrays(self, pairs_subset=None, minspectSNR=None,
whiten=False, verbose=False,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
Returns pairs and spectral SNR array whose spectral SNRs
are all >= minspectSNR
Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
control the location of the signal window and the noise window
(see function self.SNR()).
Returns {pair1: SNRarray1, pair2: SNRarray2 etc.}
@type pairs_subset: list of (str, str)
@type minspectSNR: float
@type whiten: bool
@type verbose: bool
@rtype: dict from (str, str) to L{numpy.ndarray}
"""
if verbose:
print "Estimating spectral SNR of pair:",
# initial list of pairs
pairs = pairs_subset if pairs_subset else self.pairs()
# filtering by min spectral SNR
SNRarraydict = {}
for (s1, s2) in pairs:
if verbose:
print '{0}-{1}'.format(s1, s2),
SNRarray = self[s1][s2].SNR(periodbands=PERIOD_BANDS, whiten=whiten,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
if not minspectSNR or min(SNRarray) >= minspectSNR:
SNRarraydict[(s1, s2)] = SNRarray
if verbose:
print
return SNRarraydict
def add(self, tracedict, stations, xcorr_tmax,
xcorrdict=None, verbose=False):
"""
Stacks cross-correlations between pairs of stations
from a dict of {station.name: Trace} (in *tracedict*).
You can provide pre-calculated cross-correlations in *xcorrdict*
= dict {(station1.name, station2.name): numpy array containing cross-corr}
Initializes self[station1][station2] as an instance of CrossCorrelation
if the pair station1-station2 is not in self
@type tracedict: dict from str to L{obspy.core.trace.Trace}
@type stations: list of L{pysismo.psstation.Station}
@type xcorr_tmax: float
@type verbose: bool
"""
if not xcorrdict:
xcorrdict = {}
stationtrace_pairs = it.combinations(sorted(tracedict.items()), 2)
for (s1name, tr1), (s2name, tr2) in stationtrace_pairs:
if verbose:
print "{s1}-{s2}".format(s1=s1name, s2=s2name),
# checking that sampling rates are equal
assert tr1.stats.sampling_rate == tr2.stats.sampling_rate
# looking for s1 and s2 in the list of stations
station1 = next(s for s in stations if s.name == s1name)
station2 = next(s for s in stations if s.name == s2name)
# initializing self[s1] if s1 not in self
# (avoiding setdefault() since its behavior is unknown with AttribDict)
if s1name not in self:
self[s1name] = AttribDict()
# initializing self[s1][s2] if s2 not in self[s1]
if s2name not in self[s1name]:
self[s1name][s2name] = CrossCorrelation(
station1=station1,
station2=station2,
xcorr_dt=1.0 / tr1.stats.sampling_rate,
xcorr_tmax=xcorr_tmax)
# stacking cross-correlation
try:
# getting pre-calculated cross-corr, if provided
xcorr = xcorrdict.get((s1name, s2name), None)
self[s1name][s2name].add(tr1, tr2, xcorr=xcorr)
#self[s1name][s2name].phase_stack(tr1, tr2, xcorr=xcorr)
#self[s1name][s2name].phase_weighted_stack()
except pserrors.NaNError:
# got NaN
s = "Warning: got NaN in cross-corr between {s1}-{s2} -> skipping"
print s.format(s1=s1name, s2=s2name)
if verbose:
print
def plot_pws(self, xlim=None, norm=True,
whiten=False, sym=False, minSNR=None, minday=1,
withnets=None, onlywithnets=None, figsize=(21.0, 12.0),
outfile=None, dpi=300, showplot=True, stack_style='linear'):
"""
Plots the phase-weighted stacks: one figure per station pair
is produced.
"""
# preparing pairs
pairs = self.pairs(minday=minday, minSNR=minSNR, withnets=withnets,
onlywithnets=onlywithnets)
npair = len(pairs)
if not npair:
print "Nothing to plot!"
return
plt.figure()
# one plot for each pair
nrow = int(np.sqrt(npair))
if np.sqrt(npair) != nrow:
nrow += 1
ncol = int(npair / nrow)
if npair % nrow != 0:
ncol += 1
# sorting pairs alphabetically
pairs.sort()
for iplot, (s1, s2) in enumerate(pairs):
plt.figure(iplot)
# cross-correlation to plot (no symmetrization or whitening applied here)
xcplot = self[s1][s2]
# plotting
plt.plot(xcplot.timearray, xcplot.dataarray)
if xlim:
plt.xlim(xlim)
# title
s = '{s1}-{s2}: {nday} stacks from {t1} to {t2}.png'
#remove microseconds in time string
title = s.format(s1=s1, s2=s2,
nday=xcplot.nday,
t1=str(xcplot.startday)[:-11],
t2=str(xcplot.endday)[:-11])
plt.title(title)
# x-axis label
#if iplot + 1 == npair:
plt.xlabel('Time (s)')
out = os.path.abspath(os.path.join(outfile, os.pardir))
outfile_individual = os.path.join(out, title)
if os.path.exists(outfile_individual):
# backup
shutil.copyfile(outfile_individual, \
outfile_individual + '~')
fig = plt.gcf()
fig.set_size_inches(figsize)
print(outfile_individual)
fig.savefig(outfile_individual, dpi=dpi)
def plot(self, plot_type='distance', xlim=None, norm=True, whiten=False,
sym=False, minSNR=None, minday=1, withnets=None, onlywithnets=None,
figsize=(21.0, 12.0), outfile=None, dpi=300, showplot=True,
stack_style='linear'):
"""
method to plot a collection of cross-correlations
"""
# preparing pairs
pairs = self.pairs(minday=minday, minSNR=minSNR, withnets=withnets,
onlywithnets=onlywithnets)
npair = len(pairs)
if not npair:
print "Nothing to plot!"
return
plt.figure()
# classic plot = one plot for each pair
if plot_type == 'classic':
nrow = int(np.sqrt(npair))
if np.sqrt(npair) != nrow:
nrow += 1
ncol = int(npair / nrow)
if npair % nrow != 0:
ncol += 1
# sorting pairs alphabetically
pairs.sort()
for iplot, (s1, s2) in enumerate(pairs):
plt.figure(iplot)
# symmetrizing cross-corr if necessary
xcplot = self[s1][s2].symmetrize(inplace=False) \
if sym else self[s1][s2]
# spectral whitening
if whiten:
xcplot = xcplot.whiten(inplace=False)
# subplot
#plt.subplot(nrow, ncol, iplot + 1)
# normalizing factor
nrm = max(abs(xcplot.dataarray)) if norm else 1.0
# plotting
plt.plot(xcplot.timearray, xcplot.dataarray / nrm)
if xlim:
plt.xlim(xlim)
# title
locs1 = ','.join(sorted(["'{0}'".format(loc) \
for loc in xcplot.locs1]))
locs2 = ','.join(sorted(["'{0}'".format(loc) \
for loc in xcplot.locs2]))
s = '{s1}-{s2}: {nday} stacks from {t1} to {t2}.png'
#remove microseconds in time string
title = s.format(s1=s1, s2=s2,
nday=xcplot.nday,
t1=str(xcplot.startday)[:-11],
t2=str(xcplot.endday)[:-11])
plt.title(title)
# x-axis label
#if iplot + 1 == npair:
plt.xlabel('Time (s)')
out = os.path.abspath(os.path.join(outfile, os.pardir))
outfile_individual = os.path.join(out, title)
if os.path.exists(outfile_individual):
# backup
shutil.copyfile(outfile_individual, \
outfile_individual + '~')
fig = plt.gcf()
fig.set_size_inches(figsize)
print(outfile_individual)
fig.savefig(outfile_individual, dpi=dpi)
# enter the number of cross-correlations to be plotted, so as not to crowd the image
# distance plot = one plot for all pairs, y-shifted according to pair distance
elif plot_type == 'distance':
maxdist = max(self[x][y].dist() for (x, y) in pairs)
corr2km = maxdist / 30.0
cc = mpl.rcParams['axes.color_cycle'] # color cycle
#maybe only plot every 10th station pair?
# sorting pairs by distance
pairs.sort(key=lambda (s1, s2): self[s1][s2].dist())
pairs.reverse()
# build a subset of station pairs (already sorted by distance), keeping at
# most *plot_number* pairs so as not to crowd the figure
pairs_copy = pairs
pairs_list = []
plot_number = len(pairs)  # maximum number of xcorrs that can fit on a page
instance_number = max(len(pairs) // plot_number, 1)  # keep every n-th pair
for i, pair in enumerate(pairs_copy):
    if i < len(pairs) - 1 and i % instance_number == 0:
        pairs_list.append(pair)
        if plot_number <= len(pairs_list):
            break
for ipair, (s1, s2) in enumerate(pairs_list):
# symmetrizing cross-corr if necessary
xcplot = self[s1][s2].symmetrize(inplace=False) if sym else self[s1][s2]
# spectral whitening
if whiten:
xcplot = xcplot.whiten(inplace=False)
fill = False
absolute = True
color = cc[ipair % len(cc)]
#color = 'k'
# normalizing factor
nrm = max(abs(xcplot.dataarray)) if norm else 1.0
# plotting
xarray = xcplot.timearray
#get absolute value of data array for more visual plot
if fill and absolute:
yarray = corr2km * abs(xcplot.dataarray) / nrm + xcplot.dist()
#for point in yarray:
# if point - xcplot.dist() < 400: point = xcplot.dist();
plt.fill_between(xarray, xcplot.dist(), yarray, color=color)
elif fill and not absolute:
yarray = xcplot.dist() + corr2km * xcplot.dataarray / nrm
plt.fill_between(xarray, xcplot.dist(), yarray, color=color)
elif not fill and absolute:
yarray = xcplot.dist() + corr2km * abs(xcplot.dataarray) / nrm
plt.plot(xarray, yarray, color = color)
else:
yarray = xcplot.dist() + corr2km * xcplot.dataarray / nrm
plt.plot(xarray, yarray, color = color)
#d = [0]*len(yarray)
if xlim:
plt.xlim(xlim)
# adding annotation @ xytest, annotation line @ xyarrow
xmin, xmax = plt.xlim()
xextent = plt.xlim()[1] - plt.xlim()[0]
ymin = -0.1 * maxdist
ymax = 1.1 * maxdist
# # all annotations on the right side
# x = xmax - xextent / 10.0
# y = maxdist if npair == 1 else ymin + ipair*(ymax-ymin)/(npair-1)
# xytext = (x, y)
# xyarrow = (x - xextent / 30.0, xcplot.dist())
# align = 'left'
# relpos = (0, 0.5)
# station-pair label (built before annotating, so the annotation
# always refers to the current pair)
net1 = xcplot.station1.network
net2 = xcplot.station2.network
locs1 = ','.join(sorted("'{0}'".format(loc) for loc in xcplot.locs1))
locs2 = ','.join(sorted("'{0}'".format(loc) for loc in xcplot.locs2))
s = '{net1}.{s1}[{locs1}]-{net2}.{s2}[{locs2}]: {nday} days {t1}-{t2}'
s = s.format(net1=net1, s1=s1, locs1=locs1, net2=net2, s2=s2,
             locs2=locs2, nday=xcplot.nday,
             t1=xcplot.startday.strftime('%d/%m/%y'),
             t2=xcplot.endday.strftime('%d/%m/%y'))
# alternating annotations right/left of the plot
sign = 2 * (ipair % 2 - 0.5)
x = xmin + xextent / 10.0 if sign > 0 else xmax - xextent / 10.0
y = ymin + (ipair // 2) * (ymax - ymin) / max(npair // 2 - 1, 1)
xytext = (x, y)
xyarrow = (x + sign * xextent / 30.0, xcplot.dist())
align = 'right' if sign > 0 else 'left'
relpos = (1, 0.5) if sign > 0 else (0, 0.5)
bbox = {'color': color, 'facecolor': 'white', 'alpha': 0.9}
arrowprops = {'arrowstyle': "-", 'relpos': relpos, 'color': color}
plt.annotate(s=s, xy=xyarrow, xytext=xytext, fontsize=9,
             color='k', horizontalalignment=align,
             bbox=bbox, arrowprops=arrowprops)
plt.grid()
plt.xlabel('Time (s)')
plt.ylabel('Distance (km)')
plt.ylim((0, plt.ylim()[1]))
# saving figure
if plot_type == 'distance':
if outfile:
if os.path.exists(outfile):
# backup
shutil.copyfile(outfile, outfile + '~')
fig = plt.gcf()
fig.set_size_inches(figsize)
fig.savefig(outfile, dpi=dpi)
else:
pass
if showplot:
# showing plot
plt.show()
plt.close()
def plot_spectral_SNR(self, whiten=False, minSNR=None, minspectSNR=None,
minday=1, mindist=None, withnets=None, onlywithnets=None,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
Plots spectral SNRs
"""
# filtering pairs
pairs = self.pairs(minday=minday, minSNR=minSNR, mindist=mindist,
withnets=withnets, onlywithnets=onlywithnets,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
# SNRarrays = dict {(station1,station2): SNR array}
SNRarrays = self.pairs_and_SNRarrays(
pairs_subset=pairs, minspectSNR=minspectSNR,
whiten=whiten, verbose=True,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
npair = len(SNRarrays)
if not npair:
print 'Nothing to plot!!!'
return
# min-max SNR
minSNR = min([SNR for SNRarray in SNRarrays.values() for SNR in SNRarray])
maxSNR = max([SNR for SNRarray in SNRarrays.values() for SNR in SNRarray])
# sorting SNR arrays by increasing first value
SNRarrays = OrderedDict(sorted(SNRarrays.items(), key=lambda (k, v): v[0]))
# array of mid of time bands
periodarray = [(tmin + tmax) / 2.0 for (tmin, tmax) in PERIOD_BANDS]
minperiod = min(periodarray)
# color cycle
cc = mpl.rcParams['axes.color_cycle']
# plotting SNR arrays
plt.figure()
for ipair, ((s1, s2), SNRarray) in enumerate(SNRarrays.items()):
xc = self[s1][s2]
color = cc[ipair % len(cc)]
# SNR vs period
plt.plot(periodarray, SNRarray, color=color)
# annotation
xtext = minperiod - 4
ytext = minSNR * 0.5 + ipair * (maxSNR - minSNR * 0.5) / (npair - 1)
xytext = (xtext, ytext)
xyarrow = (minperiod - 1, SNRarray[0])
relpos = (1, 0.5)
net1 = xc.station1.network
net2 = xc.station2.network
s = '{i}: {net1}.{s1}-{net2}.{s2}: {dist:.1f} km, {nday} days'
s = s.format(i=ipair, net1=net1, s1=s1, net2=net2, s2=s2,
dist=xc.dist(), nday=xc.nday)
bbox = {'color': color, 'facecolor': 'white', 'alpha': 0.9}
arrowprops = {'arrowstyle': '-', 'relpos': relpos, 'color': color}
plt.annotate(s=s, xy=xyarrow, xytext=xytext, fontsize=9,
color='k', horizontalalignment='right',
bbox=bbox, arrowprops=arrowprops)
plt.xlim((0.0, plt.xlim()[1]))
plt.xlabel('Period (s)')
plt.ylabel('SNR')
plt.title(u'{0} pairs'.format(npair))
plt.grid()
plt.show()
def plot_pairs(self, minSNR=None, minspectSNR=None, minday=1, mindist=None,
withnets=None, onlywithnets=None, pairs_subset=None, whiten=False,
stationlabel=False, bbox=BBOX_LARGE, xsize=10, plotkwargs=None,
SNRkwargs=None):
"""
Plots pairs of stations on a map
@type bbox: tuple
"""
if not plotkwargs:
plotkwargs = {}
if not SNRkwargs:
SNRkwargs = {}
# filtering pairs
pairs = self.pairs(minday=minday, minSNR=minSNR, mindist=mindist,
withnets=withnets, onlywithnets=onlywithnets,
pairs_subset=pairs_subset, **SNRkwargs)
if minspectSNR:
# plotting only pairs with all spect SNR >= minspectSNR
SNRarraydict = self.pairs_and_SNRarrays(
pairs_subset=pairs, minspectSNR=minspectSNR,
whiten=whiten, verbose=True, **SNRkwargs)
pairs = SNRarraydict.keys()
# nb of pairs
npair = len(pairs)
if not npair:
print 'Nothing to plot!!!'
return
# initializing figure
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
plt.figure(figsize=(xsize, aspectratio * xsize))
# plotting coasts and tectonic provinces
psutils.basemap(plt.gca(), bbox=bbox)
# plotting pairs
for s1, s2 in pairs:
x, y = zip(self[s1][s2].station1.coord, self[s1][s2].station2.coord)
if not plotkwargs:
plotkwargs = dict(color='grey', lw=0.5)
plt.plot(x, y, '-', **plotkwargs)
# plotting stations
x, y = zip(*[s.coord for s in self.stations(pairs)])
plt.plot(x, y, '^', color='k', ms=10, mfc='w', mew=1)
if stationlabel:
# stations label
for station in self.stations(pairs):
plt.text(station.coord[0], station.coord[1], station.name,
ha='center', va='bottom', fontsize=10, weight='bold')
# setting axes
plt.title(u'{0} pairs'.format(npair))
plt.xlim(bbox[:2])
plt.ylim(bbox[2:])
plt.show()
def export(self, outprefix, stations=None, verbose=False):
"""
Exports cross-correlations to pickle file and txt file
@type outprefix: str or unicode
@type stations: list of L{Station}
"""
self._to_picklefile(outprefix, verbose=verbose)
self._to_ascii(outprefix, verbose=verbose)
self._pairsinfo_to_ascii(outprefix, verbose=verbose)
self._stationsinfo_to_ascii(outprefix, stations=stations, verbose=verbose)
def FTANs(self, prefix=None, suffix='', whiten=False,
normalize_ampl=True, logscale=True, mindist=None,
minSNR=None, minspectSNR=None, monthyears=None,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE,
**kwargs):
"""
Exports raw-clean FTAN plots to pdf (one page per pair)
and clean dispersion curves to pickle file by calling
plot_FTAN() for each cross-correlation.
pdf is exported to *prefix*[_*suffix*].pdf
dispersion curves are exported to *prefix*[_*suffix*].pickle
If *prefix* is not given, then it is automatically set up as:
*FTAN_DIR*/FTAN[_whitenedxc][_mindist=...][_minSNR=...]
[_minspectSNR=...][_month-year_month-year]
e.g.: ./output/FTAN/FTAN_whitenedxc_minspectSNR=10
Options:
- Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
control the location of the signal window and the noise window
(see function xc.SNR()).
- Set whiten=True to whiten the spectrum of the cross-correlation.
- Set normalize_ampl=True to normalize the plotted amplitude (so
that the max amplitude = 1 at each period).
- Set logscale=True to plot log(ampl^2) instead of ampl.
- additional kwargs sent to FTAN_complete() and plot_FTAN()
See. e.g., Levshin & Ritzwoller, "Automated detection,
extraction, and measurement of regional surface waves",
Pure Appl. Geoph. (2001) and Bensen et al., "Processing
seismic ambient noise data to obtain reliable broad-band
surface wave dispersion measurements", Geophys. J. Int. (2007).
@type prefix: str or unicode
@type suffix: str or unicode
@type minSNR: float
@type mindist: float
@type minspectSNR: float
@type whiten: bool
@type monthyears: list of (int, int)
"""
# setting default prefix if not given
if not prefix:
parts = [os.path.join(FTAN_DIR, 'FTAN')]
if whiten:
parts.append('whitenedxc')
if mindist:
parts.append('mindist={}'.format(mindist))
if minSNR:
parts.append('minSNR={}'.format(minSNR))
if minspectSNR:
parts.append('minspectSNR={}'.format(minspectSNR))
if monthyears:
parts.extend('{:02d}-{}'.format(m, y) for m, y in monthyears)
else:
parts = [prefix]
if suffix:
parts.append(suffix)
# path of output files (without extension)
outputpath = u'_'.join(parts)
# opening pdf file
pdfpath = u'{}.pdf'.format(outputpath)
if os.path.exists(pdfpath):
# backup
shutil.copyfile(pdfpath, pdfpath + '~')
pdf = PdfPages(pdfpath)
# filtering pairs
pairs = self.pairs(sort=True, minSNR=minSNR, mindist=mindist,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
if minspectSNR:
# plotting only pairs with all spect SNR >= minspectSNR
SNRarraydict = self.pairs_and_SNRarrays(
pairs_subset=pairs, minspectSNR=minspectSNR,
whiten=whiten, verbose=True,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
pairs = sorted(SNRarraydict.keys())
s = ("Exporting FTANs of {0} pairs to file {1}.pdf\n"
"and dispersion curves to file {1}.pickle\n")
print s.format(len(pairs), outputpath)
cleanvgcurves = []
print "Appending FTAN of pair:",
for i, (s1, s2) in enumerate(pairs):
# appending FTAN plot of pair s1-s2 to pdf
print "[{}] {}-{}".format(i + 1, s1, s2),
xc = self[s1][s2]
assert isinstance(xc, CrossCorrelation)
try:
# complete FTAN analysis
rawampl, rawvg, cleanampl, cleanvg = xc.FTAN_complete(
whiten=whiten, months=monthyears,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size,
**kwargs)
# plotting raw-clean FTAN
fig = xc.plot_FTAN(rawampl, rawvg, cleanampl, cleanvg,
whiten=whiten,
normalize_ampl=normalize_ampl,
logscale=logscale,
showplot=False,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size,
**kwargs)
pdf.savefig(fig)
plt.close()
# appending clean vg curve
cleanvgcurves.append(cleanvg)
except Exception as err:
# something went wrong with this FTAN
print "\nGot unexpected error:\n\n{}\n\nSKIPPING PAIR!".format(err)
print "\nSaving files..."
# closing pdf
pdf.close()
# exporting vg curves to pickle file
f = psutils.openandbackup(outputpath + '.pickle', mode='wb')
pickle.dump(cleanvgcurves, f, protocol=2)
f.close()
def stations(self, pairs, sort=True):
"""
Returns a list of unique stations corresponding
to a list of pairs (of station name).
@type pairs: list of (str, str)
@rtype: list of L{pysismo.psstation.Station}
"""
stations = []
for s1, s2 in pairs:
if self[s1][s2].station1 not in stations:
stations.append(self[s1][s2].station1)
if self[s1][s2].station2 not in stations:
stations.append(self[s1][s2].station2)
if sort:
stations.sort(key=lambda obj: obj.name)
return stations
def _to_picklefile(self, outprefix, verbose=False):
"""
Dumps cross-correlations to (binary) pickle file
@type outprefix: str or unicode
"""
if verbose:
s = "Exporting cross-correlations in binary format to file: {}.pickle"
print s.format(outprefix)
f = psutils.openandbackup(outprefix + '.pickle', mode='wb')
pickle.dump(self, f, protocol=2)
f.close()
def _to_ascii(self, outprefix, verbose=False):
"""
Exports cross-correlations to txt file
@type outprefix: str or unicode
"""
if verbose:
s = "Exporting cross-correlations in ascci format to file: {}.txt"
print s.format(outprefix)
# writing data file: time array (1st column)
# and cross-corr array (one column per pair)
f = psutils.openandbackup(outprefix + '.txt', mode='w')
pairs = [(s1, s2) for (s1, s2) in self.pairs(sort=True) if self[s1][s2].nday]
# writing header
header = ['time'] + ["{0}-{1}".format(s1, s2) for s1, s2 in pairs]
f.write('\t'.join(header) + '\n')
# writing line = ith [time, cross-corr 1st pair, cross-corr 2nd pair etc]
data = zip(self._get_timearray(), *[self[s1][s2].dataarray for s1, s2 in pairs])
for fields in data:
line = [str(fld) for fld in fields]
f.write('\t'.join(line) + '\n')
f.close()
def _pairsinfo_to_ascii(self, outprefix, verbose=False):
"""
Exports pairs information to txt file
@type outprefix: str or unicode
"""
if verbose:
s = "Exporting pairs information to file: {}.stats.txt"
print s.format(outprefix)
# writing file: coord, locations, ids etc. for each pair
pairs = self.pairs(sort=True)
f = psutils.openandbackup(outprefix + '.stats.txt', mode='w')
# header
header = ['pair', 'lon1', 'lat1', 'lon2', 'lat2',
'locs1', 'locs2', 'ids1', 'ids2',
'distance', 'startday', 'endday', 'nday']
f.write('\t'.join(header) + '\n')
# fields
for (s1, s2) in pairs:
fields = [
'{0}-{1}'.format(s1, s2),
self[s1][s2].station1.coord[0],
self[s1][s2].station1.coord[1],
self[s1][s2].station2.coord[0],
self[s1][s2].station2.coord[1],
','.join(sorted("'{}'".format(l) for l in self[s1][s2].locs1)),
','.join(sorted("'{}'".format(l) for l in self[s1][s2].locs2)),
','.join(sorted(sid for sid in self[s1][s2].ids1)),
','.join(sorted(sid for sid in self[s1][s2].ids2)),
self[s1][s2].dist(),
self[s1][s2].startday,
self[s1][s2].endday,
self[s1][s2].nday
]
line = [str(fld) if (fld or fld == 0) else 'none' for fld in fields]
f.write('\t'.join(line) + '\n')
f.close()
def _stationsinfo_to_ascii(self, outprefix, stations=None, verbose=False):
"""
Exports information on cross-correlated stations
to txt file
@type outprefix: str or unicode
@type stations: list of {Station}
"""
if verbose:
s = "Exporting stations information to file: {}.stations.txt"
print s.format(outprefix)
if not stations:
# extracting the list of stations from cross-correlations
# if not provided
stations = self.stations(self.pairs(minday=0), sort=True)
# opening stations file and writing:
# station name, network, lon, lat, nb of pairs, total days of cross-corr
f = psutils.openandbackup(outprefix + '.stations.txt', mode='w')
header = ['name', 'network', 'lon', 'lat', 'npair', 'nday']
f.write('\t'.join(header) + '\n')
for station in stations:
# pairs in which station appears
pairs = [(s1, s2) for s1, s2 in self.pairs()
if station in [self[s1][s2].station1, self[s1][s2].station2]]
# total nb of days of pairs
nday = sum(self[s1][s2].nday for s1, s2 in pairs)
# writing fields
fields = [
station.name,
station.network,
str(station.coord[0]),
str(station.coord[1]),
str(len(pairs)),
str(nday)
]
f.write('\t'.join(fields) + '\n')
f.close()
def _get_timearray(self):
"""
Returns time array of cross-correlations
@rtype: L{numpy.ndarray}
"""
pairs = self.pairs()
# reference time array
s1, s2 = pairs[0]
reftimearray = self[s1][s2].timearray
# checking that all time arrays are equal to reference time array
for (s1, s2) in pairs:
if np.any(self[s1][s2].timearray != reftimearray):
s = 'Cross-corr collection does not have a unique timelag array'
raise Exception(s)
return reftimearray
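# --- Illustrative sketch (added; not part of the original module) ------------
# A CrossCorrelationCollection is a nested AttribDict, so a pair can be
# reached either as xc['STA01']['STA02'] or xc.STA01.STA02, and pairs()
# filters on number of days, SNR, distance and network. The hedged helper
# below is a usage example operating on an already-loaded collection (e.g.
# the result of load_pickled_xcorr() further down); the thresholds are
# arbitrary example values.
def _collection_usage_sketch(xc, min_days=30, min_dist_km=100.0):
    """Print the station pairs of *xc* passing simple selection criteria."""
    for s1, s2 in xc.pairs(sort=True, minday=min_days, mindist=min_dist_km):
        pair = xc[s1][s2]  # equivalent to attribute-style access xc.<s1>.<s2>
        print "{}-{}: {:.0f} km, {} days".format(s1, s2, pair.dist(), pair.nday)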
def get_merged_trace(station, date, xcorr_interval, skiplocs=CROSSCORR_SKIPLOCS, minfill=MINFILL):
"""
Returns one trace extracted from selected station, at selected date
(padded by xcorr_interval/24 minutes on each side to avoid edge
effects during subsequent processing, i.e. roughly 2 minutes of
padding for a 45-minute xcorr interval).
Traces whose location belongs to *skiplocs* are discarded, then
if several locations remain, only the first is kept. Finally,
if several traces (with the same location) remain, they are
merged, WITH GAPS FILLED USING LINEAR INTERPOLATION.
Raises CannotPreprocess exception if:
- no trace remains after discarding the unwanted locations
- data fill is < *minfill*
@type station: L{psstation.Station}
@type date: L{datetime.date}
@param skiplocs: list of locations to discard in station's data
@type skiplocs: iterable
@param minfill: minimum data fill to keep trace
@rtype: L{Trace}
"""
# edge padding: 1/24th of the overall xcorr time interval on each side
startminutes = (xcorr_interval / 24.0)
endminutes = xcorr_interval + startminutes
# getting station's stream at selected date
# (+/- one hour to avoid edge effects when removing response)
t0 = UTCDateTime(date) # date at time 00h00m00s
path_start = t0 - dt.timedelta(minutes=startminutes)
path_end = t0 + dt.timedelta(minutes=endminutes)
#station_path_old = station.getpath(date, MSEED_DIR)
station_path_SQL = station.getpath(t0, t0+dt.timedelta\
(minutes=xcorr_interval))
#print "station old path: ", station_path_old
#print "station SQl path: ", station_path_SQL
st = read(pathname_or_url=station_path_SQL,
starttime=path_start, endtime=path_end)
#st = read(pathname_or_url=station.getpath(date),
# starttime=t0 - dt.timedelta(hours=1),
# endtime=t0 + dt.timedelta(days=1, hours=1))
# removing traces of stream from locations to skip
for tr in [tr for tr in st if tr.stats.location in skiplocs]:
st.remove(tr)
if not st.traces:
# no remaining trace!
raise pserrors.CannotPreprocess("No trace")
# if more than one location, we retain only the first one
if len(set(tr.stats.location for tr in st)) > 1:
select_loc = sorted(set(tr.stats.location for tr in st))[0]
for tr in [tr for tr in st if tr.stats.location != select_loc]:
st.remove(tr)
# Data fill for current date
fill = psutils.get_fill(st, starttime=t0, endtime=t0 + dt.timedelta(minutes=endminutes))
if fill < minfill:
# not enough data
raise pserrors.CannotPreprocess("{:.0f}% fill".format(fill * 100))
# Merging traces, FILLING GAPS WITH LINEAR INTERP
st.merge(fill_value='interpolate')
trace = st[0]
return trace
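# --- Illustrative sketch (added; not part of the original module) ------------
# get_merged_trace() above discards unwanted locations then merges what is
# left, filling gaps by linear interpolation. The hedged helper below shows
# the same ObsPy pattern in isolation; *mseed_path* and *skiplocs* are
# hypothetical example values.
def _merge_with_gaps_sketch(mseed_path='example.mseed', skiplocs=('50',)):
    """Hedged demo of location filtering + gap-filling merge on a Stream."""
    st = read(pathname_or_url=mseed_path)
    # drop traces whose location code is unwanted
    for tr in [tr for tr in st if tr.stats.location in skiplocs]:
        st.remove(tr)
    # fill gaps between the remaining traces by linear interpolation
    st.merge(fill_value='interpolate')
    return st[0]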
def get_or_attach_response(trace, dataless_inventories=(), xml_inventories=()):
"""
Returns or attach instrumental response, from dataless seed inventories
(as returned by psstation.get_dataless_inventories()) and/or StationXML
inventories (as returned by psstation.get_stationxml_inventories()).
If a response is found in a dataless inventory, then a dict of poles
and zeros is returned. If a response is found in a StationXML
inventory, then it is directly attached to the trace and nothing is
returned.
Raises CannotPreprocess exception if no instrumental response is found.
@type trace: L{Trace}
@param dataless_inventories: inventories from dataless seed files (as returned by
psstation.get_dataless_inventories())
@type dataless_inventories: list of L{obspy.xseed.parser.Parser}
@param xml_inventories: inventories from StationXML files (as returned by
psstation.get_stationxml_inventories())
@type xml_inventories: list of L{obspy.station.inventory.Inventory}
"""
t1 = dt.datetime.now()
# looking for instrument response...
try:
# ...first in dataless seed inventories
paz = psstation.get_paz(channelid=trace.id,
t=trace.stats.starttime,
inventories=dataless_inventories)
return paz
except pserrors.NoPAZFound:
# ... then in dataless seed inventories, replacing 'BHZ' with 'HHZ'
# in trace's id (trick to make code work with Diogo's data)
try:
paz = psstation.get_paz(channelid=trace.id.replace('BHZ', 'HHZ'),
t=trace.stats.starttime,
inventories=dataless_inventories)
return paz
except pserrors.NoPAZFound:
# ...finally in StationXML inventories
try:
trace.attach_response(inventories=xml_inventories)
except Exception:
# no response found!
raise pserrors.CannotPreprocess("No response found")
delta = (dt.datetime.now() - t1).total_seconds()
print "\nProcessed response attachment in {:.1f} seconds".format(delta)
def preprocess_trace(trace, paz=None, freqmin=FREQMIN, freqmax=FREQMAX,
freqmin_earthquake=FREQMIN_EARTHQUAKE,
freqmax_earthquake=FREQMAX_EARTHQUAKE,
corners=CORNERS, zerophase=ZEROPHASE,
period_resample=PERIOD_RESAMPLE,
onebit_norm=ONEBIT_NORM,
window_time=WINDOW_TIME, window_freq=WINDOW_FREQ):
"""
Preprocesses a trace (so that it is ready to be cross-correlated),
by applying the following steps:
- removal of instrument response, mean and trend
- band-pass filtering between *freqmin*-*freqmax*
- downsampling to *period_resample* secs
- time-normalization (one-bit normalization or normalization
by the running mean in the earthquake frequency band)
- spectral whitening (if running mean normalization)
Raises CannotPreprocess exception if:
- trace only contains 0 (happens sometimes...)
- a normalization weight is 0 or NaN
- a NaN appeared in the trace data
Note that the processing steps are performed in-place.
@type trace: L{Trace}
@param paz: poles and zeros of instrumental response
(set None if response is directly attached to trace)
@param freqmin: low frequency of the band-pass filter
@param freqmax: high frequency of the band-pass filter
@param freqmin_earthquake: low frequency of the earthquake band
@param freqmax_earthquake: high frequency of the earthquake band
@param corners: nb of corners of the band-pass filter
@param zerophase: set to True for filter not to shift phase
@type zerophase: bool
@param period_resample: resampling period in seconds
@param onebit_norm: set to True to apply one-bit normalization (else,
running mean normalization is applied)
@type onebit_norm: bool
@param window_time: width of the window to calculate the running mean
in the earthquake band (for the time-normalization)
@param window_freq: width of the window to calculate the running mean
of the amplitude spectrum (for the spectral whitening)
"""
# ============================================
# Removing instrument response, mean and trend
# ============================================
# removing response...
if paz:
# ...using paz:
if trace.stats.sampling_rate > 10.0:
# decimating large trace, else fft crashes
factor = int(np.ceil(trace.stats.sampling_rate / 10))
trace.decimate(factor=factor, no_filter=True)
trace.simulate(paz_remove=paz,
paz_simulate=obspy.signal.cornFreq2Paz(0.01),
remove_sensitivity=True,
simulate_sensitivity=True,
nfft_pow2=True)
else:
# ...using StationXML:
# first band-pass to downsample data before removing response
# (else remove_response() method is slow or even hangs)
trace.filter(type="bandpass",
freqmin=freqmin,
freqmax=freqmax,
corners=corners,
zerophase=zerophase)
psutils.resample(trace, dt_resample=period_resample)
trace.remove_response(output="VEL", zero_mean=True)
t1 = dt.datetime.now()
# trimming, demeaning, detrending
midt = trace.stats.starttime + (trace.stats.endtime - trace.stats.starttime) / 2.0
t0 = UTCDateTime(midt.date) # date of trace, at time 00h00m00s
trace.trim(starttime=t0, endtime=t0 + dt.timedelta(days=1))
trace.detrend(type='constant')
trace.detrend(type='linear')
if np.all(trace.data == 0.0):
# no data -> skipping trace
raise pserrors.CannotPreprocess("Only zeros")
delta = (dt.datetime.now() - t1).total_seconds()
#print "\nProcessed trim in {:.1f} seconds".format(delta)
# =========
# Band-pass
# =========
t0 = dt.datetime.now()
# keeping a copy of the trace to calculate weights of time-normalization
trcopy = trace.copy()
# band-pass
trace.filter(type="bandpass",
freqmin=freqmin,
freqmax=freqmax,
corners=corners,
zerophase=zerophase)
# downsampling trace if not already done
if abs(1.0 / trace.stats.sampling_rate - period_resample) > EPS:
psutils.resample(trace, dt_resample=period_resample)
#delta = (dt.datetime.now() - t0).total_seconds()
#print "\nProcessed filters in {:.1f} seconds".format(delta)
# ==================
# Time normalization
# ==================
t0 = dt.datetime.now()
if onebit_norm:
# one-bit normalization
trace.data = np.sign(trace.data)
else:
# normalization of the signal by the running mean
# in the earthquake frequency band
trcopy.filter(type="bandpass",
freqmin=freqmin_earthquake,
freqmax=freqmax_earthquake,
corners=corners,
zerophase=zerophase)
# decimating trace
psutils.resample(trcopy, period_resample)
# Time-normalization weights from smoothed abs(data)
# Note that trace's data can be a masked array
halfwindow = int(round(window_time * trcopy.stats.sampling_rate / 2))
mask = ~trcopy.data.mask if np.ma.isMA(trcopy.data) else None
tnorm_w = psutils.moving_avg(np.abs(trcopy.data),
halfwindow=halfwindow,
mask=mask)
if np.ma.isMA(trcopy.data):
# turning time-normalization weights into a masked array
s = "[warning: {}.{} trace's data is a masked array]"
print s.format(trace.stats.network, trace.stats.station),
tnorm_w = np.ma.masked_array(tnorm_w, trcopy.data.mask)
if np.any((tnorm_w == 0.0) | np.isnan(tnorm_w)):
# illegal normalizing value -> skipping trace
raise pserrors.CannotPreprocess("Zero or NaN normalization weight")
# time-normalization
trace.data /= tnorm_w
# delta = (dt.datetime.now() - t0).total_seconds()
# print "\nProcessed time-normalisation in {:.1f} seconds".format(delta)
# ==================
# Spectral whitening
# ==================
fft = rfft(trace.data) # real FFT
deltaf = trace.stats.sampling_rate / trace.stats.npts # frequency step
# smoothing amplitude spectrum
halfwindow = int(round(window_freq / deltaf / 2.0))
weight = psutils.moving_avg(abs(fft), halfwindow=halfwindow)
# normalizing spectrum and back to time domain
trace.data = irfft(fft / weight, n=len(trace.data))
# re bandpass to avoid low/high freq noise
trace.filter(type="bandpass",
freqmin=freqmin,
freqmax=freqmax,
corners=corners,
zerophase=zerophase)
# Verifying that we don't have nan in trace data
if np.any(np.isnan(trace.data)):
raise pserrors.CannotPreprocess("Got NaN in trace data")
def load_pickled_xcorr(pickle_file):
"""
Loads pickle-dumped cross-correlations
@type pickle_file: str or unicode
@rtype: L{CrossCorrelationCollection}
"""
f = open(name=pickle_file, mode='rb')
xc = pickle.load(f)
f.close()
return xc
def load_pickled_xcorr_interactive(xcorr_dir=CROSSCORR_DIR, xcorr_files='xcorr*.pickle*'):
"""
Loads interactively pickle-dumped cross-correlations, by giving the user
a choice among a list of file matching xcorrFiles
@type xcorr_dir: str or unicode
@type xcorr_files: str or unicode
@rtype: L{CrossCorrelationCollection}
"""
# looking for files that match xcorrFiles
pathxcorr = os.path.join(xcorr_dir, xcorr_files)
flist = glob.glob(pathname=pathxcorr)
flist.sort()
pickle_file = None
if len(flist) == 1:
pickle_file = flist[0]
print 'Reading cross-correlation from file ' + pickle_file
elif len(flist) > 0:
print 'Select file containing cross-correlations:'
print '\n'.join('{i} - {f}'.format(i=i, f=os.path.basename(f))
for (i, f) in enumerate(flist))
i = int(raw_input('\n'))
pickle_file = flist[i]
# loading cross-correlations
xc = load_pickled_xcorr(pickle_file=pickle_file)
return xc
def FTAN(x, dt, periods, alpha, phase_corr=None):
"""
Frequency-time analysis of a time series.
Calculates the Fourier transform of the signal (xarray),
calculates the analytic signal in frequency domain,
applies Gaussian bandpass filters centered around given
center periods, and calculates the filtered analytic
signal back in time domain.
Returns the amplitude/phase matrices A(f0,t) and phi(f0,t),
that is, the amplitude/phase function of time t of the
analytic signal filtered around period T0 = 1 / f0.
See. e.g., Levshin & Ritzwoller, "Automated detection,
extraction, and measurement of regional surface waves",
Pure Appl. Geoph. (2001) and Bensen et al., "Processing
seismic ambient noise data to obtain reliable broad-band
surface wave dispersion measurements", Geophys. J. Int. (2007).
@param dt: sample spacing
@type dt: float
@param x: data array
@type x: L{numpy.ndarray}
@param periods: center periods of the Gaussian bandpass filters
@type periods: L{numpy.ndarray} or list
@param alpha: smoothing parameter of Gaussian filter
@type alpha: float
@param phase_corr: phase correction, function of freq
@type phase_corr: L{scipy.interpolate.interpolate.interp1d}
@rtype: (L{numpy.ndarray}, L{numpy.ndarray})
"""
# Initializing amplitude/phase matrix: each column =
# amplitude function of time for a given Gaussian filter
# centered around a period
amplitude = np.zeros(shape=(len(periods), len(x)))
phase = np.zeros(shape=(len(periods), len(x)))
# Fourier transform
Xa = fft(x)
# array of frequencies
freq = fftfreq(len(Xa), d=dt)
# analytic signal in frequency domain:
# | 2X(f) for f > 0
# Xa(f) = | X(f) for f = 0
# | 0 for f < 0
# with X = fft(x)
Xa[freq < 0] = 0.0
Xa[freq > 0] *= 2.0
# applying phase correction: replacing phase with given
# phase function of freq
if phase_corr:
# domain of definition of phase_corr(f)
minfreq = phase_corr.x.min()
maxfreq = phase_corr.x.max()
mask = (freq >= minfreq) & (freq <= maxfreq)
# replacing phase with user-provided phase correction:
# updating Xa(f) as |Xa(f)|.exp(-i.phase_corr(f))
phi = phase_corr(freq[mask])
Xa[mask] = np.abs(Xa[mask]) * np.exp(-1j * phi)
# tapering
taper = cosTaper(npts=mask.sum(), p=0.05)
Xa[mask] *= taper
Xa[~mask] = 0.0
# applying narrow bandpass Gaussian filters
for iperiod, T0 in enumerate(periods):
# bandpassed analytic signal
f0 = 1.0 / T0
Xa_f0 = Xa * np.exp(-alpha * ((freq - f0) / f0) ** 2)
# back to time domain
xa_f0 = ifft(Xa_f0)
# filling amplitude and phase of column
amplitude[iperiod, :] = np.abs(xa_f0)
phase[iperiod, :] = np.angle(xa_f0)
return amplitude, phase
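# --- Illustrative sketch (added; not part of the original module) ------------
# FTAN() above builds the analytic signal in the frequency domain and applies
# Gaussian band-pass filters exp(-alpha*((f-f0)/f0)**2) around each center
# period. The hedged helper below isolates that step for a single period and
# returns envelope and phase; the default alpha=20.0 is a hypothetical value,
# and fft/ifft/fftfreq are the functions already imported by this module.
def _narrowband_envelope_sketch(x, dt, T0, alpha=20.0):
    """Hedged demo: envelope/phase of *x* filtered around period T0."""
    Xa = fft(x)
    freq = fftfreq(len(Xa), d=dt)
    # analytic signal: zero out negative frequencies, double positive ones
    Xa[freq < 0] = 0.0
    Xa[freq > 0] *= 2.0
    f0 = 1.0 / T0
    Xa_f0 = Xa * np.exp(-alpha * ((freq - f0) / f0) ** 2)
    xa_f0 = ifft(Xa_f0)
    return np.abs(xa_f0), np.angle(xa_f0)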
def extract_dispcurve(amplmatrix, velocities, periodmask=None, varray_init=None,
optimizecurve=True, strength_smoothing=STRENGTH_SMOOTHING):
"""
Extracts a dispersion curve (velocity vs period) from an amplitude
matrix *amplmatrix*, itself obtained from FTAN.
Among the curves that ride along local maxima of amplitude,
the selected group velocity curve v(T) maximizes the sum of
amplitudes while preserving some smoothness (i.e., it minimizes
*dispcurve_penaltyfunc*).
The curve can be further optimized using a minimization
algorithm, which then seeks the curve that truly minimizes
the penalty function -- but no longer necessarily rides through
the local maxima.
If an initial vel array is given (*varray_init*) and
*optimizecurve*=True then only the optimization algorithm
is applied, using *varray_init* as starting point.
*strength_smoothing* controls the relative strength of the
smoothing term in the penalty function.
amplmatrix[i, j] = amplitude at period nb i and velocity nb j
@type amplmatrix: L{numpy.ndarray}
@type velocities: L{numpy.ndarray}
@type varray_init: L{numpy.ndarray}
@rtype: L{numpy.ndarray}
"""
if varray_init is not None and optimizecurve:
# if an initial guess for vg array is given, we simply apply
# the optimization procedure using it as starting guess
return optimize_dispcurve(amplmatrix=amplmatrix,
velocities=velocities,
vg0=varray_init,
strength_smoothing=strength_smoothing)[0]
nperiods = amplmatrix.shape[0]
# building list of possible (v, ampl) curves at all periods
v_ampl_arrays = None
for iperiod in range(nperiods):
# local maxima of amplitude at period nb *iperiod*
argsmax = psutils.local_maxima_indices(amplmatrix[iperiod, :])
if not argsmax:
# no local maximum => leave nan in (v, ampl) curves
continue
if not v_ampl_arrays:
# initializing the list of possible (v, ampl) curves with local maxima
# at current period, and nan elsewhere
v_ampl_arrays = [(np.zeros(nperiods) * np.nan, np.zeros(nperiods) * np.nan)
for _ in range(len(argsmax))]
for argmax, (varray, amplarray) in zip(argsmax, v_ampl_arrays):
varray[iperiod] = velocities[argmax]
amplarray[iperiod] = amplmatrix[iperiod, argmax]
continue
# inserting the velocities that locally maximize the amplitude
# into the appropriate curves
for argmax in argsmax:
# velocity that locally maximizes amplitude
v = velocities[argmax]
# we select the (v, ampl) curve for which the jump wrt previous
# v (not nan) is minimum
lastv = lambda varray: varray[:iperiod][~np.isnan(varray[:iperiod])][-1]
vjump = lambda (varray, amplarray): abs(lastv(varray) - v)
varray, amplarray = min(v_ampl_arrays, key=vjump)
# if the curve already has a vel attributed at this period, we
# duplicate it
if not np.isnan(varray[iperiod]):
varray, amplarray = copy.copy(varray), copy.copy(amplarray)
v_ampl_arrays.append((varray, amplarray))
# inserting (vg, ampl) at current period to the selected curve
varray[iperiod] = v
amplarray[iperiod] = amplmatrix[iperiod, argmax]
# filling curves without (vg, ampl) data at the current period
unfilledcurves = [(varray, amplarray) for varray, amplarray in v_ampl_arrays
if np.isnan(varray[iperiod])]
for varray, amplarray in unfilledcurves:
# inserting vel (which locally maximizes amplitude) for which
# the jump wrt the previous (not nan) v of the curve is minimum
lastv = varray[:iperiod][~np.isnan(varray[:iperiod])][-1]
vjump = lambda arg: abs(lastv - velocities[arg])
argmax = min(argsmax, key=vjump)
varray[iperiod] = velocities[argmax]
amplarray[iperiod] = amplmatrix[iperiod, argmax]
# amongst possible vg curves, we select the one that maximizes amplitude,
# while preserving some smoothness
def funcmin((varray, amplarray)):
if periodmask is not None:
return dispcurve_penaltyfunc(varray[periodmask],
amplarray[periodmask],
strength_smoothing=strength_smoothing)
else:
return dispcurve_penaltyfunc(varray, amplarray,
strength_smoothing=strength_smoothing)
varray, _ = min(v_ampl_arrays, key=funcmin)
# filling holes of vg curve
masknan = np.isnan(varray)
if masknan.any():
varray[masknan] = np.interp(x=masknan.nonzero()[0],
xp=(~masknan).nonzero()[0],
fp=varray[~masknan])
# further optimizing curve using a minimization algorithm
if optimizecurve:
# first trying with initial guess = the one above
varray1, funcmin1 = optimize_dispcurve(amplmatrix=amplmatrix,
velocities=velocities,
vg0=varray,
periodmask=periodmask,
strength_smoothing=strength_smoothing)
# then trying with initial guess = constant velocity 3 km/s
varray2, funcmin2 = optimize_dispcurve(amplmatrix=amplmatrix,
velocities=velocities,
vg0=3.0 * np.ones(nperiods),
periodmask=periodmask,
strength_smoothing=strength_smoothing)
varray = varray1 if funcmin1 <= funcmin2 else varray2
return varray
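# --- Illustrative sketch (added; not part of the original module) ------------
# extract_dispcurve() above follows ridges of the FTAN amplitude matrix. The
# hedged helper below builds a toy amplitude matrix with a single Gaussian
# ridge at 3 km/s and extracts the curve with the optimization step switched
# off; it assumes the module-level imports (numpy, psutils) are available and
# that psutils.local_maxima_indices behaves as used above.
def _extract_dispcurve_sketch(nperiods=20):
    """Hedged demo of ridge extraction on a synthetic amplitude matrix."""
    velocities = np.linspace(2.0, 5.0, 151)
    ridge_v = 3.0
    amplmatrix = np.array([np.exp(-((velocities - ridge_v) / 0.2) ** 2)
                           for _ in range(nperiods)])
    varray = extract_dispcurve(amplmatrix, velocities, optimizecurve=False)
    return varray  # expected to stay close to 3 km/s at all periods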
def optimize_dispcurve(amplmatrix, velocities, vg0, periodmask=None,
strength_smoothing=STRENGTH_SMOOTHING):
"""
Optimizes the vel curve, i.e., looks for the curve that truly
minimizes *dispcurve_penaltyfunc* -- and does not necessarily
ride through local maxima any more.
Returns optimized vel curve and the corresponding
value of the objective function to minimize
@type amplmatrix: L{numpy.ndarray}
@type velocities: L{numpy.ndarray}
@rtype: L{numpy.ndarray}, float
"""
if np.any(np.isnan(vg0)):
raise Exception("Init velocity array cannot contain NaN")
nperiods = amplmatrix.shape[0]
# function that returns the amplitude curve
# a given input vel curve goes through
ixperiods = np.arange(nperiods)
amplcurvefunc2d = RectBivariateSpline(ixperiods, velocities, amplmatrix, kx=1, ky=1)
amplcurvefunc = lambda vgcurve: amplcurvefunc2d.ev(ixperiods, vgcurve)
def funcmin(varray):
"""Objective function to minimize"""
# amplitude curve corresponding to vel curve
if periodmask is not None:
return dispcurve_penaltyfunc(varray[periodmask],
amplcurvefunc(varray)[periodmask],
strength_smoothing=strength_smoothing)
else:
return dispcurve_penaltyfunc(varray,
amplcurvefunc(varray),
strength_smoothing=strength_smoothing)
bounds = nperiods * [(min(velocities) + 0.1, max(velocities) - 0.1)]
method = 'SLSQP' # methods with bounds: L-BFGS-B, TNC, SLSQP
resmin = minimize(fun=funcmin, x0=vg0, method=method, bounds=bounds)
vgcurve = resmin['x']
# _ = funcmin(vgcurve, verbose=True)
return vgcurve, resmin['fun']
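# --- Illustrative sketch (added; not part of the original module) ------------
# optimize_dispcurve() above hands the penalty function to
# scipy.optimize.minimize with the bound-constrained 'SLSQP' method. The
# hedged toy problem below uses the same calling pattern on a simple quadratic
# objective, so the x0/bounds/result handling can be checked in isolation.
def _bounded_minimize_sketch():
    """Hedged demo of minimize(..., method='SLSQP', bounds=...)."""
    target = np.array([2.5, 3.0, 3.5])
    funcmin = lambda v: np.sum((v - target) ** 2)
    bounds = len(target) * [(2.0, 5.0)]
    res = minimize(fun=funcmin, x0=3.0 * np.ones(len(target)),
                   method='SLSQP', bounds=bounds)
    return res['x'], res['fun']  # res['x'] should approach *target*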
def dispcurve_penaltyfunc(vgarray, amplarray, strength_smoothing=STRENGTH_SMOOTHING):
"""
Objective function that the vg dispersion curve must minimize.
The function is composed of two terms:
- the first term, - sum(amplitude), seeks to maximize the amplitudes
traversed by the curve
- the second term, sum(dvg**2) (with dvg the difference between
consecutive velocities), is a smoothing term penalizing
discontinuities
*vgarray* is the velocity curve function of period, *amplarray*
gives the amplitudes traversed by the curve and *strength_smoothing*
is the strength of the smoothing term.
@type vgarray: L{numpy.ndarray}
@type amplarray: L{numpy.ndarray}
"""
# removing nans
notnan = ~(np.isnan(vgarray) | np.isnan(amplarray))
vgarray = vgarray[notnan]
amplarray = amplarray[notnan]
# jumps
dvg = vgarray[1:] - vgarray[:-1]
sumdvg2 = np.sum(dvg**2)
# amplitude
sumamplitude = amplarray.sum()
# vg curve must maximize amplitude and minimize jumps
return -sumamplitude + strength_smoothing*sumdvg2
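# --- Illustrative sketch (added; not part of the original module) ------------
# dispcurve_penaltyfunc() above trades traversed amplitude against smoothness.
# The hedged comparison below shows that, for equal amplitudes, a jumpy curve
# is penalized more than a smooth one, and that the gap grows with
# *strength_smoothing*.
def _penalty_tradeoff_sketch(strength_smoothing=STRENGTH_SMOOTHING):
    """Hedged demo: smooth vs jumpy velocity curves under the penalty."""
    ampl = np.ones(10)
    smooth = np.linspace(2.8, 3.2, 10)
    jumpy = np.where(np.arange(10) % 2 == 0, 2.8, 3.2)
    return (dispcurve_penaltyfunc(smooth, ampl, strength_smoothing),
            dispcurve_penaltyfunc(jumpy, ampl, strength_smoothing))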
if __name__ == '__main__':
# loading pickled cross-correlations
xc = load_pickled_xcorr_interactive()
print "Cross-correlations available in variable 'xc':"
print xc
|
unho/pootle
|
refs/heads/master
|
pootle/middleware/cache.py
|
8
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.utils.cache import add_never_cache_headers
from django.utils.deprecation import MiddlewareMixin
class CacheAnonymousOnly(MiddlewareMixin):
"""Imitate the deprecated `CACHE_MIDDLEWARE_ANONYMOUS_ONLY` behavior."""
def process_response(self, request, response):
if hasattr(request, 'user') and request.user.is_authenticated:
add_never_cache_headers(response)
return response
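# A minimal sketch of how this middleware would typically be enabled,
# assuming a standard Django settings module (list contents hypothetical):
#
#   MIDDLEWARE = [
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'pootle.middleware.cache.CacheAnonymousOnly',
#   ]
#
# Responses for authenticated users then carry never-cache headers, while
# anonymous responses stay cacheable.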
|
chrislacy/digitalashes.com
|
refs/heads/master
|
src/main.py
|
1
|
'''
Copyright 2014 Chris Lacy.
Licensed under the MIT License.
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from BaseHandler import BaseHandler
import webapp2
import appdef
#==============================================================================
class PrivacyHandler(BaseHandler):
def get(self):
self.write_template(appdef.TEMPLATE_ROOT_PATH + 'privacy_policy.html', None)
#==============================================================================
class TermsHandler(BaseHandler):
def get(self):
self.write_template(appdef.TEMPLATE_ROOT_PATH + 'terms.html', None)
#==============================================================================
class PageNotFoundHandler(BaseHandler):
def get(self):
self.write_html_file(appdef.TEMPLATE_ROOT_PATH + 'page_not_found.html')
#==============================================================================
class AboutHandler(BaseHandler):
def get(self):
self.write_html_file(appdef.TEMPLATE_ROOT_PATH + 'about.html')
#==============================================================================
class ContactHandler(BaseHandler):
def get(self):
self.write_html_file(appdef.TEMPLATE_ROOT_PATH + 'contact.html')
#==============================================================================
class MainPage(BaseHandler):
def get(self):
self.write_html_file(appdef.TEMPLATE_ROOT_PATH + 'index.html')
#==============================================================================
application = webapp2.WSGIApplication([('/privacy', PrivacyHandler),
('/terms', TermsHandler),
('/about', AboutHandler),
('/contact', ContactHandler),
('/', MainPage),
('/.*', PageNotFoundHandler)],
debug=True)
|
sk2/autonetkit
|
refs/heads/master
|
autonetkit/workflow/__init__.py
|
12133432
| |
yunwkh/_sample_
|
refs/heads/master
|
sample/helpers.py
|
12133432
| |
itsMagondu/oscar
|
refs/heads/master
|
apps/photo/migrations/__init__.py
|
12133432
| |
tectronics/smap-data
|
refs/heads/master
|
python/smap/drivers/acmex2/__init__.py
|
12133432
| |
sushramesh/lwc
|
refs/heads/master
|
lib/python2.7/site-packages/django/conf/locale/uk/__init__.py
|
12133432
| |
lygics/spaCy
|
refs/heads/master
|
bin/ner_tag.py
|
3
|
import codecs
import plac
from spacy.en import English
def main(text_loc):
with codecs.open(text_loc, 'r', 'utf8') as file_:
text = file_.read()
NLU = English()
for paragraph in text.split('\n\n'):
tokens = NLU(paragraph)
ent_starts = {}
ent_ends = {}
for span in tokens.ents:
ent_starts[span.start] = span.label_
ent_ends[span.end] = span.label_
output = []
for token in tokens:
if token.i in ent_starts:
output.append('<%s>' % ent_starts[token.i])
output.append(token.orth_)
if (token.i+1) in ent_ends:
output.append('</%s>' % ent_ends[token.i+1])
output.append('\n\n')
print ' '.join(output)
if __name__ == '__main__':
plac.call(main)
|
pbs/django-cms
|
refs/heads/support/2.3.x
|
cms/plugins/inherit/forms.py
|
1
|
from cms.models import Page
from cms.plugins.inherit.models import InheritPagePlaceholder
from django import forms
from django.forms.models import ModelForm
from django.forms.utils import ErrorList
from django.utils.translation import ugettext_lazy as _
class InheritForm(ModelForm):
from_page = forms.ModelChoiceField(label=_("page"), queryset=Page.objects.drafts(), required=False)
class Meta:
model = InheritPagePlaceholder
exclude = ('page', 'position', 'placeholder', 'language', 'plugin_type')
def for_site(self, site):
        # override the page_link field's queryset to contain just pages for
        # the current site
self.fields['from_page'].queryset = Page.objects.drafts().on_site(site)
def clean(self):
cleaned_data = super(InheritForm, self).clean()
if not cleaned_data['from_page'] and not cleaned_data['from_language']:
self._errors['from_page'] = ErrorList([_("Language or Page must be filled out")])
return cleaned_data
|
coreynicholson/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/lovehomeporn.py
|
93
|
from __future__ import unicode_literals
import re
from .nuevo import NuevoBaseIE
class LoveHomePornIE(NuevoBaseIE):
_VALID_URL = r'https?://(?:www\.)?lovehomeporn\.com/video/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?'
_TEST = {
'url': 'http://lovehomeporn.com/video/48483/stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick#menu',
'info_dict': {
'id': '48483',
'display_id': 'stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick',
'ext': 'mp4',
'title': 'Stunning busty brunette girlfriend sucking and riding a big dick',
'age_limit': 18,
'duration': 238.47,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
info = self._extract_nuevo(
'http://lovehomeporn.com/media/nuevo/config.php?key=%s' % video_id,
video_id)
info.update({
'display_id': display_id,
'age_limit': 18
})
return info
|
apple/llvm-project
|
refs/heads/llvm.org/main
|
cross-project-tests/debuginfo-tests/dexter/feature_tests/commands/perfect/dex_declare_file/precompiled_binary_different_dir/lit.local.cfg.py
|
12
|
config.suffixes = ['.dex']
|
ouhouhsami/django-imapauth
|
refs/heads/master
|
imapauth/backends.py
|
1
|
from imaplib import IMAP4_SSL
from django.contrib.auth.models import User
from imapauth.settings import IMAPAUTH_HOST
class IMAPBackend(object):
# Create an authentication method
# This is called by the standard Django login procedure
    # Note: users authenticated through this backend will not be able to
    # log in to the Django admin, since they are not created as staff
def authenticate(self, username=None, password=None):
try:
# Check if this user is valid on the mail server
# TODO: add settings for port, type of IMAP connection ...
c = IMAP4_SSL(IMAPAUTH_HOST)
c.login(username, password)
c.logout()
except:
return None
try:
# Check if the user exists in Django's local database
user = User.objects.get(username=username)
except User.DoesNotExist:
# Create a user in Django's local database
user = User.objects.create_user(username,
password='passworddoesntmatter')
return user
# Required for your backend to work properly - unchanged in most scenarios
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
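# A minimal usage sketch, assuming a standard Django settings module
# (host value hypothetical):
#
#   AUTHENTICATION_BACKENDS = (
#       'imapauth.backends.IMAPBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   )
#   IMAPAUTH_HOST = 'imap.example.com'
#
# django.contrib.auth.authenticate(username=..., password=...) will then try
# the IMAP server first and fall back to the local ModelBackend.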
|
ergodicbreak/evennia
|
refs/heads/master
|
evennia/contrib/tutorial_world/__init__.py
|
6
|
# -*- coding: utf-8 -*-
"""
This package holds the demo game of Evennia.
"""
from __future__ import absolute_import
from . import mob, objects, rooms
|
idalin/calibre-web
|
refs/heads/master
|
cps/oauth.py
|
3
|
# -*- coding: utf-8 -*-
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2018-2019 jim3ma
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import division, print_function, unicode_literals
from flask import session
try:
from flask_dance.consumer.backend.sqla import SQLAlchemyBackend, first, _get_real_user
from sqlalchemy.orm.exc import NoResultFound
backend_resultcode = False # prevent storing values with this resultcode
except ImportError:
# fails on flask-dance >1.3, due to renaming
try:
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage as SQLAlchemyBackend
from flask_dance.consumer.storage.sqla import first, _get_real_user
from sqlalchemy.orm.exc import NoResultFound
backend_resultcode = True # prevent storing values with this resultcode
except ImportError:
pass
class OAuthBackend(SQLAlchemyBackend):
"""
Stores and retrieves OAuth tokens using a relational database through
the `SQLAlchemy`_ ORM.
.. _SQLAlchemy: https://www.sqlalchemy.org/
"""
def __init__(self, model, session, provider_id,
user=None, user_id=None, user_required=None, anon_user=None,
cache=None):
self.provider_id = provider_id
super(OAuthBackend, self).__init__(model, session, user, user_id, user_required, anon_user, cache)
def get(self, blueprint, user=None, user_id=None):
if self.provider_id + '_oauth_token' in session and session[self.provider_id + '_oauth_token'] != '':
return session[self.provider_id + '_oauth_token']
# check cache
cache_key = self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
token = self.cache.get(cache_key)
if token:
return token
# if not cached, make database queries
query = (
self.session.query(self.model)
.filter_by(provider=self.provider_id)
)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
use_provider_user_id = False
if self.provider_id + '_oauth_user_id' in session and session[self.provider_id + '_oauth_user_id'] != '':
query = query.filter_by(provider_user_id=session[self.provider_id + '_oauth_user_id'])
use_provider_user_id = True
if self.user_required and not u and not uid and not use_provider_user_id:
# raise ValueError("Cannot get OAuth token without an associated user")
return None
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
try:
token = query.one().token
except NoResultFound:
token = None
# cache the result
self.cache.set(cache_key, token)
return token
def set(self, blueprint, token, user=None, user_id=None):
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
if self.user_required and not u and not uid:
raise ValueError("Cannot set OAuth token without an associated user")
# if there was an existing model, delete it
existing_query = (
self.session.query(self.model)
.filter_by(provider=self.provider_id)
)
# check for user ID
has_user_id = hasattr(self.model, "user_id")
if has_user_id and uid:
existing_query = existing_query.filter_by(user_id=uid)
# check for user (relationship property)
has_user = hasattr(self.model, "user")
if has_user and u:
existing_query = existing_query.filter_by(user=u)
# queue up delete query -- won't be run until commit()
existing_query.delete()
# create a new model for this token
kwargs = {
"provider": self.provider_id,
"token": token,
}
if has_user_id and uid:
kwargs["user_id"] = uid
if has_user and u:
kwargs["user"] = u
self.session.add(self.model(**kwargs))
# commit to delete and add simultaneously
self.session.commit()
# invalidate cache
self.cache.delete(self.make_cache_key(
blueprint=blueprint, user=user, user_id=user_id
))
def delete(self, blueprint, user=None, user_id=None):
query = (
self.session.query(self.model)
.filter_by(provider=self.provider_id)
)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
if self.user_required and not u and not uid:
raise ValueError("Cannot delete OAuth token without an associated user")
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
query.delete()
self.session.commit()
# invalidate cache
self.cache.delete(self.make_cache_key(
blueprint=blueprint, user=user, user_id=user_id,
))
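# A minimal usage sketch, assuming a flask-dance blueprint and an OAuth
# SQLAlchemy model with provider, provider_user_id, user_id and token
# columns (all names here hypothetical):
#
#   from flask_dance.contrib.github import make_github_blueprint
#   blueprint = make_github_blueprint(client_id='...', client_secret='...')
#   blueprint.backend = OAuthBackend(OAuth, db.session, 'github',
#                                    user=current_user, user_required=False)
#
# (On flask-dance >1.3 the attribute is ``blueprint.storage``.)  get(), set()
# and delete() then resolve tokens per provider and per user, with the session
# and cache checks shown above.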
|
seem-sky/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/multiprocessing/managers.py
|
85
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import array
import queue
from time import time as _time
from traceback import format_exc
from . import connection
from . import context
from . import pool
from . import process
from . import reduction
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
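# Summary of the reply kinds used by this request/response protocol:
#   '#RETURN'         -- result is handed back to the caller unchanged
#   '#ERROR'          -- result is an exception instance, re-raised client side
#   '#TRACEBACK'      -- result is a formatted traceback string (RemoteError)
#   '#UNSERIALIZABLE' -- the server could not pickle its reply (RemoteError)
#   '#PROXY'          -- (exposed, token) pair describing a new shared object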
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = list(self.id_to_obj.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
self.id_to_refcount[ident] += 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
assert self._state.value == State.STARTED
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()):
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if context.get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
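# A minimal usage sketch of the proxies registered above:
#
#   from multiprocessing import Manager   # returns a started SyncManager
#   with Manager() as m:
#       d = m.dict()          # DictProxy
#       l = m.list([1, 2])    # ListProxy
#       d['count'] = len(l)
#
# Every call on a proxy goes through BaseProxy._callmethod() to the shared
# object living in the manager's server process.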
|
krieger-od/nwjs_chromium.src
|
refs/heads/master
|
third_party/tlslite/tlslite/utils/python_rsakey.py
|
60
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Pure-Python RSA implementation."""
from .cryptomath import *
from .asn1parser import ASN1Parser
from .rsakey import *
from .pem import *
class Python_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def hasPrivateKey(self):
return self.d != 0
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
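    # The CRT recombination above is Garner's formula: since dP = d mod (p-1)
    # and dQ = d mod (q-1), s1 = m**d mod p and s2 = m**d mod q, and
    #   c = s2 + q * (((s1 - s2) * qInv) mod p)
    # is the unique residue mod n = p*q congruent to both, i.e. m**d mod n.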
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self): return False
def generate(bits):
key = Python_RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a <privateKey> or <publicKey>, or
PEM-encoded key."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return Python_RSAKey._parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return Python_RSAKey._parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
parsePEM = staticmethod(parsePEM)
def _parsePKCS8(bytes):
p = ASN1Parser(bytes)
version = p.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID = p.getChild(1).value
if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
raise SyntaxError("Unrecognized AlgorithmIdentifier")
#Get the privateKey
privateKeyP = p.getChild(2)
#Adjust for OCTET STRING encapsulation
privateKeyP = ASN1Parser(privateKeyP.value)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parsePKCS8 = staticmethod(_parsePKCS8)
def _parseSSLeay(bytes):
privateKeyP = ASN1Parser(bytes)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parseSSLeay = staticmethod(_parseSSLeay)
def _parseASN1PrivateKey(privateKeyP):
version = privateKeyP.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(privateKeyP.getChild(1).value)
e = bytesToNumber(privateKeyP.getChild(2).value)
d = bytesToNumber(privateKeyP.getChild(3).value)
p = bytesToNumber(privateKeyP.getChild(4).value)
q = bytesToNumber(privateKeyP.getChild(5).value)
dP = bytesToNumber(privateKeyP.getChild(6).value)
dQ = bytesToNumber(privateKeyP.getChild(7).value)
qInv = bytesToNumber(privateKeyP.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
|
Adnn/django
|
refs/heads/master
|
django/utils/deconstruct.py
|
502
|
from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
"""
    Class decorator that allows the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Python 2/fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version()))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
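# A minimal usage sketch of the decorator above (module and class names
# hypothetical; deconstruct() also checks that the name is importable):
#
#   @deconstructible(path='myapp.validators.RangeValidator')
#   class RangeValidator:
#       def __init__(self, low, high):
#           self.low, self.high = low, high
#
#   RangeValidator(1, 10).deconstruct()
#   # -> ('myapp.validators.RangeValidator', (1, 10), {})
#
# which is the (import path, args, kwargs) triple the migrations serializer
# needs to recreate the instance.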
|
Kryz/sentry
|
refs/heads/master
|
tests/sentry/management/commands/test_cleanup.py
|
23
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.core.management import call_command
from sentry.models import Event, Group, GroupTagValue, TagValue, TagKey
from sentry.testutils import TestCase
ALL_MODELS = (Event, Group, GroupTagValue, TagValue, TagKey)
class SentryCleanupTest(TestCase):
fixtures = ['tests/fixtures/cleanup.json']
def test_simple(self):
call_command('cleanup', days=1)
for model in ALL_MODELS:
assert model.objects.count() == 0
def test_project(self):
orig_counts = {}
for model in ALL_MODELS:
orig_counts[model] = model.objects.count()
call_command('cleanup', days=1, project=2)
for model in ALL_MODELS:
assert model.objects.count() == orig_counts[model]
call_command('cleanup', days=1, project=1)
for model in ALL_MODELS:
assert model.objects.count() == 0
|
ROB-Seismology/oq-hazardlib
|
refs/heads/rob-hazardlib
|
openquake/hazardlib/gsim/bindi_2014.py
|
1
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`BindiEtAl2014Rjb`,
:class:`BindiEtAl2014RjbEC8`,
:class:`BindiEtAl2014RjbEC8NoSOF`,
:class:`BindiEtAl2014Rhyp`,
:class:`BindiEtAl2014RhypEC8`,
:class:`BindiEtAl2014RhypEC8NoSOF`
"""
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
class BindiEtAl2014Rjb(GMPE):
"""
Implements European GMPE:
D.Bindi, M. Massa, L.Luzi, G. Ameri, F. Pacor, R.Puglia and P. Augliera
(2014), "Pan-European ground motion prediction equations for the
average horizontal component of PGA, PGV and 5 %-damped PSA at spectral
periods of up to 3.0 s using the RESORCE dataset", Bulletin of
    Earthquake Engineering, 12(1), 391 - 430
The regressions are developed considering the geometrical mean of the
as-recorded horizontal components
The printed version of the GMPE was corrected by Erratum:
D.Bindi, M. Massa, L.Luzi, G. Ameri, F. Pacor, R.Puglia and P. Augliera
(2014), "Erratum to Pan-European ground motion prediction equations for the
average horizontal component of PGA, PGV and 5 %-damped PSA at spectral
periods of up to 3.0 s using the RESORCE dataset", Bulletin of
Earthquake Engineering, 12(1), 431 - 448. The erratum notes that the
    printed coefficient tables were in error. In this implementation the
    coefficient tables were taken from the electronic supplementary
    material of the original paper, which is indicated as being unaffected.
"""
#: Supported tectonic region type is 'active shallow crust'
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Set of :mod:`intensity measure types <openquake.hazardlib.imt>`
#: this GSIM can calculate. A set should contain classes from module
#: :mod:`openquake.hazardlib.imt`.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
PGV,
SA
])
#: Supported intensity measure component is the geometric mean of two
#: horizontal components
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation types are inter-event, intra-event
#: and total
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL,
const.StdDev.INTER_EVENT,
const.StdDev.INTRA_EVENT
])
#: Required site parameter is only Vs30
REQUIRES_SITES_PARAMETERS = set(('vs30', ))
#: Required rupture parameters are magnitude and rake (eq. 1).
REQUIRES_RUPTURE_PARAMETERS = set(('rake', 'mag'))
#: Required distance measure is Rjb (eq. 1).
REQUIRES_DISTANCES = set(('rjb', ))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
imean = self._get_mean(C, rup, dists, sites)
if isinstance(imt, (PGA, SA)):
# Convert units to g,
# but only for PGA and SA (not PGV):
mean = np.log((10.0 ** (imean - 2.0)) / g)
else:
# PGV:
mean = np.log(10.0 ** imean)
istddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
stddevs = np.log(10.0 ** np.array(istddevs))
return mean, stddevs
def _get_mean(self, C, rup, dists, sites):
"""
Returns the mean ground motion
"""
return (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, dists.rjb, rup.mag) +
self._get_style_of_faulting_term(C, rup) +
self._get_site_amplification_term(C, sites.vs30))
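    # In log10 units the decomposition above is
    #   log10(Y) = f_M(M) + f_D(Rjb, M) + f_SOF(rake) + gamma*log10(vs30/Vref)
    # with (eq. 3)  f_M = e1 + b1*(M - Mh) + b2*(M - Mh)**2   for M <  Mh
    #               f_M = e1 + b3*(M - Mh)                     for M >= Mh
    # and  (eq. 2)  f_D = [c1 + c2*(M - Mref)]*log10(sqrt(Rjb**2 + h**2)/Rref)
    #                     - c3*(sqrt(Rjb**2 + h**2) - Rref)
    # get_mean_and_stddevs() then converts log10(Y) to ln of g units for
    # PGA/SA, or to ln of the PGV regression units for PGV.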
def _get_magnitude_scaling_term(self, C, mag):
"""
Returns the magnitude scaling term of the GMPE described in
equation 3
"""
dmag = mag - self.CONSTS["Mh"]
if mag < self.CONSTS["Mh"]:
return C["e1"] + (C["b1"] * dmag) + (C["b2"] * (dmag ** 2.0))
else:
return C["e1"] + (C["b3"] * dmag)
def _get_distance_scaling_term(self, C, rval, mag):
"""
Returns the distance scaling term of the GMPE described in equation 2
"""
r_adj = np.sqrt(rval ** 2.0 + C["h"] ** 2.0)
return (
(C["c1"] + C["c2"] * (mag - self.CONSTS["Mref"])) *
np.log10(r_adj / self.CONSTS["Rref"]) -
(C["c3"] * (r_adj - self.CONSTS["Rref"])))
def _get_style_of_faulting_term(self, C, rup):
"""
Returns the style-of-faulting term.
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
        Rake angles within 30 degrees of horizontal are strike-slip,
        angles from 30 to 150 degrees are reverse, and angles from
        -30 to -150 degrees are normal.
Note that the 'Unspecified' case is not considered in this class
as rake is required as an input variable
"""
SS, NS, RS = 0.0, 0.0, 0.0
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
# strike-slip
SS = 1.0
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
RS = 1.0
else:
# normal
NS = 1.0
return (C["sofN"] * NS) + (C["sofR"] * RS) + (C["sofS"] * SS)
def _get_site_amplification_term(self, C, vs30):
"""
Returns the site amplification term for the case in which Vs30
is used directly
"""
return C["gamma"] * np.log10(vs30 / self.CONSTS["Vref"])
def _get_stddevs(self, C, stddev_types, num_sites):
"""
Return standard deviations as defined in table 2.
"""
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
stddevs.append(C['sigma'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(C['phi'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['tau'] + np.zeros(num_sites))
return stddevs
#: Coefficients from Table 2
COEFFS = CoeffsTable(sa_damping=5, table="""
imt e1 c1 c2 h c3 b1 b2 b3 gamma sofN sofR sofS tau phi phis2s sigma
pgv 2.264810000 -1.224080000 0.202085000 5.061240000 0.000000000 0.162802000 -0.092632400 0.044030100 -0.529443000 -0.009476750 0.040057400 -0.030580500 0.156062000 0.277714000 0.120398000 0.318560000
pga 3.328190000 -1.239800000 0.217320000 5.264860000 0.001186240 -0.085504500 -0.092563900 0.000000000 -0.301899000 -0.039769500 0.077525300 -0.037755800 0.149977000 0.282398000 0.165611000 0.319753000
0.02 3.370530000 -1.263580000 0.220527000 5.200820000 0.001118160 -0.089055400 -0.091615200 0.000000000 -0.294021000 -0.039236000 0.081051600 -0.041815600 0.158670000 0.282356000 0.183959000 0.323885000
0.04 3.439220000 -1.310250000 0.244676000 4.916690000 0.001091830 -0.116919000 -0.078378900 0.000000000 -0.241765000 -0.037720400 0.079778300 -0.042057900 0.154621000 0.291143000 0.187409000 0.329654000
0.07 3.596510000 -1.290510000 0.231878000 5.359220000 0.001820940 -0.085012400 -0.056996800 0.000000000 -0.207629000 -0.045943700 0.087496800 -0.041553000 0.172785000 0.291499000 0.199913000 0.338860000
0.10 3.686380000 -1.281780000 0.219406000 6.121460000 0.002114430 -0.113550000 -0.075332500 0.000000000 -0.173237000 -0.038052800 0.084710300 -0.046658500 0.169691000 0.301967000 0.208178000 0.346379000
0.15 3.686320000 -1.176970000 0.182662000 5.741540000 0.002540270 -0.092872600 -0.102433000 0.073904200 -0.202492000 -0.026729300 0.067844100 -0.041114700 0.152902000 0.305804000 0.212124000 0.341900000
0.20 3.682620000 -1.103010000 0.133154000 5.319980000 0.002420890 0.010085700 -0.105184000 0.150461000 -0.291228000 -0.032653700 0.075976900 -0.043323200 0.150055000 0.300109000 0.190469000 0.335532000
0.26 3.643140000 -1.085270000 0.115603000 5.134550000 0.001964370 0.029939700 -0.127173000 0.178899000 -0.354425000 -0.033843800 0.074982000 -0.041138100 0.151209000 0.302419000 0.187037000 0.338114000
0.30 3.639850000 -1.105910000 0.108276000 5.128460000 0.001499220 0.039190400 -0.138578000 0.189682000 -0.393060000 -0.037245300 0.076701100 -0.039455900 0.157946000 0.297402000 0.174118000 0.336741000
0.36 3.574800000 -1.099550000 0.103083000 4.905570000 0.001049050 0.052103000 -0.151385000 0.216011000 -0.453905000 -0.027906700 0.069789800 -0.041883200 0.165436000 0.294395000 0.175848000 0.337694000
0.40 3.530060000 -1.095380000 0.101111000 4.953860000 0.000851474 0.045846400 -0.162090000 0.224827000 -0.492063000 -0.025630900 0.072566800 -0.046936000 0.157728000 0.296992000 0.169883000 0.336278000
0.46 3.433870000 -1.065860000 0.109066000 4.659900000 0.000868165 0.060083800 -0.165897000 0.197716000 -0.564463000 -0.018663500 0.064599300 -0.045935800 0.173005000 0.291868000 0.164162000 0.339290000
0.50 3.405540000 -1.057670000 0.112197000 4.432050000 0.000788528 0.088318900 -0.164108000 0.154750000 -0.596196000 -0.017419400 0.060282600 -0.042863200 0.180820000 0.289957000 0.165090000 0.341717000
0.60 3.304420000 -1.050140000 0.121734000 4.216570000 0.000487285 0.120182000 -0.163325000 0.117576000 -0.667824000 -0.000486417 0.044920900 -0.044434500 0.182233000 0.292223000 0.175634000 0.344388000
0.70 3.238820000 -1.050210000 0.114674000 4.171270000 0.000159408 0.166933000 -0.161112000 0.112005000 -0.738390000 0.011203300 0.028150600 -0.039353900 0.189396000 0.289307000 0.168617000 0.345788000
0.80 3.153700000 -1.046540000 0.129522000 4.200160000 0.000000000 0.193817000 -0.156553000 0.051728500 -0.794076000 0.016525800 0.020352200 -0.036878300 0.189074000 0.288815000 0.168170000 0.345200000
0.90 3.134810000 -1.046120000 0.114536000 4.480030000 0.000000000 0.247547000 -0.153819000 0.081575400 -0.821699000 0.016449300 0.021242200 -0.037691300 0.191986000 0.293264000 0.183719000 0.350517000
1.00 3.124740000 -1.052700000 0.103471000 4.416130000 0.000000000 0.306569000 -0.147558000 0.092837300 -0.826584000 0.026307100 0.018604300 -0.044911100 0.195026000 0.297907000 0.200775000 0.356067000
1.30 2.898410000 -0.973828000 0.104898000 4.258210000 0.000000000 0.349119000 -0.149483000 0.108209000 -0.845047000 0.025233900 0.022362100 -0.047595700 0.181782000 0.306676000 0.209625000 0.356504000
1.50 2.847270000 -0.983388000 0.109072000 4.566970000 0.000000000 0.384546000 -0.139867000 0.098737200 -0.823200000 0.018673800 0.023089400 -0.041763000 0.177752000 0.316312000 0.218569000 0.362835000
1.80 2.680160000 -0.983082000 0.164027000 4.680080000 0.000000000 0.343663000 -0.135933000 0.000000000 -0.778657000 0.011371300 0.016688200 -0.028059400 0.163242000 0.326484000 0.221367000 0.365020000
2.00 2.601710000 -0.979215000 0.163344000 4.581860000 0.000000000 0.331747000 -0.148282000 0.000000000 -0.769243000 0.005535450 0.019856600 -0.025392000 0.164958000 0.329916000 0.225350000 0.368857000
2.60 2.390670000 -0.977532000 0.211831000 5.395170000 0.000000000 0.357514000 -0.122539000 0.000000000 -0.769609000 0.008734600 0.023314200 -0.032048600 0.170280000 0.320626000 0.210193000 0.363037000
3.00 2.253990000 -0.940373000 0.227241000 5.741730000 0.000000000 0.385526000 -0.111445000 0.000000000 -0.732072000 0.022989300 -0.020662000 -0.002327150 0.176546000 0.314165000 0.207247000 0.360373000
""")
CONSTS = {"Mref": 5.5,
"Mh": 6.75,
"Rref": 1.0,
"Vref": 800.0}
class BindiEtAl2014RjbEC8(BindiEtAl2014Rjb):
"""
Implements the Bindi et al (2014) GMPE for the case where Joyner-Boore
distance is specified but Eurocode 8 Site classification is used.
"""
def _get_site_amplification_term(self, C, vs30):
"""
Returns the site amplification given Eurocode 8 site classification
"""
f_s = np.zeros_like(vs30)
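# Site class A (vs30 >= 800 m/s) is the reference class and keeps zero
# amplification (eA is 0 at every period in the coefficient tables)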
# Site class B
idx = np.logical_and(vs30 < 800.0, vs30 >= 360.0)
f_s[idx] = C["eB"]
# Site Class C
idx = np.logical_and(vs30 < 360.0, vs30 >= 180.0)
f_s[idx] = C["eC"]
# Site Class D
idx = vs30 < 180.0
f_s[idx] = C["eD"]
return f_s
#: Coefficients from Table 1
COEFFS = CoeffsTable(sa_damping=5, table="""
imt e1 c1 c2 h c3 b1 b2 b3 eA eB eC eD sofN sofR sofS sofU tau phi phis2s sigma
pgv 2.375220000 -1.304700000 0.209460000 5.761910000 0.000000000 0.273952000 -0.051424900 0.000000000 0.000000000 0.122258000 0.276738000 0.380306000 -0.001827210 0.057498900 0.022657800 0.000000000 0.186089000 0.271268000 0.177104000 0.328961000
pga 3.450780000 -1.360610000 0.215873000 6.147170000 0.000732525 -0.020871500 -0.072242500 0.000000000 0.000000000 0.137715000 0.233048000 0.214227000 -0.032284600 0.073677800 -0.019431300 0.000000000 0.180904000 0.276335000 0.206288000 0.330284000
0.02 3.478060000 -1.375190000 0.218095000 5.906840000 0.000710063 -0.026825000 -0.072604300 0.000000000 0.000000000 0.134904000 0.226827000 0.213357000 -0.028085300 0.077531800 -0.020641400 0.000000000 0.182533000 0.278823000 0.208393000 0.333258000
0.04 3.580060000 -1.433270000 0.238839000 5.793940000 0.000685158 -0.056875100 -0.063729800 0.000000000 0.000000000 0.133973000 0.218136000 0.176183000 -0.038661200 0.060308000 -0.033402300 0.000000000 0.180630000 0.289652000 0.220859000 0.341358000
0.07 3.781630000 -1.461340000 0.225844000 6.620190000 0.001175680 -0.043052000 -0.049789000 0.000000000 0.000000000 0.139714000 0.206862000 0.145621000 -0.038893400 0.071260300 -0.027363900 0.000000000 0.194176000 0.296609000 0.235714000 0.354515000
0.10 3.792600000 -1.414410000 0.208667000 6.892480000 0.001601790 -0.058451800 -0.064433500 0.000000000 0.000000000 0.155236000 0.210168000 0.156052000 -0.019545700 0.084246100 -0.022831500 0.000000000 0.181926000 0.306918000 0.244969000 0.356785000
0.15 3.778380000 -1.293440000 0.163550000 6.717350000 0.002028820 -0.035863600 -0.091537900 0.085537200 0.000000000 0.158937000 0.199726000 0.186495000 -0.020557800 0.074269000 -0.026728700 0.000000000 0.181380000 0.305998000 0.241833000 0.355716000
0.20 3.692760000 -1.181950000 0.119101000 5.786590000 0.002122900 0.067201900 -0.091505400 0.145251000 0.000000000 0.138968000 0.216584000 0.199500000 0.018953200 0.133352000 0.026665200 0.000000000 0.177903000 0.300131000 0.219913000 0.348896000
0.26 3.676100000 -1.165490000 0.102609000 5.451920000 0.001653610 0.129716000 -0.097514500 0.135986000 0.000000000 0.126737000 0.249141000 0.229736000 0.023562700 0.143428000 0.039233500 0.000000000 0.178211000 0.300652000 0.200662000 0.349501000
0.30 3.669660000 -1.175200000 0.099164000 5.407320000 0.001247800 0.145499000 -0.104880000 0.135159000 0.000000000 0.113881000 0.259274000 0.252504000 0.018438300 0.138662000 0.043489300 0.000000000 0.184254000 0.295463000 0.193285000 0.348207000
0.36 3.597210000 -1.144790000 0.095007700 5.020640000 0.000918966 0.168179000 -0.114223000 0.149582000 0.000000000 0.109638000 0.274211000 0.282686000 0.012675100 0.122472000 0.036661700 0.000000000 0.184085000 0.295192000 0.187569000 0.347887000
0.40 3.556710000 -1.145200000 0.094317300 5.080660000 0.000672779 0.173884000 -0.120149000 0.151849000 0.000000000 0.110223000 0.280836000 0.301657000 0.022149900 0.129181000 0.046122800 0.000000000 0.191734000 0.292878000 0.180758000 0.350056000
0.46 3.501770000 -1.130800000 0.100456000 4.957770000 0.000583160 0.190813000 -0.123177000 0.130847000 0.000000000 0.108079000 0.298022000 0.347080000 0.017164500 0.115968000 0.044778200 0.000000000 0.199690000 0.291096000 0.182941000 0.353006000
0.50 3.457170000 -1.116310000 0.101994000 4.698770000 0.000508794 0.203522000 -0.126077000 0.122339000 0.000000000 0.108783000 0.305295000 0.370989000 0.016711700 0.114252000 0.049822200 0.000000000 0.200063000 0.291640000 0.175988000 0.353665000
0.60 3.387990000 -1.104700000 0.104529000 4.546430000 0.000249318 0.242603000 -0.126011000 0.095964800 0.000000000 0.106929000 0.321296000 0.440581000 0.013694500 0.100223000 0.042017600 0.000000000 0.207756000 0.289459000 0.176453000 0.356299000
0.70 3.343810000 -1.116090000 0.099989200 4.640170000 0.000000000 0.280922000 -0.124614000 0.092047500 0.000000000 0.102965000 0.331801000 0.503562000 0.024399300 0.092189300 0.049608600 0.000000000 0.208828000 0.290952000 0.178954000 0.358137000
0.80 3.258020000 -1.109070000 0.119754000 4.638490000 0.000000000 0.291242000 -0.122604000 0.032747700 0.000000000 0.097480900 0.341281000 0.542709000 0.024482700 0.078739400 0.049226200 0.000000000 0.211136000 0.294168000 0.180310000 0.362096000
0.90 3.168990000 -1.087140000 0.117879000 4.504810000 0.000000000 0.311362000 -0.123730000 0.052576100 0.000000000 0.087056700 0.342803000 0.581633000 0.042375500 0.091253700 0.068451600 0.000000000 0.220213000 0.293618000 0.194549000 0.367022000
1.00 3.146490000 -1.093870000 0.114285000 4.531180000 0.000000000 0.359324000 -0.117738000 0.044584200 0.000000000 0.086495700 0.345210000 0.590175000 0.053679200 0.091382100 0.067455400 0.000000000 0.221524000 0.295365000 0.196091000 0.369206000
1.30 2.895150000 -1.030420000 0.136666000 4.532080000 0.000000000 0.393471000 -0.115441000 0.000000000 0.000000000 0.092091300 0.345292000 0.618805000 0.087972000 0.119863000 0.100768000 0.000000000 0.222493000 0.296657000 0.196817000 0.370822000
1.50 2.763660000 -1.014370000 0.144100000 4.611720000 0.000000000 0.432513000 -0.104296000 0.000000000 0.000000000 0.103385000 0.342842000 0.653192000 0.123393000 0.165217000 0.143638000 0.000000000 0.218105000 0.303878000 0.198490000 0.374047000
1.80 2.636620000 -1.048380000 0.180838000 5.396070000 0.000000000 0.434162000 -0.096297900 0.000000000 0.000000000 0.107251000 0.333706000 0.618956000 0.161886000 0.193198000 0.201695000 0.000000000 0.212905000 0.310360000 0.201126000 0.376367000
2.00 2.621500000 -1.054300000 0.181367000 5.567720000 0.000000000 0.458752000 -0.095576300 0.000000000 0.000000000 0.099358000 0.329709000 0.604177000 0.139794000 0.167929000 0.185814000 0.000000000 0.222240000 0.309638000 0.202676000 0.381138000
2.60 2.463180000 -1.073080000 0.226407000 6.234910000 0.000000000 0.475305000 -0.078811800 0.000000000 0.000000000 0.105913000 0.312454000 0.577657000 0.125695000 0.153396000 0.173281000 0.000000000 0.223041000 0.310755000 0.207080000 0.382513000
3.00 2.396800000 -1.057060000 0.248126000 6.767400000 0.000000000 0.481080000 -0.071968900 0.000000000 0.000000000 0.127642000 0.318684000 0.597588000 0.052424200 0.047118500 0.116645000 0.000000000 0.236576000 0.302186000 0.212410000 0.383777000
""")
class BindiEtAl2014RjbEC8NoSOF(BindiEtAl2014RjbEC8):
"""
Implements the Bindi et al (2014) GMPE for the case in which
the site amplification is defined according to the Eurocode 8
classification, but style-of-faulting is neglected
"""
#: Required rupture parameters are magnitude
REQUIRES_RUPTURE_PARAMETERS = set(('mag',))
def _get_mean(self, C, rup, dists, sites):
"""
Returns the mean value of ground motion - noting that in this case
the style-of-faulting term is neglected
"""
return (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, dists.rjb, rup.mag) +
self._get_site_amplification_term(C, sites.vs30))
class BindiEtAl2014Rhyp(BindiEtAl2014Rjb):
"""
Implements the Bindi et al (2014) GMPE for the case in which hypocentral
distance is preferred, style-of-faulting is specified and the site
amplification depends directly on Vs30
"""
#: Required distance measure is Rhypo (eq. 1).
REQUIRES_DISTANCES = set(('rhypo', ))
def _get_mean(self, C, rup, dists, sites):
"""
Returns the mean value of ground motion
"""
return (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, dists.rhypo, rup.mag) +
self._get_style_of_faulting_term(C, rup) +
self._get_site_amplification_term(C, sites.vs30))
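# A brief sketch (not quoted from the paper) of how _get_mean composes the
# prediction, using the coefficient names of Table 4 below:
#
#   log10(Y) = F_M(M; e1, b1, b2, b3) + F_D(Rhypo, M; c1, c2, c3, h)
#              + F_sof(sofN, sofR, sofS) + F_site(gamma, Vref)
#
# The tabulated total standard deviation is consistent with
# sigma = sqrt(tau ** 2 + phi ** 2), i.e. between-event and within-event
# variability combined in quadrature.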
#: Coefficients from Table 4
COEFFS = CoeffsTable(sa_damping=5, table="""
imt e1 c1 c2 h c3 b1 b2 b3 gamma sofN sofR sofS tau phi phis2s sigma
pgv 3.242490000 -1.575560000 0.079177400 4.389180000 0.0000000000 0.472433000 -0.072548400 0.436952000 -0.508833000 -0.015719500 0.071385900 -0.055666000 0.193206000 0.295126000 0.178867000 0.352744000
pga 4.273910000 -1.578210000 0.108218000 4.827430000 0.0000963923 0.217109000 -0.068256300 0.352976000 -0.293242000 -0.047214500 0.110979000 -0.063763900 0.145783000 0.291566000 0.186662000 0.325981000
0.02 4.339700000 -1.604020000 0.103401000 4.478520000 0.0000263293 0.230422000 -0.066535400 0.363906000 -0.286524000 -0.046923100 0.115063000 -0.068140000 0.154538000 0.290986000 0.188250000 0.329477000
0.04 4.468390000 -1.685360000 0.126703000 4.580630000 0.0000000000 0.205651000 -0.052810200 0.323734000 -0.232462000 -0.045172300 0.114597000 -0.069425000 0.158402000 0.298261000 0.192664000 0.337714000
0.07 4.572400000 -1.638630000 0.123954000 5.120960000 0.0007222300 0.226272000 -0.029801500 0.311109000 -0.195629000 -0.053205000 0.121653000 -0.068447700 0.169775000 0.302117000 0.205229000 0.346552000
0.10 4.552550000 -1.579470000 0.125609000 5.675110000 0.0012390400 0.167382000 -0.050906600 0.348968000 -0.168432000 -0.047039300 0.119021000 -0.071982100 0.165148000 0.310963000 0.212643000 0.352097000
0.15 4.511190000 -1.447100000 0.084609700 4.824800000 0.0016920200 0.194714000 -0.078450700 0.448903000 -0.194539000 -0.036312300 0.102481000 -0.066168600 0.145533000 0.310621000 0.216313000 0.343023000
0.20 4.495710000 -1.370390000 0.038535800 4.569650000 0.0015859300 0.289627000 -0.081549900 0.533244000 -0.270912000 -0.038675400 0.107555000 -0.068879300 0.144701000 0.308845000 0.202040000 0.341063000
0.26 4.492240000 -1.366790000 0.012937400 3.948020000 0.0010587800 0.321065000 -0.104184000 0.596455000 -0.323555000 -0.036577100 0.103236000 -0.066658900 0.156869000 0.313737000 0.199484000 0.350769000
0.30 4.517260000 -1.400780000 0.001979970 4.268160000 0.0005648190 0.336096000 -0.115261000 0.612107000 -0.363199000 -0.038065000 0.104818000 -0.066753200 0.165195000 0.311052000 0.186722000 0.352197000
0.36 4.465590000 -1.409730000 0.000488761 4.399780000 0.0000596605 0.346351000 -0.127114000 0.600314000 -0.430464000 -0.028534300 0.095509300 -0.066974900 0.164907000 0.310509000 0.180734000 0.351583000
0.40 4.468340000 -1.428930000 -0.009095590 4.603900000 0.0000000000 0.353351000 -0.137776000 0.621323000 -0.467397000 -0.026162600 0.097198300 -0.071035500 0.165146000 0.310959000 0.182064000 0.352092000
0.46 4.371500000 -1.406550000 0.001009530 4.602540000 0.0000000000 0.357170000 -0.142768000 0.589127000 -0.531694000 -0.019281900 0.090202000 -0.070919800 0.181401000 0.306033000 0.176797000 0.355756000
0.50 4.341980000 -1.397510000 0.004238030 4.430450000 0.0000000000 0.384532000 -0.140916000 0.543301000 -0.555531000 -0.017579800 0.086012300 -0.068432100 0.189686000 0.304174000 0.178065000 0.358473000
0.60 4.214950000 -1.379190000 0.013733000 3.696150000 0.0000000000 0.408720000 -0.141998000 0.504772000 -0.627036000 0.001156930 0.071288600 -0.070131400 0.200502000 0.306270000 0.189183000 0.366066000
0.70 4.148320000 -1.371690000 0.002264110 3.009780000 0.0000000000 0.466754000 -0.138065000 0.498126000 -0.698998000 0.010002700 0.054387600 -0.064390000 0.201810000 0.308270000 0.264361000 0.368453000
0.80 4.092460000 -1.377360000 0.008956000 3.157270000 0.0000000000 0.510102000 -0.132630000 0.437529000 -0.757522000 0.015018400 0.045864700 -0.060882800 0.211664000 0.308550000 0.208994000 0.374172000
0.90 4.083240000 -1.386490000 -0.004531510 3.453700000 0.0000000000 0.567727000 -0.127244000 0.458110000 -0.786632000 0.016380200 0.044223600 -0.060603500 0.225279000 0.313873000 0.225906000 0.386351000
1.00 4.072070000 -1.387350000 -0.018545800 3.316300000 0.0000000000 0.631338000 -0.121241000 0.474982000 -0.791438000 0.026395700 0.041136600 -0.067531900 0.238973000 0.318631000 0.246861000 0.398289000
1.30 3.779540000 -1.273430000 -0.013766200 3.049760000 0.0000000000 0.650829000 -0.129005000 0.488244000 -0.803656000 0.024922000 0.038329000 -0.063250700 0.212162000 0.324083000 0.245588000 0.387354000
1.50 3.694470000 -1.264770000 -0.003373340 3.654820000 0.0000000000 0.674600000 -0.119081000 0.461122000 -0.780198000 0.019123100 0.038696600 -0.057819500 0.208441000 0.334250000 0.244150000 0.393917000
1.80 3.454080000 -1.273640000 0.083746000 4.599880000 0.0000000000 0.563304000 -0.117803000 0.184126000 -0.749008000 0.011675900 0.029249000 -0.040924700 0.203238000 0.342873000 0.256308000 0.398582000
2.00 3.389010000 -1.282830000 0.086724000 4.952850000 0.0000000000 0.548353000 -0.129571000 0.171017000 -0.744073000 0.004992770 0.033587300 -0.038579800 0.205751000 0.347114000 0.261830000 0.403511000
2.60 3.066010000 -1.234270000 0.150146000 4.455110000 0.0000000000 0.541750000 -0.103699000 0.009302580 -0.744468000 0.006026810 0.030508100 -0.036534700 0.190711000 0.339373000 0.242015000 0.389288000
3.00 2.893910000 -1.164610000 0.162354000 4.623210000 0.0000000000 0.590765000 -0.085328600 0.034058400 -0.693999000 0.018621100 -0.018982400 0.000361328 0.183363000 0.326297000 0.228650000 0.374289000
""")
class BindiEtAl2014RhypEC8(BindiEtAl2014RjbEC8):
"""
Implements the Bindi et al (2014) GMPE for the case in which hypocentral
distance is preferred, style-of-faulting is specified and site amplification
is characterised according to the Eurocode 8 site class
"""
#: Required distance measure is Rhypo
REQUIRES_DISTANCES = set(('rhypo', ))
def _get_mean(self, C, rup, dists, sites):
"""
Returns the mean value of ground motion
"""
return (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, dists.rhypo, rup.mag) +
self._get_style_of_faulting_term(C, rup) +
self._get_site_amplification_term(C, sites.vs30))
#: Coefficients from Table 3
COEFFS = CoeffsTable(sa_damping=5, table="""
imt e1 c1 c2 h c3 b1 b2 b3 eA eB eC eD sofN sofR sofS sofU tau phi phis2s sigma
pgv 3.292610000 -1.665480000 0.136478000 6.310130000 0.0000000000 0.436373000 -0.049720200 0.264336000 0.000000000 0.130319000 0.272298000 0.350870000 -0.090869900 0.013282500 -0.067381500 0.000000000 0.241933000 0.284305000 0.231138000 0.373311000
pga 4.366930000 -1.752120000 0.150507000 7.321920000 0.0000000000 0.144291000 -0.066081100 0.284211000 0.000000000 0.143778000 0.231064000 0.187402000 -0.071745100 0.084957800 -0.057096500 0.000000000 0.195249000 0.284622000 0.213455000 0.345155000
0.02 4.420440000 -1.777540000 0.147715000 7.064280000 0.0000000000 0.147874000 -0.066205600 0.297090000 0.000000000 0.141110000 0.225339000 0.187033000 -0.065306900 0.091731900 -0.056125500 0.000000000 0.197407000 0.287767000 0.216309000 0.348969000
0.04 4.549920000 -1.854600000 0.165968000 6.982270000 0.0000000000 0.124402000 -0.056602000 0.260601000 0.000000000 0.140350000 0.217010000 0.146507000 -0.065379200 0.088098100 -0.057670900 0.000000000 0.204345000 0.297881000 0.222929000 0.361234000
0.07 4.732850000 -1.878220000 0.157048000 8.133700000 0.0000000000 0.138028000 -0.040786500 0.276090000 0.000000000 0.145543000 0.206101000 0.115846000 -0.051289600 0.113143000 -0.037623000 0.000000000 0.208843000 0.304438000 0.242821000 0.369185000
0.10 4.675030000 -1.799170000 0.151808000 8.380980000 0.0005478660 0.098832300 -0.056937000 0.322027000 0.000000000 0.158622000 0.208849000 0.125428000 -0.037486800 0.120065000 -0.036904000 0.000000000 0.195390000 0.313320000 0.251339000 0.369252000
0.15 4.569650000 -1.614050000 0.105601000 7.496250000 0.0011834100 0.125747000 -0.083500900 0.464456000 0.000000000 0.162534000 0.197589000 0.158161000 -0.047089600 0.098045600 -0.050605600 0.000000000 0.193856000 0.310861000 0.247987000 0.366353000
0.20 4.450170000 -1.465010000 0.056754500 6.272220000 0.0014308100 0.236642000 -0.083463900 0.542025000 0.000000000 0.143446000 0.213637000 0.170195000 -0.021448300 0.139454000 -0.012459600 0.000000000 0.191231000 0.306652000 0.226544000 0.361392000
0.26 4.455930000 -1.443420000 0.032061300 5.480400000 0.0009816830 0.313239000 -0.089717600 0.555789000 0.000000000 0.133443000 0.244854000 0.202162000 -0.030488000 0.132769000 -0.015155100 0.000000000 0.192222000 0.308241000 0.214042000 0.363266000
0.30 4.471710000 -1.460160000 0.025927200 5.503160000 0.0005543760 0.332549000 -0.097217900 0.551296000 0.000000000 0.121637000 0.254554000 0.226009000 -0.042269100 0.119803000 -0.019226600 0.000000000 0.199096000 0.304125000 0.207111000 0.363499000
0.36 4.387990000 -1.418420000 0.022150300 4.769520000 0.0002687480 0.355357000 -0.106041000 0.543724000 0.000000000 0.118062000 0.268087000 0.258058000 -0.056669000 0.092863000 -0.034960300 0.000000000 0.199491000 0.304728000 0.201784000 0.364220000
0.40 4.376090000 -1.428430000 0.016902400 4.819740000 0.0000000000 0.368987000 -0.111955000 0.547881000 0.000000000 0.119481000 0.275041000 0.275672000 -0.053267600 0.091980000 -0.032188300 0.000000000 0.207716000 0.302796000 0.194828000 0.367194000
0.46 4.333720000 -1.425030000 0.025903300 5.109610000 0.0000000000 0.379142000 -0.115152000 0.511833000 0.000000000 0.117659000 0.291964000 0.321124000 -0.062509500 0.073772300 -0.039294000 0.000000000 0.216313000 0.301380000 0.197633000 0.370974000
0.50 4.293590000 -1.414650000 0.028367500 4.955190000 0.0000000000 0.389410000 -0.118151000 0.495459000 0.000000000 0.118871000 0.298870000 0.344584000 -0.064737900 0.069448700 -0.037414200 0.000000000 0.225415000 0.300553000 0.198934000 0.375691000
0.60 4.239150000 -1.406030000 0.026979900 4.635970000 0.0000000000 0.430341000 -0.119284000 0.475308000 0.000000000 0.117717000 0.314097000 0.412316000 -0.076075300 0.045870600 -0.054880500 0.000000000 0.234484000 0.299514000 0.208675000 0.380383000
0.70 4.196960000 -1.412970000 0.020875700 4.293770000 0.0000000000 0.470648000 -0.118095000 0.460014000 0.000000000 0.115734000 0.325887000 0.477053000 -0.074956400 0.028574500 -0.055644400 0.000000000 0.246498000 0.301897000 0.212696000 0.389747000
0.80 4.114530000 -1.404290000 0.038146400 4.010590000 0.0000000000 0.481962000 -0.116743000 0.393948000 0.000000000 0.110981000 0.334461000 0.517530000 -0.081627800 0.008428810 -0.063434400 0.000000000 0.249844000 0.305995000 0.224068000 0.395038000
0.90 4.032490000 -1.389770000 0.037093500 3.978120000 0.0000000000 0.504043000 -0.116645000 0.400442000 0.000000000 0.103765000 0.334934000 0.559004000 -0.064291400 0.019498400 -0.045615800 0.000000000 0.261433000 0.307220000 0.240384000 0.403399000
1.00 4.011400000 -1.395430000 0.034061400 4.096680000 0.0000000000 0.550001000 -0.110860000 0.386023000 0.000000000 0.103026000 0.336196000 0.566463000 -0.057167500 0.014892500 -0.051388400 0.000000000 0.274446000 0.309616000 0.244465000 0.413742000
1.30 3.684020000 -1.302310000 0.069534500 3.732900000 0.0000000000 0.544404000 -0.113618000 0.282169000 0.000000000 0.108865000 0.337519000 0.592894000 -0.034663900 0.029823500 -0.025078900 0.000000000 0.265310000 0.311777000 0.244067000 0.409383000
1.50 3.535870000 -1.273510000 0.082245800 4.074080000 0.0000000000 0.570581000 -0.103758000 0.249760000 0.000000000 0.119032000 0.333110000 0.626267000 -0.010667700 0.060266600 0.007385850 0.000000000 0.269363000 0.316539000 0.236824000 0.415637000
1.80 3.465880000 -1.361020000 0.137018000 6.097100000 0.0000000000 0.524014000 -0.101089000 0.046975200 0.000000000 0.123814000 0.323505000 0.600530000 -0.002974540 0.058459200 0.039470900 0.000000000 0.275390000 0.323622000 0.257636000 0.424936000
2.00 3.469100000 -1.381110000 0.137878000 6.539170000 0.0000000000 0.551312000 -0.098766100 0.000000000 0.000000000 0.115091000 0.320404000 0.586654000 -0.023796000 0.034963600 0.025270300 0.000000000 0.277179000 0.325724000 0.259839000 0.427696000
2.60 3.283840000 -1.389770000 0.188643000 7.040110000 0.0000000000 0.547984000 -0.084231400 0.000000000 0.000000000 0.124833000 0.306133000 0.548523000 -0.050663600 0.003435150 0.007395600 0.000000000 0.278908000 0.327756000 0.263531000 0.430364000
3.00 3.264700000 -1.399740000 0.216533000 8.339210000 0.0000000000 0.552993000 -0.071343600 0.000000000 0.000000000 0.143969000 0.315187000 0.559213000 -0.146666000 -0.128655000 -0.067567300 0.000000000 0.283885000 0.320266000 0.267078000 0.427973000
""")
class BindiEtAl2014RhypEC8NoSOF(BindiEtAl2014RhypEC8):
"""
Implements the Bindi et al. (2014) GMPE for the case in which
hypocentral distance is preferred, Eurocode 8 site amplification is used
and style-of-faulting is unspecified.
"""
#: Required rupture parameters are magnitude
REQUIRES_RUPTURE_PARAMETERS = set(('mag',))
def _get_mean(self, C, rup, dists, sites):
"""
Returns the mean value of ground motion - noting that in this case
the style-of-faulting term is neglected
"""
return (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, dists.rhypo, rup.mag) +
self._get_site_amplification_term(C, sites.vs30))
|
pwong-mapr/private-hue
|
refs/heads/HUE-1096-abe
|
apps/beeswax/src/beeswax/migrations/0008_auto__add_field_queryhistory_query_type.py
|
2
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'QueryHistory.query_type'
db.add_column('beeswax_queryhistory', 'query_type', self.gf('django.db.models.fields.SmallIntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'QueryHistory.query_type'
db.delete_column('beeswax_queryhistory', 'query_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beeswax.metainstall': {
'Meta': {'object_name': 'MetaInstall'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_example': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'beeswax.queryhistory': {
'Meta': {'object_name': 'QueryHistory'},
'design': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beeswax.SavedQuery']", 'null': 'True'}),
'has_results': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_state': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'log_context': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'modified_row_count': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'operation_type': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'query': ('django.db.models.fields.TextField', [], {}),
'query_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'server_guid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '1024', 'null': 'True'}),
'server_host': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'server_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'server_port': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'server_type': ('django.db.models.fields.CharField', [], {'default': "'beeswax'", 'max_length': '128'}),
'statement_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'beeswax.savedquery': {
'Meta': {'object_name': 'SavedQuery'},
'data': ('django.db.models.fields.TextField', [], {'max_length': '65536'}),
'desc': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'mtime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'beeswax.session': {
'Meta': {'object_name': 'Session'},
'application': ('django.db.models.fields.CharField', [], {'default': "'beeswax'", 'max_length': '128'}),
'guid': ('django.db.models.fields.TextField', [], {'max_length': "'100'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'secret': ('django.db.models.fields.TextField', [], {'max_length': "'100'"}),
'server_protocol_version': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status_code': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['beeswax']
|
w1ll1am23/home-assistant
|
refs/heads/dev
|
homeassistant/components/swiss_hydrological_data/sensor.py
|
3
|
"""Support for hydrological data from the Fed. Office for the Environment."""
from datetime import timedelta
import logging
from swisshydrodata import SwissHydroData
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by the Swiss Federal Office for the Environment FOEN"
ATTR_DELTA_24H = "delta-24h"
ATTR_MAX_1H = "max-1h"
ATTR_MAX_24H = "max-24h"
ATTR_MEAN_1H = "mean-1h"
ATTR_MEAN_24H = "mean-24h"
ATTR_MIN_1H = "min-1h"
ATTR_MIN_24H = "min-24h"
ATTR_PREVIOUS_24H = "previous-24h"
ATTR_STATION = "station"
ATTR_STATION_UPDATE = "station_update"
ATTR_WATER_BODY = "water_body"
ATTR_WATER_BODY_TYPE = "water_body_type"
CONF_STATION = "station"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
SENSOR_DISCHARGE = "discharge"
SENSOR_LEVEL = "level"
SENSOR_TEMPERATURE = "temperature"
CONDITIONS = {
SENSOR_DISCHARGE: "mdi:waves",
SENSOR_LEVEL: "mdi:zodiac-aquarius",
SENSOR_TEMPERATURE: "mdi:oil-temperature",
}
CONDITION_DETAILS = [
ATTR_DELTA_24H,
ATTR_MAX_1H,
ATTR_MAX_24H,
ATTR_MEAN_1H,
ATTR_MEAN_24H,
ATTR_MIN_1H,
ATTR_MIN_24H,
ATTR_PREVIOUS_24H,
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATION): vol.Coerce(int),
vol.Optional(CONF_MONITORED_CONDITIONS, default=[SENSOR_TEMPERATURE]): vol.All(
cv.ensure_list, [vol.In(CONDITIONS)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Swiss hydrological sensor."""
station = config.get(CONF_STATION)
monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
hydro_data = HydrologicalData(station)
hydro_data.update()
if hydro_data.data is None:
_LOGGER.error("The station doesn't exists: %s", station)
return
entities = []
for condition in monitored_conditions:
entities.append(SwissHydrologicalDataSensor(hydro_data, station, condition))
add_entities(entities, True)
class SwissHydrologicalDataSensor(SensorEntity):
"""Implementation of a Swiss hydrological sensor."""
def __init__(self, hydro_data, station, condition):
"""Initialize the Swiss hydrological sensor."""
self.hydro_data = hydro_data
self._condition = condition
self._data = self._state = self._unit_of_measurement = None
self._icon = CONDITIONS[condition]
self._station = station
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(self._data["water-body-name"], self._condition)
@property
def unique_id(self) -> str:
"""Return a unique, friendly identifier for this entity."""
return f"{self._station}_{self._condition}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if self._state is not None:
return self.hydro_data.data["parameters"][self._condition]["unit"]
return None
@property
def state(self):
"""Return the state of the sensor."""
if isinstance(self._state, (int, float)):
return round(self._state, 2)
return None
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attrs = {}
if not self._data:
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
return attrs
attrs[ATTR_WATER_BODY_TYPE] = self._data["water-body-type"]
attrs[ATTR_STATION] = self._data["name"]
attrs[ATTR_STATION_UPDATE] = self._data["parameters"][self._condition][
"datetime"
]
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
for entry in CONDITION_DETAILS:
attrs[entry.replace("-", "_")] = self._data["parameters"][self._condition][
entry
]
return attrs
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
def update(self):
"""Get the latest data and update the state."""
self.hydro_data.update()
self._data = self.hydro_data.data
if self._data is None:
self._state = None
else:
self._state = self._data["parameters"][self._condition]["value"]
class HydrologicalData:
"""The Class for handling the data retrieval."""
def __init__(self, station):
"""Initialize the data object."""
self.station = station
self.data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data."""
shd = SwissHydroData()
self.data = shd.get_station(self.station)
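# A minimal standalone sketch (outside Home Assistant, with a hypothetical
# station id) of the payload shape the sensor above relies on: each monitored
# condition lives under data["parameters"][<condition>] with "value", "unit"
# and "datetime" keys.
#
#     hydro = HydrologicalData(2143)  # 2143 is an assumed example station id
#     hydro.update()
#     if hydro.data is not None:
#         print(hydro.data["parameters"][SENSOR_TEMPERATURE]["value"])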
|
sunxi/linux-3.14
|
refs/heads/master
|
tools/perf/python/twatch.py
|
1565
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
hehongliang/tensorflow
|
refs/heads/master
|
tensorflow/contrib/keras/api/keras/losses/__init__.py
|
31
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Loss functions.
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import cosine_proximity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.losses import deserialize
from tensorflow.python.keras.losses import serialize
from tensorflow.python.keras.losses import get
del absolute_import
del division
del print_function
|
tqtran7/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/images/__init__.py
|
12133432
| |
nomadcube/scikit-learn
|
refs/heads/master
|
examples/svm/plot_svm_nonlinear.py
|
268
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
ddki/my_study_project
|
refs/heads/master
|
language/python/frameworks/flask/venv/lib/python2.7/site-packages/setuptools/dep_util.py
|
316
|
from distutils.dep_util import newer_group
# yes, this was almost entirely copy-pasted from
# 'newer_pairwise()'; it is just another convenience
# function.
def newer_pairwise_group(sources_groups, targets):
"""Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where each source group is newer than its target, according to the semantics
of 'newer_group()'.
"""
if len(sources_groups) != len(targets):
raise ValueError("'sources_group' and 'targets' must be the same length")
# build a pair of lists (sources_groups, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets
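# A hedged usage sketch with hypothetical file names: only the
# (source group, target) pairs whose group holds a file newer than the target
# survive the filter.
#
#     newer_pairwise_group([['a.c', 'a.h'], ['b.c']], ['a.o', 'b.o'])
#     # -> ([['a.c', 'a.h']], ['a.o']) when only the 'a' sources have changed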
|
josepedro/acidentes_em_rodovias_refatoracao
|
refs/heads/master
|
acidentes_em_rodovias/app/tests/tests_interface/interface_suite.py
|
2
|
import unittest  # needed for TestLoader/TestSuite below
from tests_acidentes_rodovias_periodo import *
from tests_acidentes_rodovias_regiao import *
def suite_tests_interface():
suite1 = unittest.TestLoader().loadTestsFromTestCase(AcidentesRodoviasRegiaoTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(AcidentesRodoviasPeriodoTestCase)
alltests = unittest.TestSuite([suite1, suite2])
return alltests
if __name__ == '__main__':
runner = unittest.TextTestRunner()
test_suite = suite_tests_interface()
runner.run(test_suite)
|
dcbdmb/example-code
|
refs/heads/master
|
07-closure-deco/clockdeco_param_demo2.py
|
8
|
import time
from clockdeco_param import clock
@clock('{name}({args}) dt={elapsed:0.3f}s')
def snooze(seconds):
time.sleep(seconds)
for i in range(3):
snooze(.123)
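# Expected console output: three lines formatted by the template above,
# e.g. "snooze(0.123) dt=0.123s" (timings are approximate)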
|
home-assistant/home-assistant
|
refs/heads/dev
|
homeassistant/components/rituals_perfume_genie/const.py
|
2
|
"""Constants for the Rituals Perfume Genie integration."""
DOMAIN = "rituals_perfume_genie"
COORDINATORS = "coordinators"
DEVICES = "devices"
ACCOUNT_HASH = "account_hash"
ATTRIBUTES = "attributes"
HUBLOT = "hublot"
ID = "id"
SENSORS = "sensors"
|
endolith/numpy
|
refs/heads/master
|
numpy/distutils/command/sdist.py
|
17
|
import sys
if 'setuptools' in sys.modules:
from setuptools.command.sdist import sdist as old_sdist
else:
from distutils.command.sdist import sdist as old_sdist
from numpy.distutils.misc_util import get_data_files
class sdist(old_sdist):
def add_defaults (self):
old_sdist.add_defaults(self)
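# extend the default manifest with the distribution's data_files and headers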
dist = self.distribution
if dist.has_data_files():
for data in dist.data_files:
self.filelist.extend(get_data_files(data))
if dist.has_headers():
headers = []
for h in dist.headers:
if isinstance(h, str): headers.append(h)
else: headers.append(h[1])
self.filelist.extend(headers)
return
|
matbu/ansible-modules-extras
|
refs/heads/devel
|
packaging/os/__init__.py
|
12133432
| |
pylixm/sae-django-demo
|
refs/heads/master
|
django1.7-sae/site-packages/django/views/generic/detail.py
|
68
|
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
class SingleObjectMixin(ContextMixin):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError("Generic detail view %s must be called with "
"either an object pk or a slug."
% self.__class__.__name__)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except queryset.model.DoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_queryset(self):
"""
Return the `QuerySet` that will be used to look up the object.
Note that this method is called by the default implementation of
`get_object` and may not be called if `get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return self.queryset.all()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif isinstance(obj, models.Model):
return obj._meta.model_name
else:
return None
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = {}
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
context.update(kwargs)
return super(SingleObjectMixin, self).get_context_data(**context)
class BaseDetailView(SingleObjectMixin, View):
"""
A base view for displaying a single object
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):
"""
Return a list of template names to be used for the request. May not be
called if render_to_response is overridden. Returns the following list:
* the value of ``template_name`` on the view (if provided)
* the contents of the ``template_name_field`` field on the
object instance that the view is operating upon (if available)
* ``<app_label>/<model_name><template_name_suffix>.html``
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if isinstance(self.object, models.Model):
names.append("%s/%s%s.html" % (
self.object._meta.app_label,
self.object._meta.model_name,
self.template_name_suffix
))
elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.model_name,
self.template_name_suffix
))
# If we still haven't managed to find any template names, we should
# re-raise the ImproperlyConfigured to alert the user.
if not names:
raise
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
|
exxeleron/qPython
|
refs/heads/master
|
tests/qreader_test.py
|
1
|
#
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import binascii
import struct
import sys
try:
from cStringIO import BytesIO
except ImportError:
from io import BytesIO
if sys.version > '3':
long = int
from collections import OrderedDict
from qpython import qreader
from qpython.qtype import * # @UnusedWildImport
from qpython.qcollection import qlist, QList, QTemporalList, QDictionary, qtable, QKeyedTable
from qpython.qtemporal import qtemporal, QTemporal
EXPRESSIONS = OrderedDict((
(b'("G"$"8c680a01-5a49-5aab-5a65-d4bfddb6a661"; 0Ng)',
qlist(numpy.array([uuid.UUID('8c680a01-5a49-5aab-5a65-d4bfddb6a661'), qnull(QGUID)]), qtype=QGUID_LIST)),
(b'"G"$"8c680a01-5a49-5aab-5a65-d4bfddb6a661"', uuid.UUID('8c680a01-5a49-5aab-5a65-d4bfddb6a661')),
(b'"G"$"00000000-0000-0000-0000-000000000000"', uuid.UUID('00000000-0000-0000-0000-000000000000')),
(b'(2001.01m; 0Nm)', qlist(numpy.array([12, qnull(QMONTH)]), qtype=QMONTH_LIST)),
(b'2001.01m', qtemporal(numpy.datetime64('2001-01', 'M'), qtype=QMONTH)),
(b'0Nm', qtemporal(numpy.datetime64('NaT', 'M'), qtype=QMONTH)),
(b'2001.01.01 2000.05.01 0Nd', qlist(numpy.array([366, 121, qnull(QDATE)]), qtype=QDATE_LIST)),
(b'2001.01.01', qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)),
(b'0Nd', qtemporal(numpy.datetime64('NaT', 'D'), qtype=QDATE)),
(b'2000.01.04T05:36:57.600 0Nz', qlist(numpy.array([3.234, qnull(QDATETIME)]), qtype=QDATETIME_LIST)),
(b'2000.01.04T05:36:57.600', qtemporal(numpy.datetime64('2000-01-04T05:36:57.600Z', 'ms'), qtype=QDATETIME)),
(b'0Nz', qtemporal(numpy.datetime64('NaT', 'ms'), qtype=QDATETIME)),
(b'12:01 0Nu', qlist(numpy.array([721, qnull(QMINUTE)]), qtype=QMINUTE_LIST)),
(b'12:01', qtemporal(numpy.timedelta64(721, 'm'), qtype=QMINUTE)),
(b'0Nu', qtemporal(numpy.timedelta64('NaT', 'm'), qtype=QMINUTE)),
(b'12:05:00 0Nv', qlist(numpy.array([43500, qnull(QSECOND)]), qtype=QSECOND_LIST)),
(b'12:05:00', qtemporal(numpy.timedelta64(43500, 's'), qtype=QSECOND)),
(b'0Nv', qtemporal(numpy.timedelta64('NaT', 's'), qtype=QSECOND)),
(b'12:04:59.123 0Nt', qlist(numpy.array([43499123, qnull(QTIME)]), qtype=QTIME_LIST)),
(b'12:04:59.123', qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)),
(b'0Nt', qtemporal(numpy.timedelta64('NaT', 'ms'), qtype=QTIME)),
(b'2000.01.04D05:36:57.600 0Np', qlist(numpy.array([long(279417600000000), qnull(QTIMESTAMP)]), qtype=QTIMESTAMP_LIST)),
(b'2000.01.04D05:36:57.600', qtemporal(numpy.datetime64('2000-01-04T05:36:57.600Z', 'ns'), qtype=QTIMESTAMP)),
(b'0Np', qtemporal(numpy.datetime64('NaT', 'ns'), qtype=QTIMESTAMP)),
(b'0D05:36:57.600 0Nn', qlist(numpy.array([long(20217600000000), qnull(QTIMESPAN)]), qtype=QTIMESPAN_LIST)),
(b'0D05:36:57.600', qtemporal(numpy.timedelta64(20217600000000, 'ns'), qtype=QTIMESPAN)),
(b'0Nn', qtemporal(numpy.timedelta64('NaT', 'ns'), qtype=QTIMESPAN)),
(b'::', None),
(b'1+`', QException(b'type')),
(b'1', numpy.int64(1)),
(b'1i', numpy.int32(1)),
(b'-234h', numpy.int16(-234)),
(b'0b', numpy.bool_(False)),
(b'1b', numpy.bool_(True)),
(b'0x2a', numpy.byte(0x2a)),
(b'89421099511627575j', numpy.int64(long(89421099511627575))),
(b'5.5e', numpy.float32(5.5)),
(b'3.234', numpy.float64(3.234)),
(b'"0"', b'0'),
(b'"abc"', b'abc'),
(b'"quick brown fox jumps over a lazy dog"', b'quick brown fox jumps over a lazy dog'),
(b'`abc', numpy.string_('abc')),
(b'`quickbrownfoxjumpsoveralazydog', numpy.string_('quickbrownfoxjumpsoveralazydog')),
(b'0Nh', qnull(QSHORT)),
(b'0N', qnull(QLONG)),
(b'0Ni', qnull(QINT)),
(b'0Nj', qnull(QLONG)),
(b'0Ne', qnull(QFLOAT)),
(b'0n', qnull(QDOUBLE)),
(b'" "', qnull(QSTRING)),
(b'`', qnull(QSYMBOL)),
(b'0Ng', qnull(QGUID)),
(b'()', []),
(b'(0b;1b;0b)', qlist(numpy.array([False, True, False], dtype=numpy.bool_), qtype=QBOOL_LIST)),
(b'(0x01;0x02;0xff)', qlist(numpy.array([0x01, 0x02, 0xff], dtype=numpy.byte), qtype=QBYTE_LIST)),
(b'(1h;2h;3h)', qlist(numpy.array([1, 2, 3], dtype=numpy.int16), qtype=QSHORT_LIST)),
(b'(1h;0Nh;3h)', qlist(numpy.array([1, qnull(QSHORT), 3], dtype=numpy.int16), qtype=QSHORT_LIST)),
(b'1 2 3', qlist(numpy.array([1, 2, 3], dtype=numpy.int64), qtype=QLONG_LIST)),
(b'1 0N 3', qlist(numpy.array([1, qnull(QLONG), 3], dtype=numpy.int64), qtype=QLONG_LIST)),
(b'(1i;2i;3i)', qlist(numpy.array([1, 2, 3], dtype=numpy.int32), qtype=QINT_LIST)),
(b'(1i;0Ni;3i)', qlist(numpy.array([1, qnull(QINT), 3], dtype=numpy.int32), qtype=QINT_LIST)),
(b'(1j;2j;3j)', qlist(numpy.array([1, 2, 3], dtype=numpy.int64), qtype=QLONG_LIST)),
(b'(1j;0Nj;3j)', qlist(numpy.array([1, qnull(QLONG), 3], dtype=numpy.int64), qtype=QLONG_LIST)),
(b'(5.5e; 8.5e)', qlist(numpy.array([5.5, 8.5], dtype=numpy.float32), qtype=QFLOAT_LIST)),
(b'(5.5e; 0Ne)', qlist(numpy.array([5.5, qnull(QFLOAT)], dtype=numpy.float32), qtype=QFLOAT_LIST)),
(b'3.23 6.46', qlist(numpy.array([3.23, 6.46], dtype=numpy.float64), qtype=QDOUBLE_LIST)),
(b'3.23 0n', qlist(numpy.array([3.23, qnull(QDOUBLE)], dtype=numpy.float64), qtype=QDOUBLE_LIST)),
(b'(1;`bcd;"0bc";5.5e)', [numpy.int64(1), numpy.string_('bcd'), b'0bc', numpy.float32(5.5)]),
(b'(42;::;`foo)', [numpy.int64(42), None, numpy.string_('foo')]),
(b'`the`quick`brown`fox', qlist(numpy.array([numpy.string_('the'), numpy.string_('quick'), numpy.string_('brown'), numpy.string_('fox')], dtype=numpy.object), qtype=QSYMBOL_LIST)),
(b'``quick``fox', qlist(numpy.array([qnull(QSYMBOL), numpy.string_('quick'), qnull(QSYMBOL), numpy.string_('fox')], dtype=numpy.object), qtype=QSYMBOL_LIST)),
(b'``', qlist(numpy.array([qnull(QSYMBOL), qnull(QSYMBOL)], dtype=numpy.object), qtype=QSYMBOL_LIST)),
(b'("quick"; "brown"; "fox"; "jumps"; "over"; "a lazy"; "dog")',
[b'quick', b'brown', b'fox', b'jumps', b'over', b'a lazy', b'dog']),
(b'("quick"; " "; "fox"; "jumps"; "over"; "a lazy"; "dog")',
[b'quick', b' ', b'fox', b'jumps', b'over', b'a lazy', b'dog']),
(b'{x+y}', QLambda('{x+y}')),
(b'{x+y}[3]', QProjection([QLambda('{x+y}'), numpy.int64(3)])),
(b'insert [1]', QProjection([QFunction(0), numpy.int64(1)])),
(b'xbar', QLambda('k){x*y div x:$[16h=abs[@x];"j"$x;x]}')),
(b'not', QFunction(0)),
(b'and', QFunction(0)),
(b'md5', QProjection([QFunction(0), numpy.int64(-15)])),
(b'any', QFunction(0)),
(b'save', QFunction(0)),
(b'raze', QFunction(0)),
(b'sums', QFunction(0)),
(b'prev', QFunction(0)),
(b'(enlist `a)!(enlist 1)', QDictionary(qlist(numpy.array(['a']), qtype = QSYMBOL_LIST),
qlist(numpy.array([1], dtype=numpy.int64), qtype=QLONG_LIST))),
(b'1 2!`abc`cdefgh', QDictionary(qlist(numpy.array([1, 2], dtype=numpy.int64), qtype=QLONG_LIST),
qlist(numpy.array(['abc', 'cdefgh']), qtype = QSYMBOL_LIST))),
(b'`abc`def`gh!([] one: 1 2 3; two: 4 5 6)', QDictionary(qlist(numpy.array(['abc', 'def', 'gh']), qtype = QSYMBOL_LIST),
qtable(qlist(numpy.array(['one', 'two']), qtype = QSYMBOL_LIST),
[qlist(numpy.array([1, 2, 3]), qtype = QLONG_LIST),
qlist(numpy.array([4, 5, 6]), qtype = QLONG_LIST)]))),
(b'(0 1; 2 3)!`first`second', QDictionary([qlist(numpy.array([0, 1], dtype=numpy.int64), qtype=QLONG_LIST), qlist(numpy.array([2, 3], dtype=numpy.int64), qtype=QLONG_LIST)],
qlist(numpy.array(['first', 'second']), qtype = QSYMBOL_LIST))),
(b'(1;2h;3.234;"4")!(`one;2 3;"456";(7;8 9))', QDictionary([numpy.int64(1), numpy.int16(2), numpy.float64(3.234), b'4'],
[numpy.string_('one'), qlist(numpy.array([2, 3], dtype=numpy.int64), qtype=QLONG_LIST), b'456', [numpy.int64(7), qlist(numpy.array([8, 9], dtype=numpy.int64), qtype=QLONG_LIST)]])),
(b'`A`B`C!((1;3.234;3);(`x`y!(`a;2));5.5e)', QDictionary(qlist(numpy.array(['A', 'B', 'C']), qtype = QSYMBOL_LIST),
[[numpy.int64(1), numpy.float64(3.234), numpy.int64(3)], QDictionary(qlist(numpy.array(['x', 'y']), qtype = QSYMBOL_LIST), [b'a', numpy.int64(2)]), numpy.float32(5.5)])),
(b'flip `abc`def!(1 2 3; 4 5 6)', qtable(qlist(numpy.array(['abc', 'def']), qtype = QSYMBOL_LIST),
[qlist(numpy.array([1, 2, 3]), qtype = QLONG_LIST),
qlist(numpy.array([4, 5, 6]), qtype = QLONG_LIST)])),
(b'flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)',
qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST)])),
(b'flip `name`iq`grade!(`Dent`Beeblebrox`Prefect;98 42 126;"a c")',
qtable(qlist(numpy.array(['name', 'iq', 'grade']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
b"a c"])),
(b'flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))',
qtable(qlist(numpy.array(['name', 'iq', 'fullname']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
[b"Arthur Dent", b"Zaphod Beeblebrox", b"Ford Prefect"]])),
(b'flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; " "; "Ford Prefect"))',
qtable(qlist(numpy.array(['name', 'iq', 'fullname']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
[b"Arthur Dent", b" ", b"Ford Prefect"]])),
(b'([] sc:1 2 3; nsc:(1 2; 3 4; 5 6 7))', qtable(qlist(numpy.array(['sc', 'nsc']), qtype = QSYMBOL_LIST),
[qlist(numpy.array([1, 2, 3]), qtype = QLONG_LIST),
[qlist(numpy.array([1, 2]), qtype = QLONG_LIST),
qlist(numpy.array([3, 4]), qtype = QLONG_LIST),
qlist(numpy.array([5, 6, 7]), qtype = QLONG_LIST)]])),
(b'([] sc:1 2 3; nsc:(1 2; 3 4; 5 6))', qtable(qlist(numpy.array(['sc', 'nsc']), qtype = QSYMBOL_LIST),
[qlist(numpy.array([1, 2, 3]), qtype = QLONG_LIST),
[qlist(numpy.array([1, 2]), qtype = QLONG_LIST),
qlist(numpy.array([3, 4]), qtype = QLONG_LIST),
qlist(numpy.array([5, 6]), qtype = QLONG_LIST)]])),
(b'1#([] sym:`x`x`x;str:" a")', qtable(qlist(numpy.array(['sym', 'str']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['x'], dtype=numpy.string_), qtype = QSYMBOL_LIST),
b" "])),
(b'-1#([] sym:`x`x`x;str:" a")', qtable(qlist(numpy.array(['sym', 'str']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['x'], dtype=numpy.string_), qtype = QSYMBOL_LIST),
b"a"])),
(b'2#([] sym:`x`x`x`x;str:" aa")', qtable(qlist(numpy.array(['sym', 'str']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['x', 'x'], dtype=numpy.string_), qtype = QSYMBOL_LIST),
b" "])),
(b'-2#([] sym:`x`x`x`x;str:" aa")', qtable(qlist(numpy.array(['sym', 'str']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['x', 'x'], dtype=numpy.string_), qtype = QSYMBOL_LIST),
b"aa"])),
(b'([] name:`symbol$(); iq:`int$())', qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
[qlist(numpy.array([], dtype=numpy.string_), qtype = QSYMBOL_LIST),
qlist(numpy.array([]), qtype = QINT_LIST)])),
(b'([] pos:`d1`d2`d3;dates:(2001.01.01;2000.05.01;0Nd))',
qtable(qlist(numpy.array(['pos', 'dates']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['d1', 'd2', 'd3']), qtype = QSYMBOL_LIST),
qlist(numpy.array([366, 121, qnull(QDATE)]), qtype=QDATE_LIST)])),
(b'([eid:1001 1002 1003] pos:`d1`d2`d3;dates:(2001.01.01;2000.05.01;0Nd))',
QKeyedTable(qtable(qlist(numpy.array(['eid']), qtype = QSYMBOL_LIST),
[qlist(numpy.array([1001, 1002, 1003]), qtype = QLONG_LIST)]),
qtable(qlist(numpy.array(['pos', 'dates']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['d1', 'd2', 'd3']), qtype = QSYMBOL_LIST),
qlist(numpy.array([366, 121, qnull(QDATE)]), qtype = QDATE_LIST)]))
),
))
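# Illustrative sketch (not part of the original expression tables): the raw QDATE
# values used above (366 and 121) are day offsets from the kdb+ date epoch of
# 2000-01-01, which is why they correspond to 2001.01.01 and 2000.05.01.
def _qdate_offset_example():
    epoch = numpy.datetime64('2000-01-01', 'D')
    # expected: (numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'))
    return epoch + numpy.timedelta64(366, 'D'), epoch + numpy.timedelta64(121, 'D')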
NUMPY_TEMPORAL_EXPRESSIONS = OrderedDict((
(b'(2001.01m; 0Nm)', qlist(numpy.array([numpy.datetime64('2001-01'), numpy.datetime64('NaT')], dtype='datetime64[M]'), qtype=QMONTH_LIST)),
(b'2001.01m', numpy.datetime64('2001-01', 'M')),
(b'0Nm', numpy.datetime64('NaT', 'M')),
(b'2001.01.01 2000.05.01 0Nd', qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'), qtype=QDATE_LIST)),
(b'2001.01.01', numpy.datetime64('2001-01-01', 'D')),
(b'0Nd', numpy.datetime64('NaT', 'D')),
(b'2000.01.04T05:36:57.600 0Nz', qlist(numpy.array([numpy.datetime64('2000-01-04T05:36:57.600Z', 'ms'), numpy.datetime64('nat', 'ms')]), qtype = QDATETIME_LIST)),
(b'2000.01.04T05:36:57.600', numpy.datetime64('2000-01-04T05:36:57.600Z', 'ms')),
(b'0Nz', numpy.datetime64('NaT', 'ms')),
(b'12:01 0Nu', qlist(numpy.array([numpy.timedelta64(721, 'm'), numpy.timedelta64('nat', 'm')]), qtype = QMINUTE)),
(b'12:01', numpy.timedelta64(721, 'm')),
(b'0Nu', numpy.timedelta64('NaT', 'm')),
(b'12:05:00 0Nv', qlist(numpy.array([numpy.timedelta64(43500, 's'), numpy.timedelta64('nat', 's')]), qtype = QSECOND)),
(b'12:05:00', numpy.timedelta64(43500, 's')),
(b'0Nv', numpy.timedelta64('nat', 's')),
(b'12:04:59.123 0Nt', qlist(numpy.array([numpy.timedelta64(43499123, 'ms'), numpy.timedelta64('nat', 'ms')]), qtype = QTIME_LIST)),
(b'12:04:59.123', numpy.timedelta64(43499123, 'ms')),
(b'0Nt', numpy.timedelta64('NaT', 'ms')),
(b'2000.01.04D05:36:57.600 0Np', qlist(numpy.array([numpy.datetime64('2000-01-04T05:36:57.600Z', 'ns'), numpy.datetime64('nat', 'ns')]), qtype = QTIMESTAMP_LIST)),
(b'2000.01.04D05:36:57.600', numpy.datetime64('2000-01-04T05:36:57.600Z', 'ns')),
(b'0Np', numpy.datetime64('NaT', 'ns')),
(b'0D05:36:57.600 0Nn', qlist(numpy.array([numpy.timedelta64(20217600000000, 'ns'), numpy.timedelta64('nat', 'ns')]), qtype = QTIMESPAN_LIST)),
(b'0D05:36:57.600', numpy.timedelta64(20217600000000, 'ns')),
(b'0Nn', numpy.timedelta64('NaT', 'ns')),
(b'([] pos:`d1`d2`d3;dates:(2001.01.01;2000.05.01;0Nd))',
qtable(['pos', 'dates'],
[qlist(numpy.array(['d1', 'd2', 'd3']), qtype = QSYMBOL_LIST),
numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]')])),
))
COMPRESSED_EXPRESSIONS = OrderedDict((
(b'1000#`q', qlist(numpy.array(['q'] * 1000), qtype=QSYMBOL_LIST)),
(b'([] q:1000#`q)', qtable(qlist(numpy.array(['q']), qtype = QSYMBOL_LIST),
[qlist(numpy.array(['q'] * 1000), qtype=QSYMBOL_LIST)])),
(b'([] a:til 200;b:25+til 200;c:200#`a)', qtable(qlist(numpy.array(['a', 'b', 'c']), qtype = QSYMBOL_LIST),
[qlist(numpy.arange(200), qtype=QLONG_LIST),
qlist(numpy.arange(200) + 25, qtype=QLONG_LIST),
qlist(numpy.array(['a'] * 200), qtype=QSYMBOL_LIST)])),
))
def arrays_equal(left, right):
if type(left) != type(right):
return False
if type(left) == numpy.ndarray and left.dtype != right.dtype:
print('Type comparison failed: %s != %s' % (left.dtype, right.dtype))
return False
if type(left) == QList and left.meta.qtype != right.meta.qtype:
print('QType comparison failed: %s != %s' % (left.meta.qtype, right.meta.qtype))
return False
if len(left) != len(right):
return False
for i in range(len(left)):
if type(left[i]) != type(right[i]):
print('Type comparison failed: %s != %s' % (type(left[i]), type(right[i])))
return False
if not compare(left[i], right[i]):
print('Value comparison failed: %s != %s' % ( left[i], right[i]))
return False
return True
def compare(left, right):
if type(left) in [float, numpy.float32, numpy.float64] and numpy.isnan(left):
return numpy.isnan(right)
if type(left) == QTemporal and isinstance(left.raw, float) and numpy.isnan(left.raw):
return numpy.isnan(right.raw)
elif type(left) in [list, tuple, numpy.ndarray, QList, QTemporalList]:
return arrays_equal(left, right)
elif type(left) == QFunction:
return type(right) == QFunction
else:
return left == right
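# Illustrative sketch (not part of the original helpers): compare() treats NaNs
# as equal, dispatches sequences to arrays_equal(), and falls back to == for
# scalars. The expected outcomes below are assumptions based on the code above.
def _compare_examples():
    assert compare(float('nan'), float('nan'))                        # NaN equals NaN here
    assert compare(numpy.array([1, 2]), numpy.array([1, 2]))          # element-wise match
    assert not compare(numpy.array([1, 2]), numpy.array([1.0, 2.0]))  # dtypes differ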
def test_reading():
BINARY = OrderedDict()
with open('tests/QExpressions3.out', 'rb') as f:
while True:
query = f.readline().strip()
binary = f.readline().strip()
if not binary:
break
BINARY[query] = binary
buffer_reader = qreader.QReader(None)
print('Deserialization')
for query, value in iter(EXPRESSIONS.items()):
buffer_ = BytesIO()
binary = binascii.unhexlify(BINARY[query])
buffer_.write(b'\1\0\0\0')
buffer_.write(struct.pack('i', len(binary) + 8))
buffer_.write(binary)
buffer_.seek(0)
sys.stdout.write( ' %-75s' % query )
try:
header = buffer_reader.read_header(source = buffer_.getvalue())
result = buffer_reader.read_data(message_size = header.size, is_compressed = header.is_compressed, raw = True)
assert compare(buffer_.getvalue()[8:], result), 'raw reading failed: %s' % (query)
stream_reader = qreader.QReader(buffer_)
result = stream_reader.read(raw = True).data
assert compare(buffer_.getvalue()[8:], result), 'raw reading failed: %s' % (query)
result = buffer_reader.read(source = buffer_.getvalue()).data
assert compare(value, result), 'deserialization failed: %s, expected: %s actual: %s' % (query, value, result)
header = buffer_reader.read_header(source = buffer_.getvalue())
result = buffer_reader.read_data(message_size = header.size, is_compressed = header.is_compressed)
assert compare(value, result), 'deserialization failed: %s' % (query)
buffer_.seek(0)
stream_reader = qreader.QReader(buffer_)
result = stream_reader.read().data
assert compare(value, result), 'deserialization failed: %s, expected: %s actual: %s' % (query, value, result)
print('.')
except QException as e:
assert isinstance(value, QException)
assert e.args == value.args
print('.')
def test_reading_numpy_temporals():
BINARY = OrderedDict()
with open('tests/QExpressions3.out', 'rb') as f:
while True:
query = f.readline().strip()
binary = f.readline().strip()
if not binary:
break
BINARY[query] = binary
print('Deserialization (numpy temporals)')
for query, value in iter(NUMPY_TEMPORAL_EXPRESSIONS.items()):
buffer_ = BytesIO()
binary = binascii.unhexlify(BINARY[query])
buffer_.write(b'\1\0\0\0')
buffer_.write(struct.pack('i', len(binary) + 8))
buffer_.write(binary)
buffer_.seek(0)
sys.stdout.write( ' %-75s' % query )
try:
buffer_.seek(0)
stream_reader = qreader.QReader(buffer_)
result = stream_reader.read(numpy_temporals = True).data
assert compare(value, result), 'deserialization failed: %s, expected: %s actual: %s' % (query, value, result)
print('.')
except QException as e:
assert isinstance(value, QException)
assert e.args == value.args
print('.')
def test_reading_compressed():
BINARY = OrderedDict()
with open('tests/QCompressedExpressions3.out', 'rb') as f:
while True:
query = f.readline().strip()
binary = f.readline().strip()
if not binary:
break
BINARY[query] = binary
print('Compressed deserialization')
buffer_reader = qreader.QReader(None)
for query, value in iter(COMPRESSED_EXPRESSIONS.items()):
buffer_ = BytesIO()
binary = binascii.unhexlify(BINARY[query])
buffer_.write(b'\1\0\1\0')
buffer_.write(struct.pack('i', len(binary) + 8))
buffer_.write(binary)
buffer_.seek(0)
sys.stdout.write( ' %-75s' % query )
try:
result = buffer_reader.read(source = buffer_.getvalue()).data
assert compare(value, result), 'deserialization failed: %s' % (query)
header = buffer_reader.read_header(source = buffer_.getvalue())
result = buffer_reader.read_data(message_size = header.size, is_compressed = header.is_compressed)
assert compare(value, result), 'deserialization failed: %s' % (query)
stream_reader = qreader.QReader(buffer_)
result = stream_reader.read().data
assert compare(value, result), 'deserialization failed: %s' % (query)
print('.')
except QException as e:
assert isinstance(value, QException)
assert e.args == value.args
print('.')
test_reading()
test_reading_numpy_temporals()
test_reading_compressed()
|
AdamRTomkins/libSpineML2NK
|
refs/heads/master
|
libSpineML2NK/examples/tests/test_run.py
|
2
|
#from neurokernel.LPU.LPU import LPU
import neurokernel.mpi_relaunch
from libSpineML2NK import nk_executable
e = nk_executable.Executable('experiment0.xml')
e.execute()
print "Done"
|
rahushen/ansible
|
refs/heads/devel
|
lib/ansible/executor/process/worker.py
|
88
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import sys
import traceback
from jinja2.exceptions import TemplateNotFound
HAS_PYCRYPTO_ATFORK = False
try:
from Crypto.Random import atfork
HAS_PYCRYPTO_ATFORK = True
except:
# We only need to call atfork if pycrypto is used because it will need to
# reinitialize its RNG. Since old paramiko could be using pycrypto, we
# need to take charge of calling it.
pass
from ansible.errors import AnsibleConnectionFailure
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.module_utils._text import to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['WorkerProcess']
class WorkerProcess(multiprocessing.Process):
'''
The worker thread class, which uses TaskExecutor to run tasks
read from a job queue and pushes results into a results queue
for reading later.
'''
def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):
super(WorkerProcess, self).__init__()
# takes a task queue manager as the sole param:
self._rslt_q = rslt_q
self._task_vars = task_vars
self._host = host
self._task = task
self._play_context = play_context
self._loader = loader
self._variable_manager = variable_manager
self._shared_loader_obj = shared_loader_obj
if sys.stdin.isatty():
# dupe stdin, if we have one
self._new_stdin = sys.stdin
try:
fileno = sys.stdin.fileno()
if fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
except OSError:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
except (AttributeError, ValueError):
# couldn't get stdin's fileno, so we just carry on
pass
else:
# set to /dev/null
self._new_stdin = os.devnull
def run(self):
'''
Called when the process is started. Pushes the result onto the
results queue. We also remove the host from the blocked hosts list, to
signify that they are ready for their next task.
'''
# import cProfile, pstats, StringIO
# pr = cProfile.Profile()
# pr.enable()
if HAS_PYCRYPTO_ATFORK:
atfork()
try:
# execute the task and build a TaskResult from the result
display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
executor_result = TaskExecutor(
self._host,
self._task,
self._task_vars,
self._play_context,
self._new_stdin,
self._loader,
self._shared_loader_obj,
self._rslt_q
).run()
display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
self._host.vars = dict()
self._host.groups = []
task_result = TaskResult(
self._host.name,
self._task._uuid,
executor_result,
task_fields=self._task.dump_attrs(),
)
# put the result on the result queue
display.debug("sending task result for task %s" % self._task._uuid)
self._rslt_q.put(task_result)
display.debug("done sending task result for task %s" % self._task._uuid)
except AnsibleConnectionFailure:
self._host.vars = dict()
self._host.groups = []
task_result = TaskResult(
self._host.name,
self._task._uuid,
dict(unreachable=True),
task_fields=self._task.dump_attrs(),
)
self._rslt_q.put(task_result, block=False)
except Exception as e:
if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
try:
self._host.vars = dict()
self._host.groups = []
task_result = TaskResult(
self._host.name,
self._task._uuid,
dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
task_fields=self._task.dump_attrs(),
)
self._rslt_q.put(task_result, block=False)
except:
display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
display.debug("WORKER PROCESS EXITING")
# pr.disable()
# s = StringIO.StringIO()
# sortby = 'time'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# with open('worker_%06d.stats' % os.getpid(), 'w') as f:
# f.write(s.getvalue())
|
andylolz/UK-Polling-Stations
|
refs/heads/master
|
polling_stations/apps/data_collection/management/commands/import_lewisham.py
|
1
|
"""
Import Lewisham
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode, geocode_point_only, PostcodeError
from addressbase.models import Address
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Lewisham Council
"""
council_id = 'E09000023'
addresses_name = 'PropertyPostCodePollingStation-Lewisham.csv'
stations_name = 'PropertyPostCodePollingStation-Lewisham.csv'
csv_delimiter = ','
elections = [
'ref.2016-06-23'
]
def get_station_hash(self, record):
return "-".join([
record.poll_ref,
record.polling_pl,
])
def station_record_to_dict(self, record):
# format address
address = "\n".join([
record.polling_pl,
record.pol_addre,
])
while "\n\n" in address:
address = address.replace("\n\n", "\n").strip()
postcode = " ".join(address.split(' ')[-2:]).strip().split('\n')[-1]
location = None
location_data = None
# no points supplied, so attempt to attach them by geocoding
try:
location_data = geocode_point_only(postcode)
except PostcodeError:
pass
if location_data:
location = Point(
location_data['wgs84_lon'],
location_data['wgs84_lat'],
srid=4326)
return {
'internal_council_id': record.poll_ref,
'postcode' : postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
if record.paon.strip() == '0':
address = record.streetname.strip()
else:
address = " ".join([
record.paon.strip(),
record.street_des.strip(),
])
return {
'address' : address,
'postcode' : record.postcode.strip(),
'polling_station_id': record.poll_ref
}
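# Illustrative sketch (not part of the importer): how the postcode expression in
# station_record_to_dict behaves on a made-up, already-formatted address.
if __name__ == '__main__':
    sample_address = "Lewisham Library\n199-201 Lewisham High Street\nSE13 6LG"
    sample_postcode = " ".join(sample_address.split(' ')[-2:]).strip().split('\n')[-1]
    print(sample_postcode)  # expected: "SE13 6LG"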
|
alanconway/qpid-proton
|
refs/heads/master
|
python/examples/recurring_timer_tornado.py
|
14
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import time
from proton.reactor import Handler
from proton_tornado import TornadoLoop
class Recurring(Handler):
def __init__(self, period):
self.period = period
def on_start(self, event):
self.container = event.container
self.container.schedule(time.time() + self.period, subject=self)
def on_timer(self, event):
print("Tick...")
self.container.schedule(time.time() + self.period, subject=self)
try:
container = TornadoLoop(Recurring(1.0))
container.run()
except KeyboardInterrupt:
container.stop()
print()
|
jeffschulte/protein
|
refs/heads/master
|
pyplots/plot-ave-arrow.py
|
2
|
from __future__ import division
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
import time
import file_loader as load
import math
import re
f_shape = sys.argv[1]
f_param1 = sys.argv[2]
f_param2 = sys.argv[3]
f_param3 = sys.argv[4]
f_param4 = sys.argv[5]
dens_factor = sys.argv[6]
sim_type = sys.argv[7]
f_param6 = sys.argv[8]
f_param7 = sys.argv[9]
#create data objects (see file_loader.py)
print "starting plot ave arrow"
dx =0.05
dump_time_step = 0.5
protein_name = "NflD"
job_string = "data/shape-%s/%s-%s-%s-%s-%s-%s/" % (load.f_shape,f_param1,f_param2,
f_param3,f_param4,dens_factor,sim_type)
p = re.compile('[.]')
job_string = p.sub('_',job_string)
dir_name = job_string + 'plots'
if not os.path.exists(dir_name):
print "making directory "+dir_name+" because doesnt exist"
os.makedirs(dir_name)
start_time = float(f_param6)
input_end_time = float(f_param7)
end_time = 0
contour_values = ""
for end in np.arange(int(start_time+20),int(input_end_time+20),20):
contour_values = job_string +'ave-time/contour-values-' + protein_name +'-'+ str(int(start_time))+'-' \
+str(end)+'.dat'
if os.path.isfile(contour_values):
end_time = end
if end_time == 0:
print "there are no contour files that work for these times!"
exit(1)
print "actually using end time of " + str(end_time) + " because that's the highest that exists"
print contour_values
arrow_file = job_string +'ave-time/ave-time-arrow-'+str(int(start_time))+'-'+str(protein_name)+'.dat'
print arrow_file
c_data = np.loadtxt(contour_values)
a_data = np.loadtxt(arrow_file)
print "here!!! ",contour_values
print arrow_file
last_time = a_data[0,0]
for i in range(1,len(a_data[:,0])):
if (a_data[i,0] <= last_time):
print "The times in the arrow file are not in chronological order! Something's wrong! Exiting."
print i
print last_time
print a_data[i,0]
exit(1)
# end = 520
# for i in range(20):
# end += 20
# test_data = np.loadtxt(job_string +'ave-time/contour-values-' + protein_name +'-'+ str(int(start_time))+'-' \
# +str(end)+'.dat')
# print "for the file ",job_string +'ave-time/contour-values-' + protein_name +'-'+ str(int(start_time))+'-' \
# +str(end)+'.dat'
# for i in range(3):
# print "max ", np.max(test_data)
# print "x ",int(np.argmax(test_data)/test_data.shape[1])
# print "y ",np.argmax(test_data)%test_data.shape[1]
# test_data[int(np.argmax(test_data)/test_data.shape[1]),np.argmax(test_data)%test_data.shape[1]] = 0
time_max = np.max(c_data)
print "hh ",int((8000-start_time)/dump_time_step)
a_data = a_data[int((900-start_time)/dump_time_step):]
arrow_cutoff = 3.5*(np.max(a_data[:,1]))/5.0
print arrow_cutoff
print a_data[:,3]
high_maximas = np.zeros(0)
times = np.zeros(0)
x_vals = np.zeros(0)
y_vals = np.zeros(0)
last_x = 0
last_y = 0
index = 0
for i in range(len(a_data[:,1])):
if (a_data[i,1] > arrow_cutoff):
if ( (a_data[i,2]*dx != last_x or a_data[i,3]*dx != last_y) \
and math.sqrt((a_data[i,2]*dx - last_x)**2 + (a_data[i,3]*dx-last_y)**2) > .2):
x_vals = np.append(x_vals,a_data[i,2]*dx)
y_vals = np.append(y_vals,a_data[i,3]*dx)
times = np.append(times,a_data[i,0])
last_x = a_data[i,2]*dx
last_y = a_data[i,3]*dx
Ny = len(c_data[:,0])
Nz = len(c_data[0,:])
Z, Y = np.meshgrid(np.arange(0,(c_data.shape[1]-.9)*dx,dx),np.arange(0,(c_data.shape[0]-.9)*dx,dx))
plt.contourf(Z, Y, c_data, cmap=plt.cm.jet,origin='lower',levels=np.arange(0,time_max+1.0,1))
for i in range(len(x_vals)-1):
plt.annotate('',xy=(y_vals[i+1],x_vals[i+1]),xytext=(y_vals[i],x_vals[i]),
fontsize=11,
arrowprops=dict(color='red',shrink=0.01, width=.3, headwidth=5.))
plt.clim(0,time_max)
plt.axis('off')
plt.axes().set_aspect('equal', 'datalim')
#ax.get_xaxis().set_visible(True)
#plt.xlim((0,dx*c_data.shape[1]))
#plt.ylim((0,dx*c_data.shape[0]))
plt.subplots_adjust(hspace=0.02)
# left = 0.125 # the left side of the subplots of the figure
# right = 0.9 # the right side of the subplots of the figure
# bottom = 0.1 # the bottom of the subplots of the figure
# top = 0.9 # the top of the subplots of the figure
# wspace = 0.2 # the amount of width reserved for blank space between subplots
# hspace = 0.2 # the amount of height reserved for white space between subplots
#plt.tight_layout()
plt.xlabel("Z grid position")
plt.ylabel("Y grid position")
#plt.title("Local temporal maxima, global spatial maxima view of MinD")
save_file_name = job_string +'plots/plot-time-averaged-arrow-' + protein_name +'-'+ str(int(start_time))+'-' \
+str(end_time)+'.pdf'
print save_file_name
plt.savefig(save_file_name)
#plt.show()
# num = 0
# while (start_time + num/2 < end_time):
# num += 40
# num -= 40
# print arrow_file
# print contour_values
# c_data = np.loadtxt(contour_values)
# print os.path.isfile(arrow_file)
# a_data = np.loadtxt(arrow_file)
# print a_data
# print "this ",a_data[:,0]
# print len(a_data[:,0])
# print "hello",str((end_time-start_time)*dump_time_step)
# a_data = a_data[0:int((end_time-start_time)/dump_time_step)]
# print a_data[:,0]
# print len(a_data[:,0])
# last_time = a_data[0,0]
# for i in range(1,len(a_data[:,0])):
# if (a_data[i,0] <= last_time):
# print "The times in the arrow file are not in chronological order! Something's wrong! Exiting."
# print i
# print last_time
# print a_data[i,0]
# exit(0)
# time_max = np.max(c_data)
# arrow_cutoff = 3.2*(np.max(a_data[:,1]))/5.0
# high_maximas = np.zeros(0)
# times = np.zeros(0)
# x_vals = np.zeros(0)
# y_vals = np.zeros(0)
# last_x = 0
# last_y = 0
# index = 0
# print np.max(a_data[:,1])
# print "arrow cutoff ",arrow_cutoff
# for i in range(len(a_data[:,1])):
# if (a_data[i,1] > arrow_cutoff):
# if ( (a_data[i,2]*dx != last_x or a_data[i,3]*dx != last_y) \
# and math.sqrt((a_data[i,2]*dx - last_x)**2 + (a_data[i,3]*dx-last_y)**2) > .2):
# x_vals = np.append(x_vals,a_data[i,2]*dx)
# y_vals = np.append(y_vals,a_data[i,3]*dx)
# times = np.append(times,a_data[i,0])
# last_x = a_data[i,2]*dx
# last_y = a_data[i,3]*dx
# print x_vals
# print y_vals
# #don't know why but the stad 5.00 nees to switch this second value to c_data.shape[0]-.9 not c_data.shape[0]
# Z, Y = np.meshgrid(np.arange(0,(c_data.shape[1]-.9)*dx,dx),np.arange(0,(c_data.shape[0]-.9)*dx,dx))
# print '*******************************************************************'
# print c_data.shape
# print '*******************************************************************'
# zwidth = Z.max() - Z.min()
# ywidth = Y.max() - Y.min()
# figwidth = 6
# barwidth = 0.2*6
# plt.figure(figsize=(figwidth,(figwidth - barwidth)*ywidth/zwidth)) # leave room for the colorbar!
# plt.clf()
# print "start"
# print len(Z)
# print len(Z[0])
# print c_data.shape[1]
# print c_data.shape[0]
# print len(c_data)
# print len(c_data[0])
# plt.axes().set_aspect('equal', 'datalim')
# cdata = np.array([[0 ,1,1,1],
# [.1 ,1,1,1],
# [.25,0.8,.8,1],
# [.5 ,0,.8,.8],
# [.7 ,1,1,0],
# [.9 ,1,0,0],
# [1 ,0,0,0]])
# cdict = {'red': [],
# 'green': [],
# 'blue': []}
# for i in range(cdata.shape[0]):
# print 'color', i
# cdict['red'] += [(cdata[i, 0], cdata[i, 1], cdata[i, 1])]
# cdict['green'] += [(cdata[i, 0], cdata[i, 2], cdata[i, 2])]
# cdict['blue'] += [(cdata[i, 0], cdata[i, 3], cdata[i, 3])]
# cmap = matplotlib.colors.LinearSegmentedColormap('mine', cdict)
# CS = plt.contourf(Z, Y, c_data, cmap=cmap,origin='lower',levels=np.arange(0,time_max+1.0,1))
# cbar = plt.colorbar(CS)
# for i in range(len(x_vals)-1):
# plt.annotate('%g'%i,xy=(y_vals[i+1],x_vals[i+1]),xytext=(y_vals[i],x_vals[i]),
# fontsize=9,
# arrowprops=dict(color='red',shrink=0.01, width=.3, headwidth=5.))
# plt.clim(0,time_max)
# dir_name = job_string + 'plots'
# if not os.path.exists(dir_name):
# print "making directory "+dir_name+" because doesnt exist"
# os.makedirs(dir_name)
|
isnowfy/pydown
|
refs/heads/master
|
pygments/lexers/_luabuiltins.py
|
26
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions.
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
# that recognize if a given function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().iteritems():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print '> Downloading function index for Lua %s' % version
functions = get_lua_functions(version)
print '> %d functions found:' % len(functions)
modules = {}
for full_function_name in functions:
print '>> %s' % full_function_name
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
|
gimite/personfinder
|
refs/heads/master
|
app/vendors/unidecode/x022.py
|
165
|
data = (
'[?]', # 0x00
'[?]', # 0x01
'[?]', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'[?]', # 0x05
'[?]', # 0x06
'[?]', # 0x07
'[?]', # 0x08
'[?]', # 0x09
'[?]', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'[?]', # 0x0f
'[?]', # 0x10
'[?]', # 0x11
'-', # 0x12
'[?]', # 0x13
'[?]', # 0x14
'/', # 0x15
'\\', # 0x16
'*', # 0x17
'[?]', # 0x18
'[?]', # 0x19
'[?]', # 0x1a
'[?]', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'[?]', # 0x20
'[?]', # 0x21
'[?]', # 0x22
'|', # 0x23
'[?]', # 0x24
'[?]', # 0x25
'[?]', # 0x26
'[?]', # 0x27
'[?]', # 0x28
'[?]', # 0x29
'[?]', # 0x2a
'[?]', # 0x2b
'[?]', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
':', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'~', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'<=', # 0x64
'>=', # 0x65
'<=', # 0x66
'>=', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
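# Illustrative sketch (not part of the vendored table): entries are indexed by the
# low byte of code points in the U+2200-U+22FF block, so for example U+2264
# (LESS-THAN OR EQUAL TO) and U+2215 (DIVISION SLASH) map to ASCII fallbacks.
def _example_lookup():
    return data[0x64], data[0x15]  # expected: ('<=', '/')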
|
DrMarcII/selenium
|
refs/heads/master
|
py/selenium/webdriver/phantomjs/__init__.py
|
2454
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
geographika/mappyfile
|
refs/heads/master
|
docs/examples/grammar.py
|
1
|
import os
from lark import Lark
from lark.tree import pydot__tree_to_png
GVIZ_PATH = r"C:\Program Files (x86)\Graphviz2.38\bin"
def graphviz_setup():
os.environ['PATH'] = GVIZ_PATH + ';' + os.environ['PATH']
def main(s, out_fn):
graphviz_setup()
project_root = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../"))
fld = os.path.normpath(project_root + "./mappyfile")
gf = os.path.join(fld, "mapfile.lalr.g")
grammar_text = open(gf).read()
g = Lark(grammar_text, parser="lalr", lexer="contextual")
t = g.parse(s)
print(t)
pydot__tree_to_png(t, os.path.join(project_root, "docs/images", out_fn))
print(t.pretty())
s = "MAP NAME 'Test' END"
# main(s, "tree.png")
main(s, "tree_no_terminals.png") # remove ! from !composite_type rule
print("Done!")
|
cancan101/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/dataframe/transforms/difference.py
|
90
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Transform` that performs subtraction on two `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
def _negate_sparse(st):
return sparse_tensor.SparseTensor(indices=st.indices,
values=-st.values,
dense_shape=st.dense_shape)
@series.Series.register_binary_op("__sub__")
class Difference(transform.TensorFlowTransform):
"""Subtracts one 'Series` from another."""
def __init__(self):
super(Difference, self).__init__()
@property
def name(self):
return "difference"
@property
def input_valency(self):
return 2
@property
def _output_names(self):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
pair_sparsity = (isinstance(input_tensors[0], sparse_tensor.SparseTensor),
isinstance(input_tensors[1], sparse_tensor.SparseTensor))
if pair_sparsity == (False, False):
result = input_tensors[0] - input_tensors[1]
# note tf.sparse_add accepts the mixed cases,
# so long as at least one input is sparse.
elif not pair_sparsity[1]:
result = sparse_ops.sparse_add(input_tensors[0], - input_tensors[1])
else:
result = sparse_ops.sparse_add(input_tensors[0],
_negate_sparse(input_tensors[1]))
# pylint: disable=not-callable
return self.return_type(result)
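# Minimal sketch (assumption: a TF 1.x build matching this contrib module) of how
# _negate_sparse flips the stored values while keeping indices and dense_shape.
def _negate_sparse_example():
    st = sparse_tensor.SparseTensor(indices=[[0, 0], [1, 2]],
                                    values=[1.0, -2.0],
                                    dense_shape=[2, 3])
    return _negate_sparse(st)  # values become [-1.0, 2.0] when evaluated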
|
RawEvan/sharephotos
|
refs/heads/master
|
site-packages/appconf/__init__.py
|
11
|
from __future__ import absolute_import
from .base import AppConf # noqa
__version__ = "1.0.1"
|
sinomiko/project
|
refs/heads/master
|
IdeaProjects/crap/tuchong.py
|
1
|
#-*- encoding: utf-8 -*-
'''''
Created on 2015-7-30
@author: Miko
'''
import urllib.request
import urllib,re,sys,os,time
import uuid
# get the second-level page urls
def findUrl2(html):
# https://aisimu.tuchong.com/15583275/?rqt_id=
print(html)
#re1 = r'https://aisimu.tuchong.com/\d+/\?rqt_id='
re1 = 'https://aisimu.tuchong.com/15583275/?rqt_id='
#re1 = r'https://tuchong.com/\d+/\d+/|https://\w+(?<!photos).tuchong.com/\d+/'
url2list = re.findall(re1,html)
url2lstfltr = list(set(url2list))
url2lstfltr.sort(key=url2list.index)
print(url2lstfltr)
return url2lstfltr
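# Illustrative note (assumption, not a fix): re1 above is used as a regular
# expression, so its '?' makes the preceding '/' optional rather than matching a
# literal query-string '?'. A URL that actually contains '?rqt_id=' therefore
# does not match the literal pattern:
def _findurl2_example():
    sample = 'href="https://aisimu.tuchong.com/15583275/?rqt_id=abc"'
    return findUrl2(sample)  # expected: [] with the pattern above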
# fetch the html text
def getHtml(url):
html = urllib.request.urlopen(url).read().decode('utf-8')  # decode as utf-8
return html
#下载图片到本地
def download(html_page , pageNo):
# define the folder name
x = time.localtime(time.time())
foldername = str(x.__getattribute__("tm_year"))+"-"+str(x.__getattribute__("tm_mon"))+"-"+str(x.__getattribute__("tm_mday"))
re2=r'http://photos.tuchong.com/.+/f/.+\.jpg'
imglist=re.findall(re2,html_page)
print (imglist)
download_img=None
for imgurl in imglist:
picpath = 'D:\\TuChong\\%s\\%s' % (foldername,str(pageNo))
filename = str(uuid.uuid1())
if not os.path.exists(picpath):
os.makedirs(picpath)
target = picpath+"\\%s.jpg" % filename
print ("The photos location is:"+target)
download_img = urllib.request.urlretrieve(imgurl, target)#将图片下载到指定路径中
time.sleep(1)
print(imgurl)
return download_img
# def callback(blocknum, blocksize, totalsize):
# '''callback function
# @blocknum: number of data blocks downloaded so far
# @blocksize: size of each data block
# @totalsize: size of the remote file
# '''
# print str(blocknum),str(blocksize),str(totalsize)
# if blocknum * blocksize >= totalsize:
# print 'download finished'
def quitit():
print ("Bye!")
exit(0)
if __name__ == '__main__':
print (''''' *****************************************
** Welcome to Spider for TUCHONG **
** Created on 2015-7-30 **
** @author: miko **
*****************************************''')
pageNo ='10' # raw_input("Input the page number you want to scratch (1-100),please input 'quit' if you want to quit>")
while not pageNo.isdigit() or int(pageNo) > 100 :
if pageNo == 'quit':quitit()
print ("Param is invalid , please try again.")
pageNo = raw_input("Input the page number you want to scratch >")
# crawl the portrait section of tuchong.com
html = getHtml("http://tuchong.com/tags/%E4%BA%BA%E5%83%8F/?page="+str(pageNo))
detllst = findUrl2(html)
for detail in detllst:
html2 = getHtml(detail)
download(html2,pageNo)
print ("Finished.")
|
BehavioralInsightsTeam/edx-platform
|
refs/heads/release-bit
|
common/djangoapps/student/migrations/0003_auto_20160516_0938.py
|
13
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.utils.timezone
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('student', '0002_auto_20151208_1034'),
]
operations = [
migrations.CreateModel(
name='UserAttribute',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('name', models.CharField(help_text='Name of this user attribute.', max_length=255)),
('value', models.CharField(help_text='Value of this user attribute.', max_length=255)),
('user', models.ForeignKey(related_name='attributes', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.AlterUniqueTogether(
name='userattribute',
unique_together=set([('user', 'name')]),
),
]
|
donSchoe/p2pool-n
|
refs/heads/master
|
SOAPpy/Types.py
|
289
|
from __future__ import nested_scopes
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Types.py 1496 2010-03-04 23:46:17Z pooryorick $'
from version import __version__
import UserList
import base64
import cgi
import urllib
import copy
import re
import time
from types import *
# SOAPpy modules
from Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name): return name[0]=='_'
def isPublic(name): return name[0]!='_'
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
_validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)
def __init__(self, data = None, name = None, typed = 1, attrs = None):
if self.__class__ == anyType:
raise Error, "anyType can't be instantiated directly"
if type(name) in (ListType, TupleType):
self._ns, self._name = name
else:
self._ns = self._validURIs[0]
self._name = name
self._typed = typed
self._attrs = {}
self._cache = None
self._type = self._typeName()
self._data = self._checkValueSpace(data)
if attrs != None:
self._setAttrs(attrs)
def __str__(self):
if hasattr(self,'_name') and self._name:
return "<%s %s at %d>" % (self.__class__, self._name, id(self))
return "<%s at %d>" % (self.__class__, id(self))
__repr__ = __str__
def _checkValueSpace(self, data):
return data
def _marshalData(self):
return str(self._data)
def _marshalAttrs(self, ns_map, builder):
a = ''
for attr, value in self._attrs.items():
ns, n = builder.genns(ns_map, attr[0])
a += n + ' %s%s="%s"' % \
(ns, attr[1], cgi.escape(str(value), 1))
return a
def _fixAttr(self, attr):
if type(attr) in (StringType, UnicodeType):
attr = (None, attr)
elif type(attr) == ListType:
attr = tuple(attr)
elif type(attr) != TupleType:
raise AttributeError, "invalid attribute type"
if len(attr) != 2:
raise AttributeError, "invalid attribute length"
if type(attr[0]) not in (NoneType, StringType, UnicodeType):
raise AttributeError, "invalid attribute namespace URI type"
return attr
def _getAttr(self, attr):
attr = self._fixAttr(attr)
try:
return self._attrs[attr]
except:
return None
def _setAttr(self, attr, value):
attr = self._fixAttr(attr)
if type(value) is StringType:
value = unicode(value)
self._attrs[attr] = value
def _setAttrs(self, attrs):
if type(attrs) in (ListType, TupleType):
for i in range(0, len(attrs), 2):
self._setAttr(attrs[i], attrs[i + 1])
return
if type(attrs) == DictType:
d = attrs
elif isinstance(attrs, anyType):
d = attrs._attrs
else:
raise AttributeError, "invalid attribute type"
for attr, value in d.items():
self._setAttr(attr, value)
def _setMustUnderstand(self, val):
self._setAttr((NS.ENV, "mustUnderstand"), val)
def _getMustUnderstand(self):
return self._getAttr((NS.ENV, "mustUnderstand"))
def _setActor(self, val):
self._setAttr((NS.ENV, "actor"), val)
def _getActor(self):
return self._getAttr((NS.ENV, "actor"))
def _typeName(self):
return self.__class__.__name__[:-4]
def _validNamespaceURI(self, URI, strict):
if not hasattr(self, '_typed') or not self._typed:
return None
if URI in self._validURIs:
return URI
if not strict:
return self._ns
raise AttributeError, \
"not a valid namespace for type %s" % self._type
class voidType(anyType):
pass
class stringType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type:" % self._type
return data
def _marshalData(self):
return self._data
class untypedType(stringType):
def __init__(self, data = None, name = None, attrs = None):
stringType.__init__(self, data, name, 0, attrs)
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
_validURIs = (NS.XSD2, NS.XSD3)
__invalidre = '[\n\t]|^ | $| '
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
if type(self.__invalidre) == StringType:
self.__invalidre = re.compile(self.__invalidre)
if self.__invalidre.search(data):
raise ValueError, "invalid %s value" % self._type
return data
class normalizedStringType(anyType):
_validURIs = (NS.XSD3,)
__invalidre = '[\n\r\t]'
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
if type(self.__invalidre) == StringType:
self.__invalidre = re.compile(self.__invalidre)
if self.__invalidre.search(data):
raise ValueError, "invalid %s value" % self._type
return data
class CDATAType(normalizedStringType):
_validURIs = (NS.XSD2,)
class booleanType(anyType):
def __int__(self):
return self._data
__nonzero__ = __int__
def _marshalData(self):
return ['false', 'true'][self._data]
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if data in (0, '0', 'false', ''):
return 0
if data in (1, '1', 'true'):
return 1
raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType):
raise Error, "invalid %s value" % self._type
return data
class floatType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType) or \
data < -3.4028234663852886E+38 or \
data > 3.4028234663852886E+38:
raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
return data
def _marshalData(self):
return "%.18g" % self._data # More precision
class doubleType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType) or \
data < -1.7976931348623158E+308 or \
data > 1.7976931348623157E+308:
raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
return data
def _marshalData(self):
return "%.18g" % self._data # More precision
class durationType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
try:
# A tuple or a scalar is OK, but make them into a list
if type(data) == TupleType:
data = list(data)
elif type(data) != ListType:
data = [data]
if len(data) > 6:
raise Exception, "too many values"
# Now check the types of all the components, and find
# the first nonzero element along the way.
f = -1
for i in range(len(data)):
if data[i] == None:
data[i] = 0
continue
if type(data[i]) not in \
(IntType, LongType, FloatType):
raise Exception, "element %d a bad type" % i
if data[i] and f == -1:
f = i
# If they're all 0, just use zero seconds.
if f == -1:
self._cache = 'PT0S'
return (0,) * 6
# Make sure only the last nonzero element has a decimal fraction
# and only the first element is negative.
d = -1
for i in range(f, len(data)):
if data[i]:
if d != -1:
raise Exception, \
"all except the last nonzero element must be " \
"integers"
if data[i] < 0 and i > f:
raise Exception, \
"only the first nonzero element can be negative"
elif data[i] != long(data[i]):
d = i
# Pad the list on the left if necessary.
if len(data) < 6:
n = 6 - len(data)
f += n
d += n
data = [0] * n + data
# Save index of the first nonzero element and the decimal
# element for _marshalData.
self.__firstnonzero = f
self.__decimal = d
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
t = 0
if d[self.__firstnonzero] < 0:
s = '-P'
else:
s = 'P'
t = 0
for i in range(self.__firstnonzero, len(d)):
if d[i]:
if i > 2 and not t:
s += 'T'
t = 1
if self.__decimal == i:
s += "%g" % abs(d[i])
else:
s += "%d" % long(abs(d[i]))
s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
self._cache = s
return self._cache
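# Illustrative sketch (not part of SOAPpy): the duration value space above is
# (years, months, days, hours, minutes, seconds) and marshals to ISO-8601 text.
def _durationType_example():
    return durationType((1, 2, 0, 0, 0, 30.5))._marshalData()  # expected: 'P1Y2MT30.5S'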
class timeDurationType(durationType):
_validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.time()
if (type(data) in (IntType, LongType)):
data = list(time.gmtime(data)[:6])
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[:6])
data[5] += f
elif type(data) in (ListType, TupleType):
if len(data) < 6:
raise Exception, "not enough values"
if len(data) > 9:
raise Exception, "too many values"
data = list(data[:6])
cleanDate(data)
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
f = d[5] - int(d[5])
if f != 0:
s += ("%g" % f)[1:]
s += 'Z'
self._cache = s
return self._cache
class recurringInstantType(anyType):
_validURIs = (NS.XSD,)
def _checkValueSpace(self, data):
try:
if data == None:
data = list(time.gmtime(time.time())[:6])
if (type(data) in (IntType, LongType)):
data = list(time.gmtime(data)[:6])
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[:6])
data[5] += f
elif type(data) in (ListType, TupleType):
if len(data) < 1:
raise Exception, "not enough values"
if len(data) > 9:
raise Exception, "too many values"
data = list(data[:6])
if len(data) < 6:
data += [0] * (6 - len(data))
f = len(data)
for i in range(f):
if data[i] == None:
if f < i:
raise Exception, \
"only leftmost elements can be none"
else:
f = i
break
cleanDate(data, f)
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
e = list(d)
neg = ''
if not e[0]:
e[0] = '--'
else:
if e[0] < 0:
neg = '-'
e[0] = abs(e[0])
if e[0] < 100:
e[0] = '-' + "%02d" % e[0]
else:
e[0] = "%04d" % e[0]
for i in range(1, len(e)):
if e[i] == None or (i < 3 and e[i] == 0):
e[i] = '-'
else:
if e[i] < 0:
neg = '-'
e[i] = abs(e[i])
e[i] = "%02d" % e[i]
if d[5]:
f = abs(d[5] - int(d[5]))
if f:
e[5] += ("%g" % f)[1:]
s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
self._cache = s
return self._cache
class timeInstantType(dateTimeType):
_validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
_validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[3:6]
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[3:6])
data[2] += f
elif type(data) in (IntType, LongType):
data = time.gmtime(data)[3:6]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[3:6]
elif len(data) > 3:
raise Exception, "too many values"
data = [None, None, None] + list(data)
if len(data) < 6:
data += [0] * (6 - len(data))
cleanDate(data, 3)
data = data[3:]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
#s = ''
#
#s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
s = "%02d:%02d:%02d" % d
f = d[2] - int(d[2])
if f != 0:
s += ("%g" % f)[1:]
s += 'Z'
self._cache = s
return self._cache
class dateType(anyType):
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:3]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[0:3]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:3]
elif len(data) > 3:
raise Exception, "too many values"
data = list(data)
if len(data) < 3:
data += [1, 1, 1][len(data):]
data += [0, 0, 0]
cleanDate(data)
data = data[:3]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
self._cache = s
return self._cache
class gYearMonthType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:2]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[0:2]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:2]
elif len(data) > 2:
raise Exception, "too many values"
data = list(data)
if len(data) < 2:
data += [1, 1][len(data):]
data += [1, 0, 0, 0]
cleanDate(data)
data = data[:2]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
self._cache = s
return self._cache
class gYearType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:1]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:1]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class centuryType(anyType):
_validURIs = (NS.XSD2, NS.ENC)
def _checkValueSpace(self, data):
try:
if data == None:
data = [time.gmtime(time.time())[0] / 100]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = [data[0] / 100]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%02dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class yearType(gYearType):
_validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[1:3]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[1:3]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:2]
elif len(data) > 2:
raise Exception, "too many values"
data = list(data)
if len(data) < 2:
data += [1, 1][len(data):]
data = [0] + data + [0, 0, 0]
cleanDate(data, 1)
data = data[1:3]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
self._cache = "--%02d-%02dZ" % self._data
return self._cache
class recurringDateType(gMonthDayType):
_validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[1:2]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[1:2]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
if data[0] < 1 or data[0] > 12:
raise Exception, "bad value"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
self._cache = "--%02d--Z" % self._data
return self._cache
class monthType(gMonthType):
_validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[2:3]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[2:3]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
if data[0] < 1 or data[0] > 31:
raise Exception, "bad value"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
self._cache = "---%02dZ" % self._data
return self._cache
class recurringDayType(gDayType):
_validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = encodeHexString(self._data)
return self._cache
class base64BinaryType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = base64.encodestring(self._data)
return self._cache
class base64Type(base64BinaryType):
_validURIs = (NS.ENC,)
class binaryType(anyType):
_validURIs = (NS.XSD, NS.ENC)
def __init__(self, data, name = None, typed = 1, encoding = 'base64',
attrs = None):
anyType.__init__(self, data, name, typed, attrs)
self._setAttr('encoding', encoding)
def _marshalData(self):
if self._cache == None:
if self._getAttr((None, 'encoding')) == 'base64':
self._cache = base64.encodestring(self._data)
else:
self._cache = encodeHexString(self._data)
return self._cache
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _setAttr(self, attr, value):
attr = self._fixAttr(attr)
if attr[1] == 'encoding':
if attr[0] != None or value not in ('base64', 'hex'):
raise AttributeError, "invalid encoding"
self._cache = None
anyType._setAttr(self, attr, value)
class anyURIType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = urllib.quote(self._data)
return self._cache
class uriType(anyURIType):
_validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
_validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
def __init__(self, data, name = None, typed = 1, attrs = None):
if self.__class__ == NOTATIONType:
raise Error, "a NOTATION can't be instantiated directly"
anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) in (StringType, UnicodeType):
return (data,)
if type(data) not in (ListType, TupleType) or \
filter (lambda x: type(x) not in (StringType, UnicodeType), data):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
return ' '.join(self._data)
class IDREFSType(ENTITIESType): pass
class NMTOKENSType(ENTITIESType): pass
class integerType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType):
raise ValueError, "invalid %s value" % self._type
return data
class nonPositiveIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data > 0:
raise ValueError, "invalid %s value" % self._type
return data
class non_Positive_IntegerType(nonPositiveIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'non-positive-integer'
class negativeIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data >= 0:
raise ValueError, "invalid %s value" % self._type
return data
class negative_IntegerType(negativeIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'negative-integer'
class longType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -9223372036854775808L or \
data > 9223372036854775807L:
raise ValueError, "invalid %s value" % self._type
return data
class intType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -2147483648L or \
data > 2147483647L:
raise ValueError, "invalid %s value" % self._type
return data
class shortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -32768 or \
data > 32767:
raise ValueError, "invalid %s value" % self._type
return data
class byteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -128 or \
data > 127:
raise ValueError, "invalid %s value" % self._type
return data
class nonNegativeIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data < 0:
raise ValueError, "invalid %s value" % self._type
return data
class non_Negative_IntegerType(nonNegativeIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'non-negative-integer'
class unsignedLongType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 18446744073709551615L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedIntType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 4294967295L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedShortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 65535:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedByteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 255:
raise ValueError, "invalid %s value" % self._type
return data
class positiveIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data <= 0:
raise ValueError, "invalid %s value" % self._type
return data
class positive_IntegerType(positiveIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'positive-integer'
# Now compound types
class compoundType(anyType):
def __init__(self, data = None, name = None, typed = 1, attrs = None):
if self.__class__ == compoundType:
raise Error, "a compound can't be instantiated directly"
anyType.__init__(self, data, name, typed, attrs)
self._keyord = []
if type(data) == DictType:
self.__dict__.update(data)
def _aslist(self, item=None):
if item is not None:
return self.__dict__[self._keyord[item]]
else:
return map( lambda x: self.__dict__[x], self._keyord)
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.__dict__[item]
else:
retval = {}
def fun(x): retval[x.encode(encoding)] = self.__dict__[x]
if hasattr(self, '_keyord'):
map( fun, self._keyord)
else:
for name in dir(self):
if isPublic(name):
retval[name] = getattr(self,name)
return retval
def __getitem__(self, item):
if type(item) == IntType:
return self.__dict__[self._keyord[item]]
else:
return getattr(self, item)
def __len__(self):
return len(self._keyord)
def __nonzero__(self):
return 1
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs = None):
if name in self._keyord:
if type(self.__dict__[name]) != ListType:
self.__dict__[name] = [self.__dict__[name]]
self.__dict__[name].append(value)
else:
self.__dict__[name] = value
self._keyord.append(name)
def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
if subpos == 0 and type(self.__dict__[name]) != ListType:
self.__dict__[name] = value
else:
self.__dict__[name][subpos] = value
# only add to key order list if it does not already
# exist in list
if not (name in self._keyord):
if pos < len(self._keyord):
self._keyord[pos] = name
else:
self._keyord.append(name)
def _getItemAsList(self, name, default = []):
try:
d = self.__dict__[name]
except:
return default
if type(d) == ListType:
return d
return [d]
def __str__(self):
return anyType.__str__(self) + ": " + str(self._asdict())
def __repr__(self):
return self.__str__()
class structType(compoundType):
pass
class headerType(structType):
_validURIs = (NS.ENV,)
def __init__(self, data = None, typed = 1, attrs = None):
structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
_validURIs = (NS.ENV,)
def __init__(self, data = None, typed = 1, attrs = None):
structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. Its possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _aslist(self, item=None):
if item is not None:
return self.data[int(item)]
else:
return self.data
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.data[int(item)]
else:
retval = {}
def fun(x): retval[str(x).encode(encoding)] = self.data[x]
map( fun, range(len(self.data)) )
return retval
def __getitem__(self, item):
try:
return self.data[int(item)]
except ValueError:
return getattr(self, item)
def __len__(self):
return len(self.data)
def __nonzero__(self):
return 1
def __str__(self):
return anyType.__str__(self) + ": " + str(self._aslist())
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (curpos[0] - len(a) + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
#self._full = 1
#FIXME: why is this occurring?
pass
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (curpos[0] - len(a) + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
def __init__(self, data = None, name = None, typed = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):
arrayType.__init__(self, data, name, attrs, offset, rank, asize,
elemsname)
self._typed = 1
self._type = typed
self._complexType = complexType
class faultType(structType, Error):
def __init__(self, faultcode = "", faultstring = "", detail = None):
self.faultcode = faultcode
self.faultstring = faultstring
if detail != None:
self.detail = detail
structType.__init__(self, None, 0)
def _setDetail(self, detail = None):
if detail != None:
self.detail = detail
else:
try: del self.detail
except AttributeError: pass
def __repr__(self):
if getattr(self, 'detail', None) != None:
return "<Fault %s: %s: %s>" % (self.faultcode,
self.faultstring,
self.detail)
else:
return "<Fault %s: %s>" % (self.faultcode, self.faultstring)
__str__ = __repr__
def __call__(self):
return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
def __init__(self, code="", string="", detail=None):
self.value = ("SOAPpy SOAP Exception", code, string, detail)
self.code = code
self.string = string
self.detail = detail
def __str__(self):
return repr(self.value)
class RequiredHeaderMismatch(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MethodNotFound(Exception):
def __init__(self, value):
(val, detail) = value.split(":")
self.value = val
self.detail = detail
def __str__(self):
return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MethodFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#######
# Convert complex SOAPpy objects to native python equivalents
#######
def simplify(object, level=0):
"""
Convert the SOAPpy objects and their contents to simple python types.
This function recursively converts the passed 'container' object,
and all public subobjects. (Private subobjects have names that
start with '_'.)
Conversions:
- faultType --> raise python exception
- arrayType --> array
- compoundType --> dictionary
"""
if level > 10:
return object
if isinstance( object, faultType ):
if object.faultstring == "Required Header Misunderstood":
raise RequiredHeaderMismatch(object.detail)
elif object.faultstring == "Method Not Found":
raise MethodNotFound(object.detail)
elif object.faultstring == "Authorization Failed":
raise AuthorizationFailed(object.detail)
elif object.faultstring == "Method Failed":
raise MethodFailed(object.detail)
else:
se = SOAPException(object.faultcode, object.faultstring,
object.detail)
raise se
elif isinstance( object, arrayType ):
data = object._aslist()
for k in range(len(data)):
data[k] = simplify(data[k], level=level+1)
return data
elif isinstance( object, compoundType ) or isinstance(object, structType):
data = object._asdict()
for k in data.keys():
if isPublic(k):
data[k] = simplify(data[k], level=level+1)
return data
elif type(object)==DictType:
for k in object.keys():
if isPublic(k):
object[k] = simplify(object[k])
return object
elif type(object)==list:
for k in range(len(object)):
object[k] = simplify(object[k])
return object
else:
return object
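# For illustration, a hedged sketch of simplify() on a SOAP response
# (the `server` proxy and its getProducts() method are hypothetical):
#
# >>> result = server.getProducts()   # e.g., an arrayType of structType rows
# >>> simplify(result)                # => a plain list of python dictionaries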
def simplify_contents(object, level=0):
"""
Convert the contents of SOAPpy objects to simple python types.
This function recursively converts the sub-objects contained in a
'container' object to simple python types.
Conversions:
- faultType --> raise python exception
- arrayType --> array
- compoundType --> dictionary
"""
if level>10: return object
if isinstance( object, faultType ):
for k in object._keys():
if isPublic(k):
setattr(object, k, simplify(object[k], level=level+1))
raise object
elif isinstance( object, arrayType ):
data = object._aslist()
for k in range(len(data)):
object[k] = simplify(data[k], level=level+1)
elif isinstance(object, structType):
data = object._asdict()
for k in data.keys():
if isPublic(k):
setattr(object, k, simplify(data[k], level=level+1))
elif isinstance( object, compoundType ) :
data = object._asdict()
for k in data.keys():
if isPublic(k):
object[k] = simplify(data[k], level=level+1)
elif type(object)==DictType:
for k in object.keys():
if isPublic(k):
object[k] = simplify(object[k])
elif type(object)==list:
for k in range(len(object)):
object[k] = simplify(object[k])
return object
|
pomahtuk/py-cooking
|
refs/heads/master
|
pycooking/proj_settings/locals.py
|
1
|
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
msebire/intellij-community
|
refs/heads/master
|
python/testData/highlighting/argumentList.py
|
83
|
def comp_args(a=2, (b, c)=(3, 4)):
return a, b, c
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/nspkg/azure-storage-nspkg/azure/storage/__init__.py
|
197
|
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
|
keithroe/vtkoptix
|
refs/heads/master
|
Examples/Infovis/Python/kcore.py
|
27
|
#!/usr/bin/env python
from vtk import *
# generate a random graph
source = vtkRandomGraphSource()
source.SetNumberOfVertices(15000)
source.SetAllowSelfLoops(False)
source.SetEdgeProbability(0.003)
source.SetUseEdgeProbability(True)
source.AllowParallelEdgesOff()
# compute the kcore levels for every vertex in the graph
kcore = vtkKCoreDecomposition()
kcore.AddInputConnection(source.GetOutputPort())
kcore.SetOutputArrayName("kcore")
kcore.CheckInputGraphOn()
# generate x/y coordinates for vertices based on coreness
kcoreLayout = vtkKCoreLayout()
kcoreLayout.SetGraphConnection( kcore.GetOutputPort() )
kcoreLayout.SetCartesian(True)
kcoreLayout.SetEpsilon(0.2)
kcoreLayout.SetUnitRadius(1.0)
# assign coordinates for layout purposes based on the x/y coordinates
# that are created in kcoreLayout
kcoreAssignCoords = vtkAssignCoordinates()
kcoreAssignCoords.SetInputConnection(kcoreLayout.GetOutputPort())
kcoreAssignCoords.SetXCoordArrayName("coord_x")
kcoreAssignCoords.SetYCoordArrayName("coord_y")
kcoreAssignCoords.Update()
# draw it
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(kcoreAssignCoords.GetOutputPort())
view.SetVertexLabelArrayName("kcore")
view.SetVertexLabelVisibility(False)
view.SetVertexColorArrayName("kcore")
view.SetColorVertices(True)
# turn off edge visibility since it isn't useful for this view
view.SetEdgeVisibility(False)
# use the coordinates assigned by kcoreAssignCoords
view.SetLayoutStrategyToPassThrough()
theme = vtkViewTheme.CreateNeonTheme()
theme.SetLineWidth(1)
theme.SetPointSize(5)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
|
RitwikGupta/pattern
|
refs/heads/master
|
pattern/server/__init__.py
|
21
|
#### PATTERN | SERVER ##############################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2014 University of Antwerp, Belgium
# Copyright (c) 2014 St. Lucas University College of Art & Design, Antwerp.
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
from __future__ import with_statement
import __main__
import sys
import os
import re
import time; _time=time
import atexit
import urllib
import hashlib
import base64
import random
import string
import textwrap
import types
import inspect
import threading
import subprocess
import tempfile
import itertools
import collections
import sqlite3 as sqlite
try: # Python 2.x vs 3.x
import htmlentitydefs
except:
from html import entities as htmlentitydefs
try: # Python 2.x vs 3.x
from cStringIO import StringIO
except:
from io import BytesIO as StringIO
try: # Python 2.x vs 3.x
import cPickle as pickle
except:
import pickle
try:
# Folder that contains pattern.server.
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
try:
# Folder that contains the script that (indirectly) imports pattern.server.
# This is used as the default App.path.
f = inspect.currentframe()
f = inspect.getouterframes(f)[-1][0]
f = f.f_globals["__file__"]
SCRIPT = os.path.dirname(os.path.abspath(f))
except:
SCRIPT = os.getcwd()
try:
# Import from python2.x/site-packages/cherrypy
import cherrypy; cp=cherrypy
except:
# Import from pattern/server/cherrypy/cherrypy
# Bundled package is "hidden" in a non-package folder,
# otherwise it conflicts with site-packages/cherrypy.
sys.path.insert(0, os.path.join(MODULE, "cherrypy"))
import cherrypy; cp=cherrypy
try: import json # Python 2.6+
except:
try: from pattern.web import json # simplejson
except:
json = None
#### STRING FUNCTIONS ##############################################################################
RE_AMPERSAND = re.compile("\&(?!\#)") # & not followed by #
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # &#201;
def encode_entities(string):
""" Encodes HTML entities in the given string ("<" => "&lt;").
For example, to display "<em>hello</em>" in a browser,
we need to pass "&lt;em&gt;hello&lt;/em&gt;" (otherwise "hello" in italic is displayed).
"""
if isinstance(string, basestring):
string = RE_AMPERSAND.sub("&amp;", string)
string = string.replace("<", "&lt;")
string = string.replace(">", "&gt;")
string = string.replace('"', "&quot;")
string = string.replace("'", "&#39;")
return string
def decode_entities(string):
""" Decodes HTML entities in the given string ("&lt;" => "<").
"""
# http://snippets.dzone.com/posts/show/4569
def replace_entity(match):
hash, hex, name = match.group(1), match.group(2), match.group(3)
if hash == "#" or name.isdigit():
if hex == "":
return unichr(int(name)) # "&#38;" => "&"
if hex.lower() == "x":
return unichr(int("0x" + name, 16)) # "&#x26;" => "&"
else:
cp = htmlentitydefs.name2codepoint.get(name) # "&amp;" => "&"
return unichr(cp) if cp else match.group() # "&foo;" => "&foo;"
if isinstance(string, basestring):
return RE_UNICODE.subn(replace_entity, string)[0]
return string
def encode_url(string):
return urllib.quote_plus(bytestring(string)) # "black/white" => "black%2Fwhite".
def decode_url(string):
return urllib.unquote_plus(string)
_TEMPORARY_FILES = []
def openable(string, **kwargs):
""" Returns the path to a temporary file that contains the given string.
"""
f = tempfile.NamedTemporaryFile(**kwargs)
f.write(string)
f.seek(0)
_TEMPORARY_FILES.append(f) # Delete when program terminates.
return f.name
#### INTROSPECTION #################################################################################
# URL paths are routed to handler functions, whose arguments represent URL path & query parameters.
# So we need to know what the arguments and keywords arguments are at runtime.
def define(f):
""" Returns (name, type, tuple, dict) for the given function,
with a tuple of argument names and a dict of keyword arguments.
If the given function has *args, returns True instead of tuple.
If the given function has **kwargs, returns True instead of dict.
"""
def undecorate(f): # "__closure__" in Py3.
while getattr(f, "func_closure", None):
f = [v.cell_contents for v in getattr(f, "func_closure")]
f = [v for v in f if callable(v)]
f = f[0] # We need to guess (arg could also be a function).
return f
f = undecorate(f)
a = inspect.getargspec(f) # (names, *args, **kwargs, values)
i = len(a[0]) - len(a[3] or [])
x = tuple(a[0][:i])
y = dict(zip(a[0][i:], a[3] or []))
x = x if not a[1] else True
y = y if not a[2] else True
return (f.__name__, type(f), x, y)
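# For illustration, a hedged sketch of define() on a hypothetical handler:
#
# >>> def products(id, page=1, **kwargs):
# >>>     return id, page
# >>> define(products)   # => ("products", <type 'function'>, ("id",), True)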
#### DATABASE ######################################################################################
#--- DATABASE --------------------------------------------------------------------------------------
# A simple wrapper for SQLite and MySQL databases.
# Database type:
SQLITE, MYSQL = "sqlite", "mysql"
# Database host:
LOCALHOST = "127.0.0.1"
class Row(dict):
def __init__(self, cursor, row):
""" Row as dictionary.
"""
d = cursor.description
dict.__init__(self, ((d[i][0], v) for i, v in enumerate(row)))
def __getattr__(self, k):
return self[k] # Row.[field]
class DatabaseError(Exception):
pass
class Database(object):
def __init__(self, name, **kwargs):
""" Creates and opens the SQLite database with the given name.
"""
k = kwargs.get
self._name = name
self._type = k("type", SQLITE)
self._host = k("host", LOCALHOST)
self._port = k("port", 3306)
self._user = k("user", (k("username", "root"), k("password", "")))
self._factory = k("factory", Row)
self._timeout = k("timeout", 10)
self._connection = None
if kwargs.get("connect", True):
self.connect()
if kwargs.get("schema"):
# Database(schema="create table if not exists" `...`)
# initializes the database table and index structure.
for q in kwargs["schema"].split(";"):
self.execute(q+";", commit=False)
self.commit()
@property
def name(self):
""" Yields the database name (for SQLITE, file path).
"""
return self._name
@property
def type(self):
""" Yields the database type (SQLITE or MYSQL).
"""
return self._type
@property
def host(self):
""" Yields the database server host (MYSQL).
"""
return self._host
@property
def port(self):
""" Yields the database server port (MYSQL).
"""
return self._port
@property
def connection(self):
""" Yields the sqlite3.Connection object.
"""
return self._connection
def connect(self):
if self._type == SQLITE:
self._connection = sqlite.connect(self._name, timeout=self._timeout)
self._connection.row_factory = self._factory
if self._type == MYSQL:
import MySQLdb
self._connection = MySQLdb.connect(
host = self._host,
port = self._port,
user = self._user[0],
passwd = self._user[1],
connect_timeout = self._timeout,
use_unicode = True,
charset = "utf8"
)
self._connection.row_factory = self._factory
self._connection.cursor().execute("create database if not exists `%s`" % self._name)
self._connection.cursor().execute("use `%s`" % self._name)
def disconnect(self):
if self._connection is not None:
self._connection.commit()
self._connection.close()
self._connection = None
def execute(self, sql, values=(), first=False, commit=True):
""" Executes the given SQL query string and returns an iterator of rows.
With first=True, returns the first row.
"""
try:
r = self._connection.cursor().execute(sql, values)
if commit:
self._connection.commit()
except Exception as e:
# "OperationalError: database is locked" means that
# SQLite is receiving too many concurrent write ops.
# A write operation locks the entire database;
# other threaded connections may time out waiting.
# In this case you can raise Database(timeout=10),
# lower Application.run(threads=10) or switch to MySQL or Redis.
self._connection.rollback()
raise DatabaseError(str(e))
return r.fetchone() if first else r
def commit(self):
""" Commits changes (pending insert/update/delete queries).
"""
self._connection.commit()
def rollback(self):
""" Discard changes since the last commit.
"""
self._connection.rollback()
def __call__(self, *args, **kwargs):
return self.execute(*args, **kwargs)
def __repr__(self):
return "Database(name=%s)" % repr(self._name)
def __del__(self):
try:
self.disconnect()
except:
pass
@property
def batch(self):
return Database._batch.setdefault(self._name, DatabaseTransaction(self._name, **self.__dict__))
_batch = {} # Shared across all instances.
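# A hedged usage sketch (the table, columns and values below are hypothetical):
#
# >>> db = Database("products.db", schema="create table if not exists `products` (`id` integer, `name` text)")
# >>> db.execute("insert into `products` values (?, ?);", (1, "espresso"))
# >>> db.execute("select * from `products` where id=?;", (1,), first=True).name   # => "espresso"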
#--- DATABASE TRANSACTION BUFFER -------------------------------------------------------------------
class DatabaseTransaction(Database):
def __init__(self, name, **kwargs):
""" Database.batch.execute() stores given the SQL query in RAM memory, across threads.
Database.batch.commit() commits all buffered queries.
This can be combined with @app.task() to periodically write batches to the database
(instead of writing on each request).
"""
Database.__init__(self, name, **dict(kwargs, connect=False))
self._queue = []
def execute(self, sql, values=()):
self._queue.append((sql, values))
def commit(self):
q, self._queue = self._queue, []
if q:
try:
Database.connect(self) # Connect in this thread.
for sql, v in q:
Database.execute(self, sql, v, commit=False)
Database.commit(self)
except DatabaseError as e:
Database.rollback(self) # Data in q will be lost.
raise e
def rollback(self):
self._queue = []
def __len__(self):
return len(self._queue)
def __repr__(self):
return "DatabaseTransaction(name=%s)" % repr(self._name)
@property
def batch(self):
raise AttributeError
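# A hedged sketch of the batch buffer (continuing the hypothetical `db` above):
# queries are queued in RAM and flushed in one transaction, e.g. from an
# @app.task() handler instead of on every request:
#
# >>> db.batch.execute("insert into `log` values (?, ?);", ("127.0.0.1", "/api"))
# >>> db.batch.commit()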
#---------------------------------------------------------------------------------------------------
# MySQL on Mac OS X installation notes:
# 1) Download Sequel Pro: http://www.sequelpro.com (GUI).
# 2) Download MySQL .dmg: http://dev.mysql.com/downloads/mysql/ (for 64-bit Python, 64-bit MySQL).
# 3) Install the .pkg, startup item and preferences pane.
# 4) Start server in preferences pane (user: "root", password: "").
# 5) Command line: open -a "TextEdit" .bash_profile =>
# 6) export PATH=~/bin:/usr/local/bin:/usr/local/mysql/bin:$PATH
# 7) Command line: sudo pip install MySQL-python
# 8) Command line: sudo ln -s /usr/local/mysql/lib/libmysqlclient.xx.dylib
# /usr/lib/libmysqlclient.xx.dylib
# 9) import MySQLdb
#### RATE LIMITING #################################################################################
# With @app.route(path, limit=True), the decorated URL path handler function calls RateLimit().
# For performance, rate limiting uses a RAM cache of api keys + the time of the last request.
# This will not work with multi-processing, since each process gets its own RAM.
_RATELIMIT_CACHE = {} # RAM cache of request counts.
_RATELIMIT_LOCK = threading.RLock()
SECOND, MINUTE, HOUR, DAY = 1., 60., 60*60., 60*60*24.
class RateLimitError(Exception):
pass
class RateLimitExceeded(RateLimitError):
pass
class RateLimitForbidden(RateLimitError):
pass
class RateLimit(Database):
def __init__(self, name="rate.db", **kwargs):
""" A database for rate limiting API requests.
It manages a table with (key, path, limit, time) entries.
It grants each key a rate (number of requests / time) for a URL path.
It keeps track of the number of requests in local memory (i.e., RAM).
If RateLimit()() is called with the optional limit and time arguments,
unknown keys are temporarily granted this rate.
"""
Database.__init__(self, name, **dict(kwargs, factory=None, schema=(
"create table if not exists `rate` ("
"`key` text," # API key (e.g., ?key="1234").
"`path` text," # API URL path (e.g., "/api/1/").
"`limit` integer," # Maximum number of requests.
"`time` float" # Time frame.
");"
"create index if not exists `rate1` on rate(key);"
"create index if not exists `rate2` on rate(path);")
))
self.load()
@property
def cache(self):
return _RATELIMIT_CACHE
@property
def lock(self):
return _RATELIMIT_LOCK
@property
def key(self, pairs=("rA","aZ","gQ","hH","hG","aR","DD")):
""" Yields a new random key ("ZjNmYTc4ZDk0MTkyYk...").
"""
k = str(random.getrandbits(256))
k = hashlib.sha256(k).hexdigest()
k = base64.b64encode(k, random.choice(pairs)).rstrip('==')
return k
def reset(self):
self.cache.clear()
self.load()
def load(self):
""" For performance, rate limiting is handled in memory (i.e., RAM).
Loads the stored rate limits in memory (100,000 records ~= 5MB RAM).
"""
with self.lock:
if not self.cache:
# Lock concurrent threads when modifying cache.
for r in self.execute("select * from `rate`;"):
self.cache[(r[0], r[1])] = (0, r[2], r[3], _time.time())
self._rowcount = len(self.cache)
def set(self, key, path="/", limit=100, time=HOUR):
""" Sets the rate for the given key and path,
where limit is the maximum number of requests in the given time (e.g., 100/hour).
"""
# Update database.
p = "/" + path.strip("/")
q1 = "delete from `rate` where key=? and path=?;"
q2 = "insert into `rate` values (?, ?, ?, ?);"
self.execute(q1, (key, p), commit=False)
self.execute(q2, (key, p, limit, time))
# Update cache.
with self.lock:
self.cache[(key, p)] = (0, limit, time, _time.time())
self._rowcount += 1
return (key, path, limit, time)
def get(self, key, path="/"):
""" Returns the rate for the given key and path (or None).
"""
p = "/" + path.strip("/")
q = "select * from `rate` where key=? and path=?;"
return self.execute(q, (key, p), first=True, commit=False)
def __setitem__(self, k, v): # (key, path), (limit, time)
return self.set(k[0], k[1], v[0], v[1])
def __getitem__(self, k): # (key, path)
return self.get(*k)
def __contains__(self, key, path="%"):
""" Returns True if the given key exists (for the given path).
"""
q = "select * from `rate` where key=? and path like ?;"
return self.execute(q, (key, path), first=True, commit=False) is not None
def __call__(self, key, path="/", limit=None, time=None, reset=100000):
""" Increases the (cached) request count by 1 for the given key and path.
If the request count exceeds its limit, raises RateLimitExceeded.
If the optional limit and time are given, unknown keys (!= None)
are given this rate limit - as long as the cache exists in memory.
Otherwise a RateLimitForbidden is raised.
"""
with self.lock:
t = _time.time()
p = "/" + path.strip("/")
r = self.cache.get((key, p))
# Reset the cache if too large (e.g., 1M+ IP addresses).
if reset and reset < len(self.cache) and reset > self._rowcount:
self.reset()
# Unknown key (apply default limit / time rate).
if r is None and key is not None and limit is not None and time is not None:
self.cache[(key, p)] = r = (0, limit, time, t)
# Unknown key (apply root key, if any).
if r is None and p != "/":
self.cache.get((key, "/"))
if r is None:
raise RateLimitForbidden
# Limit reached within time frame (raise error).
elif r[0] >= r[1] and r[2] > t - r[3]:
raise RateLimitExceeded
# Limit reached out of time frame (reset count).
elif r[0] >= r[1]:
self.cache[(key, p)] = (1, r[1], r[2], t)
# Limit not reached (increment count).
elif r[0] < r[1]:
self.cache[(key, p)] = (r[0] + 1, r[1], r[2], r[3])
#print(self.cache.get((key, path)))
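# A hedged usage sketch (key, path and rate values are illustrative):
#
# >>> rate = RateLimit("rate.db")
# >>> k = rate.key                          # new random API key
# >>> rate.set(k, path="/api", limit=100, time=HOUR)
# >>> rate(k, "/api")                       # +1 request; raises RateLimitExceeded
# >>>                                       # once 100 requests/hour is exceeded.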
#### ROUTER ########################################################################################
# The @app.route(path) decorator registers each URL path handler in Application.router.
class RouteError(Exception):
pass
class Router(dict):
def __init__(self):
""" A router resolves URL paths to handler functions.
"""
pass
def __setitem__(self, path, handler):
""" Defines the handler function for the given URL path.
The path is a slash-formatted string (e.g., "/api/1/en/parser").
The handler is a function that takes
arguments (path) and keyword arguments (query data).
"""
p = "/" + path.strip("/")
p = p.lower()
p = p.encode("utf8") if isinstance(p, unicode) else p
# Store the handler + its argument names (tuple(args), dict(kwargs)),
# so that we can call this function without (all) keyword arguments,
# if it does not take (all) query data.
if callable(handler):
dict.__setitem__(self, p, (handler, define(handler)[2:]))
else:
dict.__setitem__(self, p, (handler, ((), {})))
def __call__(self, path, **data):
""" Calls the handler function for the given URL path.
If no handler is found, raises a RouteError.
If a base handler is found (e.g., "/api" for "/api/1/en"),
calls the handler with arguments (e.g., handler("1", "en")).
"""
if not isinstance(path, tuple):
path = path.strip("/").split("/") # ["api", "1", "en"]
n = len(path)
for i in xrange(n + 1):
p0 = "/" + "/".join(path[:n-i])
p0 = p0.lower() # "/api/1/en", "/api/1", "/api", ...
p1 = path[n-i:] # [], ["en"], ["1", "en"], ...
if p0 in self:
(handler, (args, kwargs)) = self[p0]
i = len(p1)
j = len(args) if args is not True else i
# Handler takes 1 argument, 0 given (pass None for convenience).
if i == 0 and j == 1:
p1 = (None,); i=j
# Handler does not take path.
if i != j:
continue
# Handler is a string / dict.
if not callable(handler):
return handler
# Handler takes path, but no query data.
if not kwargs:
return handler(*p1)
# Handler takes path and all query data.
if kwargs is True:
return handler(*p1, **data)
# Handler takes path and some query data.
return handler(*p1, **dict((k, v) for k, v in data.items() if k in kwargs))
# No handler.
raise RouteError
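# A hedged sketch of path resolution (the handler below is illustrative):
#
# >>> router = Router()
# >>> router["/api"] = lambda v, lang: "%s/%s" % (v, lang)
# >>> router("/api/1/en")   # resolves to the "/api" handler => "1/en"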
#### APPLICATION ###################################################################################
#--- APPLICATION ERRORS & REQUESTS -----------------------------------------------------------------
class HTTPRequest(object):
def __init__(self, app, ip, path="/", method="get", data={}, headers={}):
""" A HTTP request object with metadata returned from app.request.
"""
self.app = app
self.ip = ip
self.path = "/" + path.strip("/")
self.method = method.lower()
self.data = dict(data)
self.headers = dict(headers)
def __repr__(self):
return "HTTPRequest(ip=%s, path=%s)" % repr(self.ip, self.path)
class HTTPRedirect(Exception):
def __init__(self, url, code=303):
""" A HTTP redirect raised in an @app.route() handler.
"""
self.url = url
self.code = code
def __repr__(self):
return "HTTPRedirect(url=%s)" % repr(self.url)
class HTTPError(Exception):
def __init__(self, status="", message="", traceback=""):
""" A HTTP error raised in an @app.route() handler + passed to @app.error().
"""
self.code = int(status.split(" ")[0])
self.status = status
self.message = message
self.traceback = traceback or ""
def __repr__(self):
return "HTTPError(status=%s)" % repr(self.status)
def _HTTPErrorSubclass(status):
return type("HTTP%sError" % status.split(" ")[0], (HTTPError,), {'__init__': \
lambda self, message="", traceback="": HTTPError.__init__(self, status, message, traceback)})
HTTP200OK = _HTTPErrorSubclass("200 OK")
HTTP401Authentication = _HTTPErrorSubclass("401 Authentication")
HTTP403Forbidden = _HTTPErrorSubclass("403 Forbidden")
HTTP404NotFound = _HTTPErrorSubclass("404 Not Found")
HTTP429TooManyRequests = _HTTPErrorSubclass("429 Too Many Requests")
HTTP500InternalServerError = _HTTPErrorSubclass("500 InternalServerError")
HTTP503ServiceUnavailable = _HTTPErrorSubclass("503 ServiceUnavailable")
#--- APPLICATION THREAD-SAFE DATA ------------------------------------------------------------------
# With a multi-threaded server, each thread requires its own local data (i.e., database connection).
# Local data can be initialized with @app.thread(START):
#
# >>> @app.thread(START)
# >>> def db():
# >>> g.db = Database()
# >>>
# >>> @app.route("/")
# >>> def index(*path, db=None):
# >>> print(db) # = Database object.
#
# The thread-safe database connection can then be retrieved from
# app.thread.db, g.db, or as a keyword argument of a URL handler.
class localdict(dict):
def __init__(self, data=None, **kwargs):
""" Thread-safe dictionary.
"""
self.__dict__["_data"] = data if data != None else threading.local()
self.__dict__.update(kwargs) # Attributes are global in every thread.
def items(self):
return self._data.__dict__.items()
def keys(self):
return self._data.__dict__.keys()
def values(self):
return self._data.__dict__.values()
def update(self, d):
return self._data.__dict__.update(d)
def clear(self):
return self._data.__dict__.clear()
def pop(self, *kv):
return self._data.__dict__.pop(*kv)
def setdefault(self, k, v=None):
return self._data.__dict__.setdefault(k, v)
def set(self, k, v):
return setattr(self._data, k, v)
def get(self, k, default=None):
return getattr(self._data, k, default)
def __delitem__(self, k):
return delattr(self._data, k)
def __getitem__(self, k):
return getattr(self._data, k)
def __setitem__(self, k, v):
return setattr(self._data, k, v)
def __delattr__(self, k):
return delattr(self._data, k)
def __getattr__(self, k):
return getattr(self._data, k)
def __setattr__(self, k, v):
return setattr(self._data, k, v)
def __len__(self):
return len(self._data.__dict__)
def __iter__(self):
return iter(self._data.__dict__)
def __contains__(self, k):
return k in self._data.__dict__
def __str__(self):
return repr(self)
def __repr__(self):
return "localdict({%s})" % ", ".join(
("%s: %s" % (repr(k), repr(v)) for k, v in self.items()))
# Global alias for app.thread (Flask-style):
g = localdict(data=cp.thread_data)
def threadsafe(function):
""" The @threadsafe decorator ensures that no two threads execute the function simultaneously.
"""
# In some cases, global data must be available across all threads (e.g., rate limits).
# Atomic operations like dict.get() or list.append() (= single execution step) are thread-safe,
# but some operations like dict[k] += 1 are not, and require a lock.
# http://effbot.org/zone/thread-synchronization.htm
#
# >>> count = defaultdict(int)
# >>> @threadsafe
# >>> def inc(k):
# >>> count[k] += 1
#
lock = threading.RLock()
def decorator(*args, **kwargs):
with lock:
v = function(*args, **kwargs)
return v
return decorator
#--- APPLICATION -----------------------------------------------------------------------------------
# With Apache + mod_wsgi, the Application instance must be named "application".
# Server host.
LOCALHOST = "127.0.0.1"
INTRANET = "0.0.0.0"
# Server thread handlers.
START = "start"
STOP = "stop"
class ApplicationError(Exception):
pass
class Application(object):
def __init__(self, name=None, path=SCRIPT, static="./static", rate="rate.db"):
""" A web app served by a WSGI-server that starts with App.run().
By default, the app is served from the folder of the script that imports pattern.server.
By default, static content is served from the given subfolder.
@App.route(path) defines a URL path handler.
@App.error(code) defines a HTTP error handler.
"""
# RateLimit db resides in app folder:
rate = os.path.join(path, rate)
self._name = name # App name.
self._path = path # App path.
self._host = None # Server host, see App.run().
self._port = None # Server port, see App.run().
self._app = None # CherryPy Application object.
self._up = False # True if server is up & running.
self._cache = {} # Memoize cache.
self._cached = 1000 # Memoize cache size.
self._static = static # Static content folder.
self._rate = rate # RateLimit db name, see also App.route(limit=True).
self.router = Router() # Router object, maps URL paths to handlers.
self.thread = App.Thread() # Thread-safe dictionary.
os.chdir(path)
@property
def name(self):
return self._name
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def up(self):
return self._up
running = up
@property
def path(self):
""" Yields the absolute path to the folder containing the app.
"""
return self._path
@property
def static(self):
""" Yields the absolute path to the folder with static content.
"""
return os.path.join(self._path, self._static)
@property
def session(self):
""" Yields the dictionary of session data.
"""
return cp.session
@property
def request(self):
""" Yields a request object with metadata
(IP address, request path, query data and headers).
"""
r = cp.request # Deep copy (ensures garbage collection).
return HTTPRequest(
app = self,
ip = r.remote.ip,
path = r.path_info,
method = r.method,
data = r.params,
headers = r.headers)
@property
def response(self):
""" Yields a response object with metadata
(status, headers).
"""
return cp.response
@property
def elapsed(self):
""" Yields the elapsed time since the start of the request.
"""
return time.time() - cp.request.time # See also _request_time().
def _cast(self, v):
""" Returns the given value as a string (used to cast handler functions).
If the value is a dictionary, returns a JSON-string.
If the value is a generator, starts a stream.
If the value is an iterable, joins the values with a space.
"""
if isinstance(v, basestring):
return v
if isinstance(v, cp.lib.file_generator): # serve_file()
return v
if isinstance(v, dict):
cp.response.headers["Content-Type"] = "application/json; charset=utf-8"
cp.response.headers["Access-Control-Allow-Origin"] = "*" # CORS
return json.dumps(v)
if isinstance(v, types.GeneratorType):
cp.response.stream = True
return iter(self._cast(v) for v in v)
if isinstance(v, (list, tuple, set)):
return " ".join(self._cast(v) for v in v)
if isinstance(v, HTTPError):
raise cp.HTTPError(v.status, message=v.message)
if v is None:
return ""
try: # (bool, int, float, object.__unicode__)
return unicode(v)
except:
return encode_entities(repr(v))
@cp.expose
def default(self, *path, **data):
""" Resolves URL paths to handler functions and casts the return value.
"""
# If there is an app.thread.db connection,
# pass it as a keyword argument named "db".
# If there is a query parameter named "db",
# it is overwritten (the reverse is not safe).
for k, v in g.items():
data[k] = v
# Call the handler function for the given path.
# Call @app.error(404) if no handler is found.
# Call @app.error(403) if rate limit forbidden (= no API key).
# Call @app.error(429) if rate limit exceeded.
# Call @app.error(503) if a database error occurs.
try:
v = self.router(path, **data)
except RouteError:
raise cp.HTTPError("404 Not Found")
except RateLimitForbidden:
raise cp.HTTPError("403 Forbidden")
except RateLimitExceeded:
raise cp.HTTPError("429 Too Many Requests")
except DatabaseError as e:
raise cp.HTTPError("503 Service Unavailable", message=str(e))
except HTTPRedirect as e:
raise cp.HTTPRedirect(e.url)
except HTTPError as e:
raise cp.HTTPError(e.status, message=e.message)
v = self._cast(v)
#print(self.elapsed)
return v
def unlimited(self, v=None):
self._ratelimited = False # See App.route() below.
return v
def route(self, path, limit=False, time=None, key=lambda data: data.get("key"), reset=100000):
""" The @app.route(path) decorator defines the handler function for the given path.
The function can take arguments (path) and keyword arguments (query data), e.g.,
if no handler exists for URL "/api/1/en", but a handler exists for URL "/api/1",
this handler will be called with 1 argument: "en".
It returns a string, a generator or a dictionary (which is parsed to a JSON-string).
"""
_a = (key, limit, time, reset) # Avoid ambiguity with key=lambda inside define().
def decorator(handler):
def ratelimited(handler):
# With @app.route(path, limit=True), rate limiting is applied.
# The handler function is wrapped in a function that first calls
# RateLimit()(key, path, limit, time) before calling the handler.
# By default, a query parameter "key" is expected.
# If the key is known, apply rate limiting (429 Too Many Requests).
# If the key is unknown or None, deny access (403 Forbidden).
# If the key is unknown and a default limit and time are given,
# add the key and grant the given credentials, e.g.:
# @app.route(path, limit=100, time=HOUR, key=lambda data: app.request.ip).
# This grants each IP-address a 100 requests per hour.
@self.thread(START)
def connect():
g.rate = RateLimit(name=self._rate)
def wrapper(*args, **kwargs):
self = cp.request.app.root
self._ratelimited = True
v = handler(*args, **kwargs)
if self._ratelimited: # App.unlimited() in handler() sets it to False.
self.rate(
key = _a[0](cp.request.params),
path = "/" + cp.request.path_info.strip("/"),
limit = _a[1], # Default limit for unknown keys.
time = _a[2], # Default time for unknown keys.
reset = _a[3] # Threshold for clearing cache.
)
return v
return wrapper
if limit is True or (limit is not False and limit is not None and time is not None):
handler = ratelimited(handler)
self.router[path] = handler # Register the handler.
return handler
return decorator
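# A hedged sketch (app name, path and handler below are illustrative):
#
# >>> app = Application(name="store")
# >>>
# >>> @app.route("/products")
# >>> def products(id=None, page=1):
# >>>     return {"id": id, "page": page}   # dicts are served as JSON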
def error(self, code="*"):
""" The @app.error(code) decorator defines the handler function for the given HTTP error.
The function takes a HTTPError object and returns a string.
"""
def decorator(handler):
# CherryPy error handlers take keyword arguments.
# Wrap as a HTTPError and pass it to the handler.
def wrapper(status="", message="", traceback="", version=""):
# Avoid CherryPy bug "ValueError: status message was not supplied":
v = handler(HTTPError(status, message, traceback))
v = self._cast(v) if not isinstance(v, HTTPError) else repr(v)
return v
# app.error("*") catches all error codes.
if code in ("*", None):
cp.config.update({"error_page.default": wrapper})
# app.error(404) catches 404 error codes.
elif isinstance(code, (int, basestring)):
cp.config.update({"error_page.%s" % code: wrapper})
# app.error((404, 500)) catches 404 + 500 error codes.
elif isinstance(code, (tuple, list)):
for x in code:
cp.config.update({"error_page.%s" % x: wrapper})
return handler
return decorator
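# A hedged sketch of a custom error page (the handler name is illustrative):
#
# >>> @app.error(404)
# >>> def not_found(error):
# >>>     return "Not found: %s" % error.status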
def view(self, template, cached=True):
""" The @app.view(template) decorator defines a template to format the handler function.
The function returns a dict of keyword arguments for Template.render().
"""
def decorator(handler):
def wrapper(*args, **kwargs):
if not hasattr(template, "render"): # bottle.py templates have render() too.
t = Template(template, root=self.static, cached=cached)
else:
t = template
v = handler(*args, **kwargs)
if isinstance(v, dict):
return t.render(**v) # {kwargs}
return t.render(*v) # (globals(), locals(), {kwargs})
return wrapper
return decorator
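# A hedged sketch (the "index.html" template and its variables are hypothetical):
#
# >>> @app.route("/")
# >>> @app.view("index.html")
# >>> def index(*path):
# >>>     return {"title": "Home"}   # keyword arguments for Template.render()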
class Thread(localdict):
""" The @app.thread(event) decorator can be used to initialize thread-safe data.
Get data (e.g., a database connection) with app.thread.[name] or g.[name].
"""
def __init__(self):
localdict.__init__(self, data=cp.thread_data, handlers=set())
def __call__(self, event=START): # START / STOP
def decorator(handler):
def wrapper(id):
return handler()
# If @app.thread() is called twice for
# the same handler, register it only once.
if not (event, handler) in self.handlers:
self.handlers.add((event, handler))
cp.engine.subscribe(event + "_thread", wrapper)
return handler
return decorator
@property
def rate(self, name="rate"):
""" Yields a thread-safe connection to the app's RateLimit db.
"""
if not hasattr(g, name): setattr(g, name, RateLimit(name=self._rate))
return getattr(g, name)
def bind(self, name="db"):
""" The @app.bind(name) decorator binds the given function to a keyword argument
that can be used with @app.route() handlers.
The return value is stored thread-safe in app.thread.[name] & g.[name].
The return value is available in handlers as a keyword argument [name].
"""
# This is useful for multi-threaded database connections:
# >>>
# >>> @app.bind("db")
# >>> def db():
# >>> return Database("products.db")
# >>>
# >>> @app.route("/products")
# >>> def products(id, db=None):
# >>> return db.execute("select * from products where id=?", (id,))
def decorator(handler):
return self.thread(START)(lambda: setattr(g, name, handler()))
return decorator
@property
def cached(self):
""" The @app.cached decorator caches the return value of the given handler.
This is useful if the handler is computationally expensive,
and often called with the same arguments (e.g., recursion).
"""
def decorator(handler):
def wrapper(*args, **kwargs):
# Cache return value for given arguments
# (except db & rate Connection objects).
kw = dict(kwargs)
kw.pop("db", None)
kw.pop("rate", None)
k = (handler, pickle.dumps(args), pickle.dumps(kw))
if len(self._cache) >= self._cached:
self._cache.clear()
if k not in self._cache:
self._cache[k] = handler(*args, **kwargs)
return self._cache[k]
return wrapper
return decorator
memoize = cached
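# A minimal usage sketch (fib() is illustrative; repeated recursive calls hit the cache):
#
# >>> @app.cached
# >>> def fib(n):
# >>> return n if n < 2 else fib(n-1) + fib(n-2)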
def task(self, interval=MINUTE):
""" The @app.task(interval) decorator will call the given function repeatedly (in a thread).
For example, this can be used to commit a Database.batch periodically,
instead of executing and committing to a Database during each request.
"""
def decorator(handler):
_, _, args, kwargs = define(handler)
def wrapper():
# Bind data from @app.thread(START) or @app.set().
m = cp.process.plugins.ThreadManager(cp.engine)
m.acquire_thread()
# If there is an app.thread.db connection,
# pass it as a keyword argument named "db".
return handler(**dict((k, v) for k, v in g.items() if k in kwargs))
p = cp.process.plugins.BackgroundTask(interval, wrapper)
p.start()
return handler
return decorator
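# A minimal usage sketch (assumes a "db" connection bound with @app.bind("db")):
#
# >>> @app.task(interval=MINUTE)
# >>> def flush(db=None):
# >>> db.batch.commit() # commit queued statements periodically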
def redirect(path, code=303):
""" Redirects the server to another route handler path
(or to another server for absolute URL's).
"""
raise HTTPRedirect(path, int(code))
def run(self, host=LOCALHOST, port=8080, threads=30, queue=20, timeout=10, sessions=False, embedded=False, ssl=None, debug=True):
""" Starts the server.
Static content (e.g., "g/img.jpg") is served from the App.static subfolder (e.g., "static/g").
With threads=10, the server can handle up to 10 concurrent requests.
With queue=10, the server will queue up to 10 waiting requests.
With embedded=True, runs under Apache mod_wsgi.
With ssl=(key, certificate), runs under https:// (see certificate() function).
With debug=False, starts a production server.
"""
# Do nothing if the app is running.
if self._up:
return
self._host = str(host)
self._port = int(port)
self._up = True
# Production environment disables errors.
if debug is False:
cp.config.update({"environment": "production"})
# Embedded environment (mod_wsgi) disables errors & signal handlers.
if embedded is True:
cp.config.update({"environment": "embedded"})
# Global configuration.
# If more concurrent requests are made than can be queued / handled,
# the server will time out and a "connection reset by peer" occurs.
# Note: SQLite cannot handle many concurrent writes (e.g., UPDATE).
else:
cp.config.update({
"server.socket_host" : self._host,
"server.socket_port" : self._port,
"server.socket_timeout" : max(1, timeout),
"server.socket_queue_size" : max(1, queue),
"server.thread_pool" : max(1, threads),
"server.thread_pool_max" : -1
})
# Secure SSL (https://).
if ssl:
cp.config.update({
"server.ssl_module" : "builtin",
"server.ssl_private_key" : ssl[0] if os.path.exists(ssl[0]) else openable(ssl[0]),
"server.ssl_certificate" : ssl[1] if os.path.exists(ssl[1]) else openable(ssl[1])
})
# Static content is served from the /static subfolder,
# e.g., <img src="g/cat.jpg" /> refers to "/static/g/cat.jpg".
self._app = cp.tree.mount(self, "/",
config={"/": {
"tools.staticdir.on" : self.static is not None,
"tools.staticdir.dir" : self.static,
"tools.sessions.on" : sessions
}})
# Static content can include favicon.ico
self.favicon_ico = cp.tools.staticfile.handler(
os.path.join(self.static, "favicon.ico")
)
# Relative root = project path.
os.chdir(self._path)
# With mod_wsgi, stdout is restricted.
if embedded:
sys.stdout = sys.stderr
else:
atexit.register(self.stop)
cp.engine.start()
cp.engine.block()
def stop(self):
""" Stops the server (registered with atexit).
"""
try:
atexit._exithandlers.remove((self.stop, (), {}))
except:
pass
cp.engine.exit()
sys.stdout = sys.__stdout__
self._host = None
self._port = None
self._app = None
self._up = False
def __call__(self, *args, **kwargs):
# Called when deployed with mod_wsgi.
if self._app is not None:
return self._app(*args, **kwargs)
raise ApplicationError("application not running")
App = Application
#### CERTIFICATE ###################################################################################
# A certificate can be used to secure a web app (i.e., a https:// connection).
# A certificate confirms the owner's identity, as verified by a signer.
# This signer can be a trusted third party (e.g., Comodo), or the certificate can be self-signed.
# The certificate() function yields a free, self-signed certificate.
# Visitors will get a browser warning that the certificate is not signed by a trusted third party.
def certificate(host=LOCALHOST, country=None, state=None, city=None, company=None, contact=None, **kwargs):
""" Returns a (private key, certificate)-tuple for a secure SSL-encrypted https server.
Only works on Unix with OpenSSL.
"""
# Generate private key.
# > openssl genrsa 2048 -out ssl.key
s = subprocess.PIPE
p = ("openssl", "genrsa", "2048")
p = subprocess.Popen(p, stdin=s, stdout=s, stderr=s)
k = kwargs.get("key") or p.communicate()[0]
f = tempfile.NamedTemporaryFile(delete=False)
f.write(k)
f.close()
# Generate certificate.
# > openssl req -new -x509 -days 365 -key ssl.key -out ssl.crt
p = ("openssl", "req", "-new", "-x509", "-days", "365", "-key", f.name)
p = subprocess.Popen(p, stdin=s, stdout=s, stderr=s)
x = p.communicate("%s\n%s\n%s\n%s\n.\n%s\n%s\n" % (
country or ".", # BE
state or ".", # Antwerp
city or ".", # Antwerp
company or ".", # CLiPS
host or LOCALHOST, # Tom De Smedt
contact or "." # tom@organisms.be
))[0]
os.unlink(f.name)
return (k, x)
#k, x = certificate(country="BE", state="Antwerp", company="CLiPS", contact="tom@organisms.be")
#open("ssl.key", "w").write(k)
#open("ssl.crt", "w").write(x)
#app.run(ssl=("ssl.key", "ssl.crt"))
#---------------------------------------------------------------------------------------------------
# Apache + mod_wsgi installation notes (thanks to Frederik De Bleser).
# The APP placeholder is the URL of your app, e.g., pattern.emrg.be.
#
# 1) Create a DNS-record for APP, which maps the url to your server's IP-address.
#
# 2) sudo apt-get install apache2
# sudo apt-get install libapache2-mod-wsgi
#
# 3) sudo mkdir -p /www/APP/static
# sudo mkdir -p /www/APP/logs
#
# 4) sudo nano /etc/apache2/sites-available/APP
# > <VirtualHost *:80>
# > ServerName APP
# > DocumentRoot /www/APP/static
# > CustomLog /www/APP/logs/access.log combined
# > ErrorLog /www/APP/logs/error.log
# > WSGIScriptAlias / /www/APP/app.py
# > WSGIDaemonProcess APP processes=1 threads=x
# > WSGIProcessGroup APP
# > </VirtualHost>
#
# 5) sudo nano /www/APP/app.py
# > from pattern.server import App
# > from pattern.text import sentiment
# >
# > app = application = App() # mod_wsgi app must be available as "application"!
# >
# > @app.route("/api/1/sentiment", limit=100, time=HOUR, key=lambda data: app.request.ip)
# > def api_sentiment(q=None, lang="en"):
# > return {"polarity": sentiment(q, language=lang)[0]}
# >
# > app.run(embedded=True)
#
# 6) sudo a2ensite APP
# sudo apache2ctl configtest
# sudo service apache2 restart
#
# 7) Try: http://APP/api/1/sentiment?q=marvelously+extravagant&lang=en
#---------------------------------------------------------------------------------------------------
def redirect(path, code=303):
""" Redirects the server to another route handler path
(or to another server for absolute URL's).
"""
raise HTTPRedirect(path, int(code))
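# A minimal usage sketch (paths are illustrative):
#
# >>> @app.route("/old")
# >>> def old(*path, **data):
# >>> redirect("/new") # raises HTTPRedirect with code 303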
#---------------------------------------------------------------------------------------------------
def static(path, root=None, mimetype=None):
""" Returns the contents of the file at the given absolute path.
To serve relative paths from the app folder, use root=app.path.
"""
p = os.path.join(root or "", path)
p = os.path.realpath(p)
return cp.lib.static.serve_file(p, content_type=mimetype)
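# A minimal usage sketch ("report.pdf" is an assumed file in the app folder):
#
# >>> @app.route("/download")
# >>> def download():
# >>> return static("report.pdf", root=app.path, mimetype="application/pdf")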
#---------------------------------------------------------------------------------------------------
# http://cherrypy.readthedocs.org/en/latest/progguide/extending/customtools.html
def _register(event, handler):
""" Registers the given event handler (e.g., "on_end_request").
"""
k = handler.__name__
setattr(cp.tools, k, cp.Tool(event, handler))
cp.config.update({"tools.%s.on" % k: True})
def _request_start():
# Register request start time.
cp.request.time = time.time()
def _request_end():
#print(time.time() - cp.request.time)
pass
_register("on_start_resource", _request_start)
_register("on_end_request", _request_end)
#---------------------------------------------------------------------------------------------------
# The error template used when the error handler itself raises an error.
cp._cperror._HTTPErrorTemplate = \
"<h1>%(status)s</h1>\n<p>%(message)s</p>\n<pre>%(traceback)s</pre>"
#### TEMPLATE ######################################################################################
# A template is a HTML-file with placeholders, which can be variable names or Python source code.
# Based on: http://davidbau.com/archives/2011/09/09/python_templating_with_stringfunction.html
_MARKUP = [
r"\$[_a-z][\w]*", # $var
r"\$\{[_a-z][\w]*\}", # ${var}iable
r"\<\%=.*?\%\>", # <%= var + 1 %>
r"\<\%.*?\%\>", # <% print(var) %>
r"\<\%[^\n]*?" # SyntaxError (no closing tag)
]
# <% if x in y: %> ... <% end if %>
# <% for x in y: %> ... <% end for %>
_MARKUP.insert(0, r"\<\% if (.*?) : \%\>(.*)\<\% end if \%\>") # No "elif", "else" yet.
_MARKUP.insert(1, r"\<\% for (.*?) in (.*?) : \%\>(.*)\<\% end for \%\>")
_MARKUP = (p.replace(" ", r"\s*") for p in _MARKUP)
_MARKUP = "(%s)" % "|".join(_MARKUP)
_MARKUP = re.compile(_MARKUP, re.I | re.S | re.M)
class Template(object):
_cache = {}
def __init__(self, path, root=None, cached=True):
""" A template with placeholders and/or source code loaded from the given string or path.
Placeholders that start with $ are replaced with keyword arguments in Template.render().
Source code enclosed in <%= var + 1 %> is executed with eval().
Source code enclosed in <% write(var) %> is executed with exec().
"""
p = os.path.join(root or "", path)
k = hash(p)
b = k in Template._cache
# Caching enabled + template already cached.
if cached is True and b is True:
a = Template._cache[k]
# Caching disabled / template not yet cached.
if cached is False or b is False:
a = "".join(static(p, mimetype="text/html")) if os.path.exists(p) else path
a = self._compile(a)
# Caching enabled + template not yet cached.
if cached is True and b is False:
a = Template._cache.setdefault(k, a)
self._compiled = a
def _escape(self, s):
""" Returns a string with no leading indentation and escaped newlines.
"""
# Used in Template._compile() with eval() and exec().
s = s.replace("\n", "\\n")
s = textwrap.dedent(s)
return s
def _encode(self, v, indent=""):
""" Returns the given value as a string (empty string for None).
"""
# Used in Template._render().
v = "%s" % (v if v is not None else "")
v = v.replace("\n", "\n" + indent) if indent else v
return v
def _dict(self, k="", v=[]):
""" Returns a dictionary of keys k and values v, where k is a string.
Used in Template._render() with <for> blocks.
"""
# For example: "<% for $i, $x in enumerate([1, 2, 3]): %>",
# "$i, $x" is mapped to {"i": 0, "x": 1}, {"i": 1, "x": 2}, ...
# Nested tuples are not supported (e.g., "($i, ($k, $v))").
k = [k.strip("$ ") for k in k.strip("()").split(",")]
return dict(zip(k, v if len(k) > 1 else [v]))
def _compile(self, string):
""" Returns the template string as a (type, value, indent) list,
where type is either <str>, <arg>, <if>, <for>, <eval> or <exec>.
With <eval> and <exec>, value is a compiled code object
that can be executed with eval() or exec() respectively.
"""
a = []
i = 0
for m in _MARKUP.finditer(string):
s = m.group(1)
j = m.start(1)
n = string[:j].count("\n") # line number
w = re.compile(r"(^|\n)(.*?)$") # line indent
w = re.search(w, string[:j])
w = re.sub(r"[^\t]", " ", string[w.start(2):j])
if i != j:
a.append(("<str>", string[i:j], ""))
# $$escaped
if s.startswith("$") and j > 0 and string[j-1] == "$":
a.append(("<str>", s, ""))
# ${var}iable
elif s.startswith("${") and s.endswith("}"):
a.append(("<arg>", s[2:-1], w))
# $var
elif s.startswith("$"):
a.append(("<arg>", s[1:], w))
# <% if x in y: %> ... <% end if %>
elif s.startswith("<%") and m.group(2):
a.append(("<if>", (m.group(2), self._compile(m.group(3).lstrip("\n"))), w))
# <% for x in y: %> ... <% end for %>
elif s.startswith("<%") and m.group(4):
a.append(("<for>", (m.group(4), m.group(5), self._compile(m.group(6).lstrip("\n"))), w))
# <%= var + 1 %>
elif s.startswith("<%=") and s.endswith("%>"):
a.append(("<eval>", compile("\n"*n + self._escape(s[3:-2]), "<string>", "eval"), w))
# <% print(var) %>
elif s.startswith("<%") and s.endswith("%>"):
a.append(("<exec>", compile("\n"*n + self._escape(s[2:-2]), "<string>", "exec"), w))
else:
raise SyntaxError("template has no end tag for '%s' (line %s)" % (s, n+1))
i = m.end(1)
a.append(("<str>", string[i:], ""))
return a
def _render(self, compiled, *args, **kwargs):
""" Returns the rendered string as an iterator.
Replaces template placeholders with keyword arguments (if any).
Replaces source code with the return value of eval() or exec().
"""
k = {}
for d in args:
k.update(d)
k.update(kwargs)
k["template"] = template
indent = kwargs.pop("indent", False)
for cmd, v, w in compiled:
if indent is False:
w = ""
if cmd is None:
continue
elif cmd == "<str>":
yield self._encode(v, w)
elif cmd == "<arg>":
yield self._encode(k.get(v, "$" + v), w)
elif cmd == "<if>":
yield "".join(self._render(v[1], k)) if eval(v[0]) else ""
elif cmd == "<for>":
yield "".join(["".join(self._render(v[2], k, self._dict(v[0], i))) for i in eval(v[1], k)])
elif cmd == "<eval>":
yield self._encode(eval(v, k), w)
elif cmd == "<exec>":
o = StringIO()
k["write"] = o.write # Code blocks use write() for output.
exec(v, k)
yield self._encode(o.getvalue(), w)
del k["write"]
o.close()
def render(self, *args, **kwargs):
""" Returns the rendered template as a string.
Replaces template placeholders with keyword arguments (if any).
Replaces source code with the return value of eval() or exec().
The keyword arguments are used as namespace for eval() and exec().
For example, source code in Template.render(re=re) has access to the regex library.
Multiple dictionaries can be given, e.g.,
Template.render(globals(), locals(), foo="bar").
Code blocks in <% %> can use write() and template().
"""
return "".join(self._render(self._compiled, *args, **kwargs))
def template(string, *args, **kwargs):
""" Returns the rendered template as a string.
"""
if hasattr(string, "render"):
return string.render(*args, **kwargs)
root, cached = (
kwargs.pop("root", None),
kwargs.pop("cached", None))
if root is None and len(args) > 0 and isinstance(args[0], basestring):
root = args[0]
args = args[1:]
return Template(string, root, cached).render(*args, **kwargs)
#s = """
#<html>
#<head>
# <title>$title</title>
#</head>
#<body>
#<% for $i, $name in enumerate(names): %>
# <b><%= i+1 %>) Hello $name!</b>
#<% end for %>
#</body>
#</html>
#"""
#
#print(template(s.strip(), title="test", names=["Tom", "Walter"]))
#### HTML ##########################################################################################
# Useful HTML generators.
class HTML:
def _attrs(self, **kwargs):
""" Returns a string of HTML element attributes.
Use "css" for the CSS classname (since "class" is a reserved word).
"""
a = []
if "id" in kwargs:
a.append("id=\"%s\"" % kwargs.pop("id"))
if "name" in kwargs:
a.append("name=\"%s\"" % kwargs.pop("name"))
if "css" in kwargs:
a.append("class=\"%s\"" % kwargs.pop("css"))
for k, v in kwargs.items():
a.append("%s=\"%s\"" % (k, v))
return (" " + " ".join(a)).rstrip()
def div(self, content, **attributes):
""" Returns a string with a HTML <div> with the given content.
"""
return "<div%s>\n\t%s\n</div>\n" % (self._attrs(**attributes), content)
def span(self, content, **attributes):
""" Returns a string with a HTML <span> with the given content.
"""
return "<span%s>\n\t%s\n</span>\n" % (self._attrs(**attributes), content)
def table(self, rows=[], headers=[], striped=True, **attributes):
""" Returns a string with a HTML <table> for the given list,
where each item is a list of values.
With striped=True, generates <tr class="even|odd">.
With striped=True and headers, generates <td class="header[i]">.
"""
h = list(headers)
r = list(rows) if not h else [h] + list(rows)
a = ["<table%s>\n" % self._attrs(**attributes)]
if h:
a.append("\t<colgroup>\n")
a.extend("\t\t<col class=\"%s\">\n" % v for v in h)
a.append("\t</colgroup>\n")
for i, row in enumerate(r):
a.append("\t<tr%s>\n" % (" class=\"%s\"" % ("odd", "even")[i % 2] if striped else ""))
for j, v in enumerate(row):
if i == 0 and h:
a.append("\t\t<th>%s</th>\n" % v)
else:
a.append("\t\t<td>%s</td>\n" % v)
a.append("\t</tr>\n")
a.append("</table>\n")
return "".join(a)
def select(self, options={}, selected=None, **attributes):
""" Returns a string with a HTML <select> for the given dictionary,
where each dict item is an <option value="key">value</option>.
"""
a = ["<select%s>\n" % self._attrs(**attributes)]
for k, v in sorted(options.items()):
if k == selected:
a.append("\t<option value=\"%s\" selected>%s</option>\n" % (k, v))
else:
a.append("\t<option value=\"%s\">%s</option>\n" % (k, v))
a.append("</select>\n")
return "".join(a)
dropdown = select
html = HTML()
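# A minimal usage sketch (values are illustrative):
#
# >>> print(html.table(rows=[[1, "cat"], [2, "dog"]], headers=["id", "name"]))
# >>> print(html.select({"en": "English", "fr": "French"}, selected="en", name="lang"))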
####################################################################################################
#from pattern.en import sentiment
#
#app = App()
#app.rate[("1234", "/api/en/sentiment")] = (100, MINUTE)
#
#@app.bind("db")
#def db():
# return Database("log.db", schema="create table if not exists `log` (q text);")
#
## http://localhost:8080/whatever
#@app.route("/")
#def index(*path, **data):
# return "%s<br>%s" % (path, data.get("db"))
#
## http://localhost:8080/api/en/sentiment?q=awesome
##@app.route("/api/en/sentiment", limit=True)
#@app.route("/api/en/sentiment", limit=10, time=MINUTE, key=lambda data: app.request.ip)
#def nl_sentiment(q="", db=None):
# polarity, subjectivity = sentiment(q)
# db.batch.execute("insert into `log` (q) values (?);", (q,))
# return {"polarity": polarity}
#
#@app.task(interval=MINUTE)
#def log(db=None):
# print("committing log...")
# db.batch.commit()
#
#@app.error((403, 404, 429, 500, 503))
#def error(e):
# return "<h2>%s</h2><pre>%s</pre>" % (e.status, e.traceback)
#
#app.run(debug=True, threads=100, queue=50)
|
seraphln/wheel
|
refs/heads/master
|
wheel/ext/__init__.py
|
1
|
# coding=utf8
#
from flask_mail import Mail
from wheel.core.db import db
from wheel.core.cache import cache
from wheel.core.admin import configure_admin
from . import (generic, babel, blueprints, error_handlers, context_processors,
template_filters, before_request, views, themes, fixtures,
oauthlib, weasyprint, security, development)
def configure_extensions(app, admin):
cache.init_app(app)
babel.configure(app)
generic.configure(app)
Mail(app)
error_handlers.configure(app)
db.init_app(app)
themes.configure(app)
context_processors.configure(app)
template_filters.configure(app)
security.configure(app, db)
fixtures.configure(app, db)
# blueprints.load_from_packages(app)
blueprints.load_from_folder(app)
weasyprint.configure(app)
configure_admin(app, admin)
development.configure(app, admin)
before_request.configure(app)
views.configure(app)
oauthlib.configure(app)
return app
def configure_extensions_min(app, *args, **kwargs):
db.init_app(app)
security.init_app(app, db)
return app
|
t-wissmann/qutebrowser
|
refs/heads/master
|
tests/end2end/features/test_qutescheme_bdd.py
|
1
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import pytest_bdd as bdd
from qutebrowser.utils import qtutils
bdd.scenarios('qutescheme.feature')
@bdd.then(bdd.parsers.parse("the {kind} request should be blocked"))
def request_blocked(request, quteproc, kind):
blocking_set_msg = (
"Blocking malicious request from qute://settings/set?* to "
"qute://settings/set?*")
blocking_csrf_msg = (
"Blocking malicious request from "
"http://localhost:*/data/misc/qutescheme_csrf.html to "
"qute://settings/set?*")
blocking_js_msg = (
"[http://localhost:*/data/misc/qutescheme_csrf.html:0] Not allowed to "
"load local resource: qute://settings/set?*"
)
unsafe_redirect_msg = "Load error: ERR_UNSAFE_REDIRECT"
blocked_request_msg = "Load error: ERR_BLOCKED_BY_CLIENT"
webkit_error_invalid = (
"Error while loading qute://settings/set?*: Invalid qute://settings "
"request")
webkit_error_unsupported = (
"Error while loading qute://settings/set?*: Unsupported request type")
if request.config.webengine and qtutils.version_check('5.12'):
# On Qt 5.12, we mark qute:// as a local scheme, causing most requests
# to be blocked by Chromium internally (logging to the JS console).
expected_messages = {
'img': [blocking_js_msg],
'link': [blocking_js_msg],
'redirect': [blocking_set_msg, blocked_request_msg],
'form': [blocking_js_msg],
}
if qtutils.version_check('5.15', compiled=False):
# On Qt 5.15, Chromium blocks the redirect as ERR_UNSAFE_REDIRECT
# instead.
expected_messages['redirect'] = [unsafe_redirect_msg]
elif request.config.webengine:
expected_messages = {
'img': [blocking_csrf_msg],
'link': [blocking_set_msg, blocked_request_msg],
'redirect': [blocking_set_msg, blocked_request_msg],
'form': [blocking_set_msg, blocked_request_msg],
}
else: # QtWebKit
expected_messages = {
'img': [blocking_csrf_msg],
'link': [blocking_csrf_msg, webkit_error_invalid],
'redirect': [blocking_csrf_msg, webkit_error_invalid],
'form': [webkit_error_unsupported],
}
for pattern in expected_messages[kind]:
msg = quteproc.wait_for(message=pattern)
msg.expected = True
|
Udala/docforever
|
refs/heads/master
|
pycoin/ecdsa/secp256k1.py
|
32
|
from .ellipticcurve import CurveFp, Point
# Certicom secp256-k1
_a = 0x0000000000000000000000000000000000000000000000000000000000000000
_b = 0x0000000000000000000000000000000000000000000000000000000000000007
_p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
_Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
_Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
_r = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
generator_secp256k1 = Point( CurveFp( _p, _a, _b ), _Gx, _Gy, _r )
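# A minimal usage sketch (assumes Point supports integer multiplication and x()/y()
# accessors, as in the classic ecdsa module this code derives from):
#
# secret = 123456789
# public = secret * generator_secp256k1 # public key point = secret * G
# print(public.x(), public.y())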
|
rust-lang/gyp
|
refs/heads/master
|
test/copies/gyptest-updir.py
|
169
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies where the destination is one level above an expansion that
yields a make variable.
"""
import TestGyp
# The Android build system doesn't allow output to go to arbitrary places.
test = TestGyp.TestGyp(formats=['!android'])
test.run_gyp('copies-updir.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('copies-updir.gyp', 'copies_up', chdir='relocate/src')
test.built_file_must_match('../copies-out-updir/file1',
'file1 contents\n',
chdir='relocate/src')
test.pass_test()
|
ingokegel/intellij-community
|
refs/heads/master
|
python/testData/inspections/GoogleDocStringRemovePositionalVararg.py
|
53
|
def f():
"""
Args:
*ar<caret>gs:
"""
|
saurabh6790/frappe
|
refs/heads/develop
|
frappe/desk/form/load.py
|
1
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
import frappe.utils
import frappe.share
import frappe.defaults
import frappe.desk.form.meta
from frappe.model.utils.user_settings import get_user_settings
from frappe.permissions import get_doc_permissions
from frappe.desk.form.document_follow import is_document_followed
from frappe import _
from six.moves.urllib.parse import quote
@frappe.whitelist(allow_guest=True)
def getdoc(doctype, name, user=None):
"""
Loads a doclist for a given document. This method is called directly from the client.
Requires "doctype" and "name" as form variables.
Will also call the "onload" method on the document.
"""
if not (doctype and name):
raise Exception('doctype and name required!')
if not name:
name = doctype
if not frappe.db.exists(doctype, name):
return []
try:
doc = frappe.get_doc(doctype, name)
run_onload(doc)
if not doc.has_permission("read"):
frappe.flags.error_message = _('Insufficient Permission for {0}').format(frappe.bold(doctype + ' ' + name))
raise frappe.PermissionError(("read", doctype, name))
doc.apply_fieldlevel_read_permissions()
# add file list
doc.add_viewed()
get_docinfo(doc)
except Exception:
frappe.errprint(frappe.utils.get_traceback())
raise
doc.add_seen()
frappe.response.docs.append(doc)
@frappe.whitelist(allow_guest=True)
def getdoctype(doctype, with_parent=False, cached_timestamp=None):
"""load doctype"""
docs = []
parent_dt = None
# with parent (called from report builder)
if with_parent:
parent_dt = frappe.model.meta.get_parent_dt(doctype)
if parent_dt:
docs = get_meta_bundle(parent_dt)
frappe.response['parent_dt'] = parent_dt
if not docs:
docs = get_meta_bundle(doctype)
frappe.response['user_settings'] = get_user_settings(parent_dt or doctype)
if cached_timestamp and docs[0].modified==cached_timestamp:
return "use_cache"
frappe.response.docs.extend(docs)
def get_meta_bundle(doctype):
bundle = [frappe.desk.form.meta.get_meta(doctype)]
for df in bundle[0].fields:
if df.fieldtype in frappe.model.table_fields:
bundle.append(frappe.desk.form.meta.get_meta(df.options, not frappe.conf.developer_mode))
return bundle
@frappe.whitelist()
def get_docinfo(doc=None, doctype=None, name=None):
if not doc:
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
all_communications = _get_communications(doc.doctype, doc.name)
automated_messages = filter(lambda x: x['communication_type'] == 'Automated Message', all_communications)
communications_except_auto_messages = filter(lambda x: x['communication_type'] != 'Automated Message', all_communications)
frappe.response["docinfo"] = {
"attachments": get_attachments(doc.doctype, doc.name),
"attachment_logs": get_comments(doc.doctype, doc.name, 'attachment'),
"communications": communications_except_auto_messages,
"automated_messages": automated_messages,
'comments': get_comments(doc.doctype, doc.name),
'total_comments': len(json.loads(doc.get('_comments') or '[]')),
'versions': get_versions(doc),
"assignments": get_assignments(doc.doctype, doc.name),
"assignment_logs": get_comments(doc.doctype, doc.name, 'assignment'),
"permissions": get_doc_permissions(doc),
"shared": frappe.share.get_users(doc.doctype, doc.name),
"info_logs": get_comments(doc.doctype, doc.name, 'Info'),
"share_logs": get_comments(doc.doctype, doc.name, 'share'),
"like_logs": get_comments(doc.doctype, doc.name, 'Like'),
"views": get_view_logs(doc.doctype, doc.name),
"energy_point_logs": get_point_logs(doc.doctype, doc.name),
"additional_timeline_content": get_additional_timeline_content(doc.doctype, doc.name),
"milestones": get_milestones(doc.doctype, doc.name),
"is_document_followed": is_document_followed(doc.doctype, doc.name, frappe.session.user),
"tags": get_tags(doc.doctype, doc.name),
"document_email": get_document_email(doc.doctype, doc.name)
}
def get_milestones(doctype, name):
return frappe.db.get_all('Milestone', fields = ['creation', 'owner', 'track_field', 'value'],
filters=dict(reference_type=doctype, reference_name=name))
def get_attachments(dt, dn):
return frappe.get_all("File", fields=["name", "file_name", "file_url", "is_private"],
filters = {"attached_to_name": dn, "attached_to_doctype": dt})
def get_versions(doc):
return frappe.get_all('Version', filters=dict(ref_doctype=doc.doctype, docname=doc.name),
fields=['name', 'owner', 'creation', 'data'], limit=10, order_by='creation desc')
@frappe.whitelist()
def get_communications(doctype, name, start=0, limit=20):
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
return _get_communications(doctype, name, start, limit)
def get_comments(doctype, name, comment_type='Comment'):
comment_types = [comment_type]
if comment_type == 'share':
comment_types = ['Shared', 'Unshared']
elif comment_type == 'assignment':
comment_types = ['Assignment Completed', 'Assigned']
elif comment_type == 'attachment':
comment_types = ['Attachment', 'Attachment Removed']
comments = frappe.get_all('Comment', fields = ['name', 'creation', 'content', 'owner', 'comment_type'], filters=dict(
reference_doctype = doctype,
reference_name = name,
comment_type = ['in', comment_types]
))
# convert to markdown (legacy ?)
if comment_type == 'Comment':
for c in comments:
c.content = frappe.utils.markdown(c.content)
return comments
def get_point_logs(doctype, docname):
return frappe.db.get_all('Energy Point Log', filters={
'reference_doctype': doctype,
'reference_name': docname,
'type': ['!=', 'Review']
}, fields=['*'])
def _get_communications(doctype, name, start=0, limit=20):
communications = get_communication_data(doctype, name, start, limit)
for c in communications:
if c.communication_type=="Communication":
c.attachments = json.dumps(frappe.get_all("File",
fields=["file_url", "is_private"],
filters={"attached_to_doctype": "Communication",
"attached_to_name": c.name}
))
return communications
def get_communication_data(doctype, name, start=0, limit=20, after=None, fields=None,
group_by=None, as_dict=True):
'''Returns list of communications for a given document'''
if not fields:
fields = '''
C.name, C.communication_type, C.communication_medium,
C.comment_type, C.communication_date, C.content,
C.sender, C.sender_full_name, C.cc, C.bcc,
C.creation AS creation, C.subject, C.delivery_status,
C._liked_by, C.reference_doctype, C.reference_name,
C.read_by_recipient, C.rating, C.recipients
'''
conditions = ''
if after:
# find after a particular date
conditions += '''
AND C.creation > {0}
'''.format(after)
if doctype=='User':
conditions += '''
AND NOT (C.reference_doctype='User' AND C.communication_type='Communication')
'''
# communications linked to reference_doctype
part1 = '''
SELECT {fields}
FROM `tabCommunication` as C
WHERE C.communication_type IN ('Communication', 'Feedback', 'Automated Message')
AND (C.reference_doctype = %(doctype)s AND C.reference_name = %(name)s)
{conditions}
'''.format(fields=fields, conditions=conditions)
# communications linked in Timeline Links
part2 = '''
SELECT {fields}
FROM `tabCommunication` as C
INNER JOIN `tabCommunication Link` ON C.name=`tabCommunication Link`.parent
WHERE C.communication_type IN ('Communication', 'Feedback', 'Automated Message')
AND `tabCommunication Link`.link_doctype = %(doctype)s AND `tabCommunication Link`.link_name = %(name)s
{conditions}
'''.format(fields=fields, conditions=conditions)
communications = frappe.db.sql('''
SELECT *
FROM (({part1}) UNION ({part2})) AS combined
{group_by}
ORDER BY creation DESC
LIMIT %(limit)s
OFFSET %(start)s
'''.format(part1=part1, part2=part2, group_by=(group_by or '')), dict(
doctype=doctype,
name=name,
start=frappe.utils.cint(start),
limit=limit
), as_dict=as_dict)
return communications
def get_assignments(dt, dn):
cl = frappe.get_all("ToDo",
fields=['name', 'owner', 'description', 'status'],
filters={
'reference_type': dt,
'reference_name': dn,
'status': ('!=', 'Cancelled'),
})
return cl
@frappe.whitelist()
def get_badge_info(doctypes, filters):
filters = json.loads(filters)
doctypes = json.loads(doctypes)
filters["docstatus"] = ["!=", 2]
out = {}
for doctype in doctypes:
out[doctype] = frappe.db.get_value(doctype, filters, "count(*)")
return out
def run_onload(doc):
doc.set("__onload", frappe._dict())
doc.run_method("onload")
def get_view_logs(doctype, docname):
""" get and return the latest view logs if available """
logs = []
if hasattr(frappe.get_meta(doctype), 'track_views') and frappe.get_meta(doctype).track_views:
view_logs = frappe.get_all("View Log", filters={
"reference_doctype": doctype,
"reference_name": docname,
}, fields=["name", "creation", "owner"], order_by="creation desc")
if view_logs:
logs = view_logs
return logs
def get_tags(doctype, name):
tags = [tag.tag for tag in frappe.get_all("Tag Link", filters={
"document_type": doctype,
"document_name": name
}, fields=["tag"])]
return ",".join(tags)
def get_document_email(doctype, name):
email = get_automatic_email_link()
if not email:
return None
email = email.split("@")
return "{0}+{1}+{2}@{3}".format(email[0], quote(doctype), quote(name), email[1])
def get_automatic_email_link():
return frappe.db.get_value("Email Account", {"enable_incoming": 1, "enable_automatic_linking": 1}, "email_id")
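# For example (illustrative values): if the automatic linking account is
# "replies@example.com", get_document_email("Task", "TASK-00001") returns
# "replies+Task+TASK-00001@example.com".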
def get_additional_timeline_content(doctype, docname):
contents = []
hooks = frappe.get_hooks().get('additional_timeline_content', {})
methods_for_all_doctype = hooks.get('*', [])
methods_for_current_doctype = hooks.get(doctype, [])
for method in methods_for_all_doctype + methods_for_current_doctype:
contents.extend(frappe.get_attr(method)(doctype, docname) or [])
return contents
|
andaag/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_color_quantization.py
|
297
|
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0, 1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
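# Note: a vectorized equivalent (a sketch using NumPy fancy indexing) would be:
# image = codebook[labels].reshape(w, h, -1)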
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
|
laffra/pava
|
refs/heads/master
|
pava/implementation/natives/java/awt/Font.py
|
55
|
def add_native_methods(clazz):
def initIDs____(a0):
raise NotImplementedError()
clazz.initIDs____ = staticmethod(initIDs____)
|
itsjeyd/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/bookmarks/signals.py
|
64
|
"""
Signals for bookmarks.
"""
from importlib import import_module
from django.dispatch.dispatcher import receiver
from xmodule.modulestore.django import SignalHandler
@receiver(SignalHandler.course_published)
def trigger_update_xblocks_cache_task(sender, course_key, **kwargs): # pylint: disable=invalid-name,unused-argument
"""
Trigger update_xblocks_cache() when course_published signal is fired.
"""
tasks = import_module('openedx.core.djangoapps.bookmarks.tasks') # Importing tasks early causes issues in tests.
# Note: The countdown=0 kwarg is set to ensure the method below does not attempt to access the course
# before the signal emitter has finished all operations. This is also necessary to ensure all tests pass.
tasks.update_xblocks_cache.apply_async([unicode(course_key)], countdown=0)
|
agoose77/hivesystem
|
refs/heads/master
|
sparta/assessors/all_.py
|
1
|
import libcontext, bee
from bee.segments import *
class all_(bee.worker):
"""The all assessor returns True if all of its inputs are True"""
outp = output("pull", "bool")
inp1 = antenna("pull", "bool")
inp2 = antenna("pull", "bool")
inp3 = antenna("pull", "bool")
inp4 = antenna("pull", "bool")
b_inp1 = buffer("pull", "bool")
b_inp2 = buffer("pull", "bool")
b_inp3 = buffer("pull", "bool")
b_inp4 = buffer("pull", "bool")
connect(inp1, b_inp1)
connect(inp2, b_inp2)
connect(inp3, b_inp3)
connect(inp4, b_inp4)
v_outp = variable("bool")
connect(v_outp, outp)
# Evaluation function
@modifier
def evaluate(self):
self.v_outp = (self.b_inp1 and self.b_inp2 and self.b_inp3 and self.b_inp4)
# Whenever the output is requested: update the inputs and evaluate
pretrigger(v_outp, b_inp1)
pretrigger(v_outp, b_inp2)
pretrigger(v_outp, b_inp3)
pretrigger(v_outp, b_inp4)
pretrigger(v_outp, evaluate)
# Name the inputs and outputs
guiparams = {
"outp": {"name": "Output"},
"inp1": {"name": "Input 1", "foldable": False},
"inp2": {"name": "Input 2"},
"inp3": {"name": "Input 3", "fold": True},
"inp4": {"name": "Input 4", "fold": True},
}
# Method to manipulate the parameter form as it appears in the GUI
@classmethod
def form(cls, f):
f.inp1.default = True
f.inp2.default = True
f.inp3.default = True
f.inp4.default = True
|
tensorflow/tpu
|
refs/heads/master
|
models/experimental/show_and_tell/train.py
|
1
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train the model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Standard Imports
from absl import app
import tensorflow.compat.v1 as tf
import configuration
import show_and_tell_model
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import estimator as contrib_estimator
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string(
"tpu", default=None,
help="The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"gcp_project", default=None,
help="Project name for the Cloud TPU-enabled project. If not specified, we "
"will attempt to automatically detect the GCE project from metadata.")
tf.flags.DEFINE_string(
"tpu_zone", default=None,
help="GCE zone where the Cloud TPU is located in. If not specified, we "
"will attempt to automatically detect the GCE project from metadata.")
tf.flags.DEFINE_bool("use_tpu", True, "If true, use TPU")
tf.flags.DEFINE_string("mode", "train",
"Execution mode: one of train|evaluate .")
tf.flags.DEFINE_string("input_file_pattern", "",
"File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("inception_checkpoint_file", "",
"Path to a pretrained inception_v3 model.")
tf.flags.DEFINE_string("model_dir", "",
"Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_boolean("train_inception", False,
"Whether to train inception submodel variables.")
tf.flags.DEFINE_integer("train_steps", 10000, "Number of batches for training.")
tf.flags.DEFINE_integer("train_batch_size", 1024, "Batch size for training.")
tf.flags.DEFINE_integer("eval_batch_size", 1024, "Batch size for evaluation.")
tf.flags.DEFINE_integer("iterations_per_loop", 100,
"TPU batch iterations per loop.")
MODEKEY_TO_MODE = {
tf.estimator.ModeKeys.PREDICT: "inference",
tf.estimator.ModeKeys.EVAL: "evaluate",
tf.estimator.ModeKeys.TRAIN: "train",
}
def model_fn(features, labels, mode, params):
im_mode = MODEKEY_TO_MODE[mode]
model_config = configuration.ModelConfig()
training_config = configuration.TrainingConfig()
model = show_and_tell_model.ShowAndTellModel(
model_config, mode=im_mode, train_inception=FLAGS.train_inception)
model.build_model_for_tpu(
images=features["images"],
input_seqs=features["input_seqs"],
target_seqs=features["target_seqs"],
input_mask=features["input_mask"])
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=training_config.initial_learning_rate)
optimizer = contrib_estimator.clip_gradients_by_norm(
optimizer, training_config.clip_gradients)
if FLAGS.use_tpu:
optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
train_op = optimizer.minimize(
model.total_loss, global_step=tf.train.get_or_create_global_step())
def scaffold_fn():
"""Load pretrained Inception checkpoint at initialization time."""
return tf.train.Scaffold(init_fn=model.init_fn)
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=model.total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
def input_fn(params):
model_config = configuration.ModelConfig()
model_config.input_file_pattern = params["input_file_pattern"]
model_config.batch_size = params["batch_size"]
model_config.mode = params["mode"]
model = show_and_tell_model.ShowAndTellModel(model_config, mode="train")
model.build_inputs()
return {
"images": model.images,
"input_seqs": model.input_seqs,
"target_seqs": model.target_seqs,
"input_mask": model.input_mask
}
def main(unused_argv):
assert FLAGS.input_file_pattern, "--input_file_pattern is required"
assert FLAGS.model_dir, "--model_dir is required"
if FLAGS.use_tpu:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
else:
tpu_grpc_url = ''
run_config = contrib_tpu.RunConfig(
master=tpu_grpc_url,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=1000,
keep_checkpoint_max=None,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,))
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
params={
"input_file_pattern": FLAGS.input_file_pattern,
"use_tpu": FLAGS.use_tpu,
"mode": FLAGS.mode,
})
training_config = configuration.TrainingConfig()
if FLAGS.mode == "train":
estimator.train(
input_fn=input_fn,
max_steps=FLAGS.train_steps,
)
else:
# Run evaluation when there's a new checkpoint
for ckpt in contrib_training.checkpoints_iterator(FLAGS.model_dir):
tf.logging.info("Starting to evaluate.")
try:
eval_results = estimator.evaluate(
input_fn=input_fn,
steps=(
training_config.num_examples_per_epoch // FLAGS.eval_batch_size
),
checkpoint_path=ckpt)
tf.logging.info("Eval results: %s", eval_results)
current_step = int(os.path.basename(ckpt).split("-")[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
"Evaluation finished after training step %d" % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
"Checkpoint %s no longer exists, skipping checkpoint" % ckpt)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
|
extremewaysback/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex_multi_apps/app1/4_auto.py
|
385
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app1", "3_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
AnalogJ/lexicon
|
refs/heads/master
|
lexicon/tests/providers/test_sakuracloud.py
|
1
|
"""Integration tests for SakuraCloud"""
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import IntegrationTestsV1
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
# TODO: migrate to IntegrationTestsV2 and its extended test suite
class SakruaCloudProviderTests(TestCase, IntegrationTestsV1):
"""TestCase for SakuraCloud"""
provider_name = "sakuracloud"
domain = "example.com"
def _filter_headers(self):
return ["Authorization"]
# TODO: enable the skipped tests
@pytest.mark.skip(reason="record id does not exist")
def test_provider_when_calling_delete_record_by_identifier_should_remove_record(
self,
):
return
|
benchoufi/PRJ-medtec_sigproc
|
refs/heads/master
|
echopen-leaderboard/bootcamp/feeds/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
ventrixcode/yowsup
|
refs/heads/pr/2
|
yowsup/layers/protocol_groups/__init__.py
|
70
|
from .layer import YowGroupsProtocolLayer
|
gridsim/gridsim
|
refs/heads/master
|
demo/thermostat.py
|
1
|
from gridsim.unit import units
from gridsim.util import Position
from gridsim.simulation import Simulator
from gridsim.recorder import PlotRecorder
from gridsim.thermal.element import TimeSeriesThermalProcess
from gridsim.thermal.core import ThermalProcess, ThermalCoupling
from gridsim.electrical.core import AbstractElectricalCPSElement
from gridsim.electrical.network import ElectricalPQBus, \
ElectricalTransmissionLine
from gridsim.electrical.loadflow import DirectLoadFlowCalculator
from gridsim.timeseries import SortedConstantStepTimeSeriesObject
from gridsim.iodata.input import CSVReader
from gridsim.iodata.output import FigureSaver
from gridsim.controller.simulation import AbstractControllerElement
class Thermostat(AbstractControllerElement):
def __init__(self, friendly_name, target_temperature, hysteresis,
thermal_process, subject, attribute,
on_value=True, off_value=False, position=Position()):
"""
A thermostat controller. This class measures the temperature of a
thermal process (typically a room) and controls ANY attribute of any
AbstractSimulationElement depending on the measured temperature, the given
target_temperature and the hysteresis.
:param: friendly_name: User friendly name to give to the element.
:type friendly_name: str
:param: target_temperature: The temperature to try to maintain inside
the target ThermalProcess.
:type: target_temperature: temperature see :mod:`gridsim.unit`
:param: hysteresis: The +- hysteresis in order to avoid too fast on/off
switching.
:type: hysteresis: delta temperature see :mod:`gridsim.unit`
:param: thermal_process: The reference to the thermal process to
observe.
:type: thermal_process: :class:`.ThermalProcess`
:param: subject: Reference to the object whose attribute has to be
changed depending on the temperature.
:type: object
:param: attribute: The name of the attribute to control as string.
:type: str
:param: on_value: The value to set for the attribute in order to turn
the device "on".
:type: on_value: any
:param: off_value: The value to set for the attribute in order to
turn the device "off".
:type: off_value: any
:param position: The position of the thermal element.
Defaults to [0,0,0].
:type position: :class:`Position`
"""
super(Thermostat, self).__init__(friendly_name, position)
self.target_temperature = units.value(target_temperature, units.kelvin)
"""
The temperature to try to maintain inside the observed thermal process by
controlling an electro-thermal element.
"""
self.hysteresis = units.value(hysteresis, units.kelvin)
"""
The +- hysteresis applied to the measured temperature in order to avoid
too fast on/off switching.
"""
if not hasattr(thermal_process, 'temperature'):
raise TypeError('thermal_process')
self.thermal_process = thermal_process
"""
The reference to the thermal process to observe and read the
temperature from.
"""
self.subject = subject
"""
The reference to the element to control.
"""
self.attribute = attribute
"""
Name of the attribute to control.
"""
self.on_value = on_value
"""
Value to set in order to turn the element on.
"""
self.off_value = off_value
"""
Value to set in order to turn the element off.
"""
self._output_value = off_value
# AbstractSimulationElement implementation.
def reset(self):
"""
AbstractSimulationElement implementation
.. seealso:: :func:`gridsim.core.AbstractSimulationElement.reset`.
"""
pass
def calculate(self, time, delta_time):
"""
AbstractSimulationElement implementation
.. seealso:: :func:`gridsim.core.AbstractSimulationElement.calculate`.
"""
actual_temperature = self.thermal_process.temperature
if actual_temperature < (self.target_temperature - self.hysteresis / 2.):
self._output_value = self.on_value
elif actual_temperature > (self.target_temperature + self.hysteresis / 2.):
self._output_value = self.off_value
def update(self, time, delta_time):
"""
AbstractSimulationElement implementation
.. seealso:: :func:`gridsim.core.AbstractSimulationElement.update`.
"""
setattr(self.subject, self.attribute, self._output_value)
class ElectroThermalHeaterCooler(AbstractElectricalCPSElement):
def __init__(self, friendly_name, pwr, efficiency_factor, thermal_process):
super(ElectroThermalHeaterCooler, self).__init__(friendly_name)
self._efficiency_factor = units.value(efficiency_factor)
self._thermal_process = thermal_process
self.power = units.value(pwr, units.watt)
self._on = False
"""
Controls the heater/cooler. If this is True, the heater/cooler is active
and takes energy from the electrical
network to actually heat or cool the thermal process associated.
"""
@property
def on(self):
return self._on
@on.setter
def on(self, on_off):
self._on = on_off
# AbstractSimulationElement implementation.
def reset(self):
super(ElectroThermalHeaterCooler, self).reset()
self.on = False
def calculate(self, time, delta_time):
self._internal_delta_energy = self.power * delta_time
if not self.on:
self._internal_delta_energy = 0
def update(self, time, delta_time):
super(ElectroThermalHeaterCooler, self).update(time, delta_time)
self._thermal_process.add_energy(
self._delta_energy * self._efficiency_factor)
# Gridsim simulator.
sim = Simulator()
sim.electrical.load_flow_calculator = DirectLoadFlowCalculator()
# Create a simple thermal process: A room and a thermal coupling between the
# room and the outside temperature.
# ___________
# | |
# | room |
# | 20 C | outside <= example time series (CSV) file
# | |]- 3 W/K
# |___________|
#
# The room has a surface of 50m2 and a height of 2.5m.
celsius = units(20, units.degC)
room = sim.thermal.add(ThermalProcess.room('room',
50*units.meter*units.meter,
2.5*units.metre,
units.convert(celsius, units.kelvin)))
outside = sim.thermal.add(
TimeSeriesThermalProcess('outside', SortedConstantStepTimeSeriesObject(CSVReader('./data/example_time_series.csv')),
lambda t: t*units.hour,
temperature_calculator=
lambda t: units.convert(units(t, units.degC),
units.kelvin)))
sim.thermal.add(ThermalCoupling('room to outside',
10.0*units.thermal_conductivity,
room, outside))
# Create a minimal electrical simulation network with a thermal heater connected
# to Bus0.
#
# Line0
# SlackBus o----------------o Bus0
# |
# heater (ElectricalHeaterCooler)
#
bus0 = sim.electrical.add(ElectricalPQBus('Bus0'))
sim.electrical.connect("Line0", sim.electrical.bus(0), bus0,
ElectricalTransmissionLine('Line0',
1000*units.metre,
0.2*units.ohm))
heater = sim.electrical.add(ElectroThermalHeaterCooler('heater',
1*units.kilowatt, 1.0,
room))
sim.electrical.attach(bus0, heater)
# Add the thermostat that controls the temperature inside the room and to hold
# it between 16..20 degrees celsius:
# ____________
# | | ____________
# | room | | |
# | o----------| Thermostat |---\
# | | |____________| |
# | |^^^^^^| | |
# |__|heater|__| |
# __|__ |__________________________|
# ---
#
target = units(20, units.degC)
# the hysteresis is a delta of temperature
hysteresis = 1*units.delta_degC
thermostat = sim.controller.add(Thermostat('thermostat',
units.convert(target, units.kelvin),
hysteresis,
room, heater, 'on'))
# Create a plot recorder that records the temperatures of all thermal processes.
temp = PlotRecorder('temperature', units.second, units.degC)
sim.record(temp, sim.thermal.find(has_attribute='temperature'))
# Create a plot recorder that records the control value of the thermostat given
# to the heater.
control = PlotRecorder('on', units.second, bool)
sim.record(control, sim.electrical.find(has_attribute='on'))
# Create a plot recorder that records the power used by the electrical heater.
power = PlotRecorder('delta_energy', units.second, units.joule)
sim.record(power, sim.find(friendly_name='heater'))
print("Running simulation...")
# Run the simulation for an hour with a resolution of 1 second.
sim.reset()
sim.run(5 * units.hour, units.second)
print("Saving data...")
# Create a PDF document, add the two figures of the plot recorder to the
# document and close the document.
FigureSaver(temp, "Temperature").save('./output/thermostat-fig1.pdf')
FigureSaver(control, "Control").save('./output/thermostat-fig2.png')
FigureSaver(power, "Power").save('./output/thermostat-fig3.png')
|
weiliu89/pyvision
|
refs/heads/master
|
experiments/toymarginals.py
|
3
|
from vision import *
from vision.alearn import marginals
from vision import visualize, model
from vision.toymaker import *
import os
import multiprocessing
import logging
import random
import ImageColor
import pylab
import pickle
logging.basicConfig(level = logging.INFO)
g = Geppetto((720,480))
b = Rectangle((400, 100), color="white")
b.linear((400, 800), 20)
g.add(b)
#g = Geppetto()
#b = Rectangle((100, 100))
#b.linear((600, 100), 100)
#g.add(b)
#
#o = Rectangle((100, 350))
#o.linear((600, 350), 100)
#g.add(o)
pool = multiprocessing.Pool(24)
frame, score, path, m = marginals.pick([b[0], b[-1]], g, pool = pool,
pairwisecost = .001,
sigma = .1,
erroroverlap = 0.5)
#visualize.save(visualize.highlight_paths(g, [path, b], width = 3, colors = ["red", "green"]), lambda x: "tmp/path{0}.jpg".format(x))
print "frame {0} with score {1}".format(frame, score)
|
franklingu/leetcode-solutions
|
refs/heads/master
|
questions/group-shifted-strings/Solution.py
|
1
|
"""
None
"""
class Solution:
def groupStrings(self, strings: List[str]) -> List[List[str]]:
track = {}
for s in strings:
diffs = []
for i, c in enumerate(s):
if i == 0:
continue
diffs.append((ord(c) - ord(s[i - 1]) + 26) % 26)
key = tuple(diffs)
if key not in track:
track[key] = []
track[key].append(s)
return list(track.values())
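# A minimal usage sketch (illustrative, not part of the original solution):
# "abc", "bcd" and "xyz" share the shift signature (1, 1); "az" and "ba"
# share (25,); the single letters "a" and "z" share the empty signature.
if __name__ == "__main__":
    groups = Solution().groupStrings(["abc", "bcd", "xyz", "az", "ba", "a", "z"])
    print(groups)  # e.g. [['abc', 'bcd', 'xyz'], ['az', 'ba'], ['a', 'z']]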
|
gregbdunn/aws-ec2rescue-linux
|
refs/heads/develop
|
lib/boto3/resources/response.py
|
11
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import jmespath
from botocore import xform_name
from .params import get_data_member
def all_not_none(iterable):
"""
Return True if all elements of the iterable are not None (or if the
iterable is empty). This is like the built-in ``all``, except checks
against None, so 0 and False are allowable values.
"""
for element in iterable:
if element is None:
return False
return True
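# Illustration (not part of the original source): unlike the built-in ``all``,
# falsy values such as 0, False and "" are acceptable; only None fails.
#   all_not_none([0, False, ""])  -> True
#   all_not_none([1, None])       -> False
#   all_not_none([])              -> True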
def build_identifiers(identifiers, parent, params=None, raw_response=None):
"""
Builds a mapping of identifier names to values based on the
identifier source location, type, and target. Identifier
values may be scalars or lists depending on the source type
and location.
:type identifiers: list
:param identifiers: List of :py:class:`~boto3.resources.model.Parameter`
definitions
:type parent: ServiceResource
:param parent: The resource instance to which this action is attached.
:type params: dict
:param params: Request parameters sent to the service.
:type raw_response: dict
:param raw_response: Low-level operation response.
:rtype: list
:return: An ordered list of ``(name, value)`` identifier tuples.
"""
results = []
for identifier in identifiers:
source = identifier.source
target = identifier.target
if source == 'response':
value = jmespath.search(identifier.path, raw_response)
elif source == 'requestParameter':
value = jmespath.search(identifier.path, params)
elif source == 'identifier':
value = getattr(parent, xform_name(identifier.name))
elif source == 'data':
# If this is a data member then it may incur a load
# action before returning the value.
value = get_data_member(parent, identifier.path)
elif source == 'input':
# This value is set by the user, so ignore it here
continue
else:
raise NotImplementedError(
'Unsupported source type: {0}'.format(source))
results.append((xform_name(target), value))
return results
def build_empty_response(search_path, operation_name, service_model):
"""
Creates an appropriate empty response for the type that is expected,
based on the service model's shape type. For example, a value that
is normally a list would then return an empty list. A structure would
return an empty dict, and a number would return None.
:type search_path: string
:param search_path: JMESPath expression to search in the response
:type operation_name: string
:param operation_name: Name of the underlying service operation.
:type service_model: :ref:`botocore.model.ServiceModel`
:param service_model: The Botocore service model
:rtype: dict, list, or None
:return: An appropriate empty value
"""
response = None
operation_model = service_model.operation_model(operation_name)
shape = operation_model.output_shape
if search_path:
# Walk the search path and find the final shape. For example, given
# a path of ``foo.bar[0].baz``, we first find the shape for ``foo``,
# then the shape for ``bar`` (ignoring the indexing), and finally
# the shape for ``baz``.
for item in search_path.split('.'):
item = item.strip('[0123456789]$')
if shape.type_name == 'structure':
shape = shape.members[item]
elif shape.type_name == 'list':
shape = shape.member
else:
raise NotImplementedError(
'Search path hits shape type {0} from {1}'.format(
shape.type_name, item))
# Anything not handled here is set to None
if shape.type_name == 'structure':
response = {}
elif shape.type_name == 'list':
response = []
elif shape.type_name == 'map':
response = {}
return response
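# Illustration (hypothetical shapes, not part of the original source): for a
# search path of 'Reservations' where the operation's output shape defines
# ``Reservations`` as a list, the walk ends on a list shape and an empty
# list ([]) is returned; with no search path and a structure output shape,
# an empty dict ({}) is returned instead.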
class RawHandler(object):
"""
    A raw action response handler. This passes through the response
dictionary, optionally after performing a JMESPath search if one
has been defined for the action.
:type search_path: string
:param search_path: JMESPath expression to search in the response
:rtype: dict
:return: Service response
"""
def __init__(self, search_path):
self.search_path = search_path
def __call__(self, parent, params, response):
"""
:type parent: ServiceResource
:param parent: The resource instance to which this action is attached.
:type params: dict
:param params: Request parameters sent to the service.
:type response: dict
:param response: Low-level operation response.
"""
# TODO: Remove the '$' check after JMESPath supports it
if self.search_path and self.search_path != '$':
response = jmespath.search(self.search_path, response)
return response
class ResourceHandler(object):
"""
Creates a new resource or list of new resources from the low-level
response based on the given response resource definition.
:type search_path: string
:param search_path: JMESPath expression to search in the response
:type factory: ResourceFactory
:param factory: The factory that created the resource class to which
this action is attached.
:type resource_model: :py:class:`~boto3.resources.model.ResponseResource`
:param resource_model: Response resource model.
:type service_context: :py:class:`~boto3.utils.ServiceContext`
:param service_context: Context about the AWS service
:type operation_name: string
:param operation_name: Name of the underlying service operation, if it
exists.
:rtype: ServiceResource or list
:return: New resource instance(s).
"""
def __init__(self, search_path, factory, resource_model,
service_context, operation_name=None):
self.search_path = search_path
self.factory = factory
self.resource_model = resource_model
self.operation_name = operation_name
self.service_context = service_context
def __call__(self, parent, params, response):
"""
:type parent: ServiceResource
:param parent: The resource instance to which this action is attached.
:type params: dict
:param params: Request parameters sent to the service.
:type response: dict
:param response: Low-level operation response.
"""
resource_name = self.resource_model.type
json_definition = self.service_context.resource_json_definitions.get(
resource_name)
# Load the new resource class that will result from this action.
resource_cls = self.factory.load_from_definition(
resource_name=resource_name,
single_resource_json_definition=json_definition,
service_context=self.service_context
)
raw_response = response
search_response = None
# Anytime a path is defined, it means the response contains the
# resource's attributes, so resource_data gets set here. It
# eventually ends up in resource.meta.data, which is where
# the attribute properties look for data.
if self.search_path:
search_response = jmespath.search(self.search_path, raw_response)
# First, we parse all the identifiers, then create the individual
# response resources using them. Any identifiers that are lists
# will have one item consumed from the front of the list for each
# resource that is instantiated. Items which are not a list will
# be set as the same value on each new resource instance.
identifiers = dict(build_identifiers(
self.resource_model.identifiers, parent, params,
raw_response))
# If any of the identifiers is a list, then the response is plural
plural = [v for v in identifiers.values() if isinstance(v, list)]
if plural:
response = []
# The number of items in an identifier that is a list will
# determine how many resource instances to create.
for i in range(len(plural[0])):
# Response item data is *only* available if a search path
# was given. This prevents accidentally loading unrelated
# data that may be in the response.
response_item = None
if search_response:
response_item = search_response[i]
response.append(
self.handle_response_item(resource_cls, parent,
identifiers, response_item))
elif all_not_none(identifiers.values()):
# All identifiers must always exist, otherwise the resource
# cannot be instantiated.
response = self.handle_response_item(
resource_cls, parent, identifiers, search_response)
else:
# The response should be empty, but that may mean an
# empty dict, list, or None based on whether we make
# a remote service call and what shape it is expected
# to return.
response = None
if self.operation_name is not None:
# A remote service call was made, so try and determine
# its shape.
response = build_empty_response(
self.search_path, self.operation_name,
self.service_context.service_model)
return response
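    # Illustration (not part of the original source): with identifiers such as
    # {'queue_url': ['url1', 'url2'], 'name': 'shared'}, the plural branch above
    # builds two resources, popping one 'queue_url' value for each while reusing
    # 'shared', and hands each one search_response[i] as its data when a search
    # path was defined.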
def handle_response_item(self, resource_cls, parent, identifiers,
resource_data):
"""
Handles the creation of a single response item by setting
parameters and creating the appropriate resource instance.
:type resource_cls: ServiceResource subclass
:param resource_cls: The resource class to instantiate.
:type parent: ServiceResource
:param parent: The resource instance to which this action is attached.
:type identifiers: dict
:param identifiers: Map of identifier names to value or values.
:type resource_data: dict or None
:param resource_data: Data for resource attributes.
:rtype: ServiceResource
:return: New resource instance.
"""
kwargs = {
'client': parent.meta.client,
}
for name, value in identifiers.items():
# If value is a list, then consume the next item
if isinstance(value, list):
value = value.pop(0)
kwargs[name] = value
resource = resource_cls(**kwargs)
if resource_data is not None:
resource.meta.data = resource_data
return resource
|
GeorgeZhukov/taskmanager
|
refs/heads/master
|
taskmanager/tasks/models.py
|
1
|
from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from datetime import date
# Create your models here.
@python_2_unicode_compatible
class Project(models.Model):
name = models.CharField(max_length=50, verbose_name=_('Project name'))
user = models.ForeignKey(User, null=True, related_name='projects', verbose_name=_('User'))
def __str__(self):
return self.name
class Meta:
verbose_name = _('Project')
verbose_name_plural = _('Projects')
@python_2_unicode_compatible
class Task(models.Model):
project = models.ForeignKey(Project, related_name='tasks', verbose_name=_('Project'))
content = models.CharField(max_length=50, verbose_name=_('Task content'))
deadline = models.DateTimeField(blank=True, null=True, verbose_name=_('Deadline'))
done = models.BooleanField(default=False, verbose_name=_('Is done?'))
order_id = models.PositiveIntegerField(default=1, verbose_name=_('Order id'))
def is_deadline_today(self):
        return self.deadline is not None and date.today() == self.deadline.date()
def delete(self, using=None):
tasks_to_reorder = self.project.tasks.filter(order_id__gte=self.order_id)
for task in tasks_to_reorder:
task.order_id -= 1
task.save()
return super(Task, self).delete(using)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
max_order = lambda : self.project.tasks.all().aggregate(models.Max("order_id"))['order_id__max']
# Set order for new model
if not self.pk:
max = max_order()
if max:
self.order_id = int(max) + 1
else:
self.order_id = 1
        # Validation of order_id (disabled):
        # if self.pk:
        #     max = max_order()
        #     if self.order_id not in range(1, max + 2):
        #         # When the last item changes its order, max can momentarily
        #         # be out of date, so "+ 2" keeps a safety margin.
        #         raise ValueError("Invalid order id")
return super(Task, self).save(force_insert, force_update, using, update_fields)
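    # Example (illustrative, not part of the original model): if the project
    # already contains tasks with order_id 1..3, a newly created task is saved
    # with order_id 4; the first task of an empty project gets order_id 1.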
def __str__(self):
return self.content
class Meta:
verbose_name = _('Task')
verbose_name_plural = _('Tasks')
ordering = ['order_id']
|
timduru/platform-external-chromium_org
|
refs/heads/katkiss-4.4
|
build/android/avd.py
|
35
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Launches Android Virtual Devices with a set configuration for testing Chrome.
The script will launch a specified number of Android Virtual Devices (AVDs).
"""
import install_emulator_deps
import logging
import optparse
import os
import subprocess
import sys
from pylib import constants
from pylib.utils import emulator
def main(argv):
# ANDROID_SDK_ROOT needs to be set to the location of the SDK used to launch
# the emulator to find the system images upon launch.
emulator_sdk = os.path.join(constants.EMULATOR_SDK_ROOT,
'android_tools', 'sdk')
os.environ['ANDROID_SDK_ROOT'] = emulator_sdk
opt_parser = optparse.OptionParser(description='AVD script.')
opt_parser.add_option('-n', '--num', dest='emulator_count',
help='Number of emulators to launch (default is 1).',
type='int', default='1')
opt_parser.add_option('--abi', default='x86',
help='Platform of emulators to launch (x86 default).')
options, _ = opt_parser.parse_args(argv[1:])
logging.basicConfig(level=logging.INFO,
format='# %(asctime)-15s: %(message)s')
logging.root.setLevel(logging.INFO)
# Check if KVM is enabled for x86 AVD's and check for x86 system images.
if options.abi =='x86':
if not install_emulator_deps.CheckKVM():
logging.critical('ERROR: KVM must be enabled in BIOS, and installed. '
'Enable KVM in BIOS and run install_emulator_deps.py')
return 1
elif not install_emulator_deps.CheckX86Image():
logging.critical('ERROR: System image for x86 AVD not installed. Run '
'install_emulator_deps.py')
return 1
if not install_emulator_deps.CheckSDK():
logging.critical('ERROR: Emulator SDK not installed. Run '
'install_emulator_deps.py.')
return 1
emulator.LaunchEmulators(options.emulator_count, options.abi, True)
if __name__ == '__main__':
sys.exit(main(sys.argv))
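# Hypothetical invocation (not part of the original script):
#   build/android/avd.py --num 2 --abi x86
# launches two x86 AVDs after verifying that KVM, the x86 system image and the
# emulator SDK are installed.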
|
utitankaspk/SPKCAM
|
refs/heads/master
|
src/Spkcam.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Daniel Fernandez (daniel@spkautomatizacion.com), Saul Pilatowsky (saul@spkautomatizacion.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Find us at: http://www.spkautomatizacion.com
import Rhino
import rhinoscriptsyntax as rs
spkcam_id = Rhino.PlugIns.PlugIn.IdFromName("Spkcam")
plugin = Rhino.PlugIns.PlugIn.Find(spkcam_id)
path = plugin.Assembly.Location
ui_path = path.replace("Spkcam.rhp","spkcam_ui.py")
rs.Command("-_RunPythonScript (\"%s\")" % ui_path)
|
nhomar/odoo
|
refs/heads/8.0
|
addons/crm_profiling/crm_profiling.py
|
333
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
    @return: the given IDs together with all ancestor segmentation IDs """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
""" return True if the partner pid fetch the segmentation rule seg_id
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
    @param seg_id: Segmentation's ID
    @param pid: partner's ID
    @param answers_ids: Answer IDs
"""
ids_to_check = _get_parents(cr, uid, [seg_id])
[yes_answers, no_answers] = _get_answers(cr, uid, ids_to_check)
temp = True
for y_ans in yes_answers:
if y_ans not in answers_ids:
temp = False
break
if temp:
for ans in answers_ids:
if ans in no_answers:
temp = False
break
if temp:
return True
return False
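# Illustration (not part of the original module): if the profile and its
# parents require the yes-answers {1, 2} and forbid answer 3, a partner whose
# answers are [1, 2, 4] matches, while [1, 4] (missing a required answer) or
# [1, 2, 3] (containing a forbidden answer) do not.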
def _recompute_categ(self, cr, uid, pid, answers_ids):
""" Recompute category
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param pid: partner's ID
    @param answers_ids: Answer IDs
"""
ok = []
cr.execute('''
select r.category_id
from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
''', (pid,))
for x in cr.fetchall():
ok.append(x[0])
query = '''
select id, categ_id
from crm_segmentation
where profiling_active = true'''
if ok != []:
query = query +''' and categ_id not in(%s)'''% ','.join([str(i) for i in ok ])
query = query + ''' order by id '''
cr.execute(query)
segm_cat_ids = cr.fetchall()
for (segm_id, cat_id) in segm_cat_ids:
if test_prof(cr, uid, segm_id, pid, answers_ids):
ok.append(cat_id)
return ok
class question(osv.osv):
""" Question """
_name="crm_profiling.question"
_description= "Question"
_columns={
'name': fields.char("Question", required=True),
'answers_ids': fields.one2many("crm_profiling.answer", "question_id", "Available Answers", copy=True),
}
class questionnaire(osv.osv):
""" Questionnaire """
_name="crm_profiling.questionnaire"
_description= "Questionnaire"
_columns = {
'name': fields.char("Questionnaire", required=True),
'description':fields.text("Description", required=True),
'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
'questionnaire', 'question', "Questions"),
}
class answer(osv.osv):
_name="crm_profiling.answer"
_description="Answer"
_columns={
"name": fields.char("Answer", required=True),
"question_id": fields.many2one('crm_profiling.question',"Question"),
}
class partner(osv.osv):
_inherit="res.partner"
_columns={
"answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
"partner","answer","Answers"),
}
def _questionnaire_compute(self, cr, uid, answers, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param data: Get Data
@param context: A standard dictionary for contextual values """
partner_id = context.get('active_id')
query = "select answer from partner_question_rel where partner=%s"
cr.execute(query, (partner_id,))
for x in cr.fetchall():
answers.append(x[0])
self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
return {}
def write(self, cr, uid, ids, vals, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@param context: A standard dictionary for contextual values """
if 'answers_ids' in vals:
vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
return super(partner, self).write(cr, uid, ids, vals, context=context)
class crm_segmentation(osv.osv):
""" CRM Segmentation """
_inherit="crm.segmentation"
_columns={
"answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
"profile","answer","Included Answers"),
"answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
"profile","answer","Excluded Answers"),
'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
this box if you want to use this tab as part of the \
segmentation rule. If not checked, the criteria beneath will be ignored')
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
]
def process_continue(self, cr, uid, ids, start=False):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm segmentation’s IDs """
partner_obj = self.pool.get('res.partner')
categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
'sales_purchase_active', 'profiling_active'])
for categ in categs:
if start:
if categ['exclusif']:
cr.execute('delete from res_partner_res_partner_category_rel where \
category_id=%s', (categ['categ_id'][0],))
partner_obj.invalidate_cache(cr, uid, ['category_id'])
id = categ['id']
cr.execute('select id from res_partner order by id ')
partners = [x[0] for x in cr.fetchall()]
if categ['sales_purchase_active']:
to_remove_list=[]
cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
line_ids = [x[0] for x in cr.fetchall()]
for pid in partners:
if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
if categ['profiling_active']:
to_remove_list = []
for pid in partners:
cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
answers_ids = [x[0] for x in cr.fetchall()]
if (not test_prof(cr, uid, id, pid, answers_ids)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
for partner in partner_obj.browse(cr, uid, partners):
category_ids = [categ_id.id for categ_id in partner.category_id]
if categ['categ_id'][0] not in category_ids:
cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))
partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])
self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
MarkWh1te/xueqiu_predict
|
refs/heads/master
|
python3_env/lib/python3.4/site-packages/setuptools/command/install_scripts.py
|
454
|
from distutils import log
import distutils.command.install_scripts as orig
import os
import sys
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
|
gautam1858/tensorflow
|
refs/heads/master
|
tensorflow/contrib/kafka/python/ops/kafka_dataset_ops.py
|
4
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kafka Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kafka.python.ops import gen_dataset_ops
from tensorflow.contrib.kafka.python.ops import kafka_op_loader # pylint: disable=unused-import
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
class KafkaDataset(dataset_ops.DatasetSource):
"""A Kafka Dataset that consumes the message.
"""
@deprecation.deprecated(
None,
"tf.contrib.kafka will be removed in 2.0, the support for Apache Kafka "
"will continue to be provided through the tensorflow/io GitHub project.")
def __init__(self,
topics,
servers="localhost",
group="",
eof=False,
timeout=1000):
"""Create a KafkaReader.
Args:
topics: A `tf.string` tensor containing one or more subscriptions,
in the format of [topic:partition:offset:length],
by default length is -1 for unlimited.
servers: A list of bootstrap servers.
group: The consumer group id.
eof: If True, the kafka reader will stop on EOF.
timeout: The timeout value for the Kafka Consumer to wait
(in millisecond).
"""
super(KafkaDataset, self).__init__()
self._topics = ops.convert_to_tensor(
topics, dtype=dtypes.string, name="topics")
self._servers = ops.convert_to_tensor(
servers, dtype=dtypes.string, name="servers")
self._group = ops.convert_to_tensor(
group, dtype=dtypes.string, name="group")
self._eof = ops.convert_to_tensor(eof, dtype=dtypes.bool, name="eof")
self._timeout = ops.convert_to_tensor(
timeout, dtype=dtypes.int64, name="timeout")
def _as_variant_tensor(self):
return gen_dataset_ops.kafka_dataset(self._topics, self._servers,
self._group, self._eof, self._timeout)
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.string, [])
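# Hypothetical usage sketch (not part of the original module); assumes a Kafka
# broker on localhost with a "test" topic and a TF 1.x graph/session setup:
#
#   dataset = KafkaDataset(topics=["test:0:0:-1"], group="demo", eof=True)
#   iterator = dataset.make_one_shot_iterator()
#   next_message = iterator.get_next()
#   with tf.Session() as sess:
#       while True:
#           try:
#               print(sess.run(next_message))
#           except tf.errors.OutOfRangeError:
#               break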
|