text stringlengths 4 1.02M | meta dict |
|---|---|
"""Correctness tests for tf.keras DNN model using DistributionStrategy."""
import numpy as np
import tensorflow.compat.v2 as tf
import keras
from keras import backend
from keras.distribute import keras_correctness_test_base
from keras.distribute import strategy_combinations
from keras.optimizers.optimizer_v2 import (
gradient_descent as gradient_descent_keras,
)
from keras.testing_infra import test_utils
def all_strategy_combinations_with_eager_and_graph_modes():
    """All strategies in graph+eager mode, plus multi-worker MS in eager only."""
    combine = tf.__internal__.test.combinations.combine
    graph_and_eager = combine(
        distribution=strategy_combinations.all_strategies,
        mode=["graph", "eager"],
    )
    multi_worker_eager = combine(
        distribution=strategy_combinations.multi_worker_mirrored_strategies,
        mode="eager",
    )
    return graph_and_eager + multi_worker_eager
def all_strategy_combinations_with_graph_mode():
    """All strategies from the correctness-test base, restricted to graph mode."""
    combine = tf.__internal__.test.combinations.combine
    return combine(
        distribution=keras_correctness_test_base.all_strategies,
        mode=["graph"],
    )
def is_default_strategy(strategy):
    """Return True iff `strategy` is the default (no-op) strategy.

    Entering a real strategy's scope makes `tf.distribute.has_strategy()`
    True; only the default strategy leaves it False inside its own scope.
    """
    with strategy.scope():
        has_real_strategy = tf.distribute.has_strategy()
    return not has_real_strategy
@test_utils.run_all_without_tensor_float_32(
    "Uses Dense layers, which call matmul"
)
class TestDistributionStrategyDnnCorrectness(
    keras_correctness_test_base.TestDistributionStrategyCorrectnessBase
):
    """Correctness tests for a small Sequential DNN under DistributionStrategy."""

    def get_model(
        self, initial_weights=None, distribution=None, input_shapes=None
    ):
        """Build and compile the DNN, optionally inside `distribution`'s scope.

        `initial_weights` lets the harness start distributed and
        non-distributed runs from identical parameters. `input_shapes` is
        unused here; the parameter exists for signature parity with the
        subclassed-model variant defined later in this file.
        """
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            # We add few non-linear layers to make it non-trivial.
            model = keras.Sequential()
            model.add(
                keras.layers.Dense(10, activation="relu", input_shape=(1,))
            )
            model.add(
                keras.layers.Dense(
                    10,
                    activation="relu",
                    kernel_regularizer=keras.regularizers.l2(1e-4),
                )
            )
            model.add(keras.layers.Dense(10, activation="relu"))
            model.add(keras.layers.Dense(1))

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                loss=keras.losses.mean_squared_error,
                optimizer=gradient_descent_keras.SGD(0.05),
                metrics=["mse"],
            )
            return model

    def get_data(self):
        """Synthetic data for the linear relation y = 3x (9984 samples)."""
        x_train = np.random.rand(9984, 1).astype("float32")
        y_train = 3 * x_train
        x_predict = np.array([[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)
        return x_train, y_train, x_predict

    def get_data_with_partial_last_batch(self):
        """y = 3x train and eval data sized (10000) to leave a partial last
        batch in both training and evaluation."""
        x_train = np.random.rand(10000, 1).astype("float32")
        y_train = 3 * x_train
        x_eval = np.random.rand(10000, 1).astype("float32")
        y_eval = 3 * x_eval
        x_predict = np.array([[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)
        return x_train, y_train, x_eval, y_eval, x_predict

    def get_data_with_partial_last_batch_eval(self):
        """y = 3x data: 9984 training samples, with 10000 eval samples so only
        evaluation sees a partial last batch."""
        x_train = np.random.rand(9984, 1).astype("float32")
        y_train = 3 * x_train
        x_eval = np.random.rand(10000, 1).astype("float32")
        y_eval = 3 * x_eval
        x_predict = np.array([[1.0], [2.0], [3.0], [4.0]], dtype=np.float32)
        return x_train, y_train, x_eval, y_eval, x_predict

    @tf.__internal__.distribute.combinations.generate(
        keras_correctness_test_base.all_strategy_and_input_config_combinations()
        + keras_correctness_test_base.multi_worker_mirrored_eager()
    )
    def test_dnn_correctness(
        self, distribution, use_numpy, use_validation_data
    ):
        self.run_correctness_test(distribution, use_numpy, use_validation_data)

    @tf.__internal__.distribute.combinations.generate(
        keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()  # noqa: E501
        + keras_correctness_test_base.multi_worker_mirrored_eager()
    )
    def test_dnn_correctness_with_partial_last_batch_eval(
        self, distribution, use_numpy, use_validation_data
    ):
        self.run_correctness_test(
            distribution,
            use_numpy,
            use_validation_data,
            partial_last_batch="eval",
        )

    @tf.__internal__.distribute.combinations.generate(
        keras_correctness_test_base.strategy_minus_tpu_and_input_config_combinations_eager()  # noqa: E501
        + keras_correctness_test_base.multi_worker_mirrored_eager()
    )
    def test_dnn_correctness_with_partial_last_batch(
        self, distribution, use_numpy, use_validation_data
    ):
        # NOTE(review): presumably required so iterators can signal a partial
        # final batch instead of raising — confirm against tf.distribute docs.
        distribution.extended.experimental_enable_get_next_as_optional = True
        self.run_correctness_test(
            distribution,
            use_numpy,
            use_validation_data,
            partial_last_batch="train_and_eval",
            training_epochs=1,
        )

    @tf.__internal__.distribute.combinations.generate(
        all_strategy_combinations_with_graph_mode()
    )
    def test_dnn_with_dynamic_learning_rate(self, distribution):
        self.run_dynamic_lr_test(distribution)
class TestDistributionStrategyDnnMetricCorrectness(
    keras_correctness_test_base.TestDistributionStrategyCorrectnessBase
):
    """Checks that training metrics are computed correctly under distribution."""

    def get_model(self, distribution=None, input_shapes=None):
        """Build a single-unit linear model (ones-initialized) in the
        strategy's scope. `input_shapes` is unused; kept for signature parity
        with the other test classes in this file."""
        with distribution.scope():
            model = keras.Sequential()
            model.add(
                keras.layers.Dense(
                    1, input_shape=(1,), kernel_initializer="ones"
                )
            )
            model.compile(
                loss=keras.losses.mean_squared_error,
                optimizer=gradient_descent_keras.SGD(0.05),
                metrics=[keras.metrics.BinaryAccuracy()],
            )
            return model

    def run_metric_correctness_test(self, distribution):
        """Train for two epochs and assert binary accuracy is exactly 1.0
        in both epochs."""
        with self.cached_session():
            self.set_up_test_config()

            x_train, y_train, _ = self.get_data()
            model = self.get_model(distribution=distribution)

            batch_size = 64
            # The base helper may rescale the batch size for the strategy.
            batch_size = keras_correctness_test_base.get_batch_size(
                batch_size, distribution
            )
            train_dataset = tf.data.Dataset.from_tensor_slices(
                (x_train, y_train)
            )
            train_dataset = keras_correctness_test_base.batch_wrapper(
                train_dataset, batch_size
            )

            history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
            self.assertEqual(history.history["binary_accuracy"], [1.0, 1.0])

    @tf.__internal__.distribute.combinations.generate(
        all_strategy_combinations_with_eager_and_graph_modes()
    )
    def test_simple_dnn_metric_correctness(self, distribution):
        self.run_metric_correctness_test(distribution)
class TestDistributionStrategyDnnMetricEvalCorrectness(
    keras_correctness_test_base.TestDistributionStrategyCorrectnessBase
):
    """Checks stateless ("accuracy") and stateful (BinaryAccuracy) metrics
    agree during `evaluate` under distribution."""

    def get_model(self, distribution=None, input_shapes=None):
        """Build an all-ones-initialized relu+sigmoid model in the strategy's
        scope. `input_shapes` is unused; kept for signature parity."""
        with distribution.scope():
            model = keras.Sequential()
            model.add(
                keras.layers.Dense(
                    3, activation="relu", input_dim=4, kernel_initializer="ones"
                )
            )
            model.add(
                keras.layers.Dense(
                    1, activation="sigmoid", kernel_initializer="ones"
                )
            )
            model.compile(
                loss="mae",
                metrics=["accuracy", keras.metrics.BinaryAccuracy()],
                optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001),
            )
            return model

    def run_eval_metrics_correctness_test(self, distribution):
        """Evaluate on all-ones then all-zeros labels; both metrics must hit
        exactly 1.0 then exactly 0.0."""
        with self.cached_session():
            self.set_up_test_config()
            model = self.get_model(distribution=distribution)

            # verify correctness of stateful and stateless metrics.
            x = np.ones((100, 4)).astype("float32")
            y = np.ones((100, 1)).astype("float32")
            dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat()
            dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
            # outs[0] is the loss; outs[1]/outs[2] are the two accuracies.
            outs = model.evaluate(dataset, steps=10)
            self.assertEqual(outs[1], 1.0)
            self.assertEqual(outs[2], 1.0)

            y = np.zeros((100, 1)).astype("float32")
            dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat()
            dataset = keras_correctness_test_base.batch_wrapper(dataset, 4)
            outs = model.evaluate(dataset, steps=10)
            self.assertEqual(outs[1], 0.0)
            self.assertEqual(outs[2], 0.0)

    @tf.__internal__.distribute.combinations.generate(
        all_strategy_combinations_with_eager_and_graph_modes()
    )
    def test_identity_model_metric_eval_correctness(self, distribution):
        self.run_eval_metrics_correctness_test(distribution)
class SubclassedModel(keras.Model):
    """Subclassed four-layer DNN mirroring the Sequential model used in the
    correctness tests above (same layer sizes, activations and regularizer).
    """

    def __init__(self, initial_weights, input_shapes):
        super().__init__()
        self.dense1 = keras.layers.Dense(
            10, activation="relu", input_shape=(1,)
        )
        self.dense2 = keras.layers.Dense(
            10,
            activation="relu",
            kernel_regularizer=keras.regularizers.l2(1e-4),
        )
        self.dense3 = keras.layers.Dense(10, activation="relu")
        self.dense4 = keras.layers.Dense(1)

        if input_shapes:
            self.build(input_shapes)
        else:
            # No explicit shapes known (e.g. the input is a DatasetV1Adapter):
            # fall back to a single scalar feature per example.
            self.build((None, 1))

        if initial_weights:
            self.set_weights(initial_weights)

    def call(self, inputs):
        """Pipe the inputs through the four dense layers in order."""
        outputs = inputs
        for layer in (self.dense1, self.dense2, self.dense3, self.dense4):
            outputs = layer(outputs)
        return outputs
@test_utils.run_all_without_tensor_float_32(
    "Uses Dense layers, which call matmul"
)
class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
    TestDistributionStrategyDnnCorrectness
):
    """Re-runs the DNN correctness tests with a subclassed model, asserting
    the specific ValueError raised where subclassed models are unsupported."""

    def get_model(
        self, initial_weights=None, distribution=None, input_shapes=None
    ):
        """Build and compile the subclassed DNN, optionally inside
        `distribution`'s scope (overrides the Sequential version)."""
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            model = SubclassedModel(initial_weights, input_shapes)

            model.compile(
                loss=keras.losses.mean_squared_error,
                optimizer=gradient_descent_keras.SGD(0.05),
                metrics=["mse"],
            )
            return model

    @tf.__internal__.distribute.combinations.generate(
        keras_correctness_test_base.all_strategy_and_input_config_combinations()
        + keras_correctness_test_base.multi_worker_mirrored_eager()
    )
    def test_dnn_correctness(
        self, distribution, use_numpy, use_validation_data
    ):
        # Eager mode and the default strategy run the normal correctness
        # test; other graph-mode strategies reject subclassed models.
        if (tf.executing_eagerly()) or is_default_strategy(distribution):
            self.run_correctness_test(
                distribution, use_numpy, use_validation_data
            )
        elif (
            backend.is_tpu_strategy(distribution) and not tf.executing_eagerly()
        ):
            # TPU strategies in graph mode raise a dedicated error message.
            with self.assertRaisesRegex(
                ValueError,
                "Expected `model` argument to be a functional `Model` "
                "instance, but got a subclassed model instead.",
            ):
                self.run_correctness_test(
                    distribution, use_numpy, use_validation_data
                )
        else:
            with self.assertRaisesRegex(
                ValueError,
                "We currently do not support distribution strategy with a "
                "`Sequential` model that is created without `input_shape`/"
                "`input_dim` set in its first layer or a subclassed model.",
            ):
                self.run_correctness_test(
                    distribution, use_numpy, use_validation_data
                )

    @tf.__internal__.distribute.combinations.generate(
        all_strategy_combinations_with_graph_mode()
    )
    def test_dnn_with_dynamic_learning_rate(self, distribution):
        # Same support matrix as test_dnn_correctness, for the dynamic-LR run.
        if (
            tf.executing_eagerly() and not backend.is_tpu_strategy(distribution)
        ) or is_default_strategy(distribution):
            self.run_dynamic_lr_test(distribution)
        elif backend.is_tpu_strategy(distribution):
            with self.assertRaisesRegex(
                ValueError,
                "Expected `model` argument to be a functional `Model` "
                "instance, but got a subclassed model instead.",
            ):
                self.run_dynamic_lr_test(distribution)
        else:
            with self.assertRaisesRegex(
                ValueError,
                "We currently do not support distribution strategy with a "
                "`Sequential` model that is created without `input_shape`/"
                "`input_dim` set in its first layer or a subclassed model.",
            ):
                self.run_dynamic_lr_test(distribution)

    @tf.__internal__.distribute.combinations.generate(
        keras_correctness_test_base.test_combinations_with_tpu_strategies_graph()  # noqa: E501
    )
    def test_dnn_correctness_with_partial_last_batch_eval(
        self, distribution, use_numpy, use_validation_data
    ):
        # TPU strategies in graph mode always reject subclassed models.
        with self.assertRaisesRegex(
            ValueError,
            "Expected `model` argument to be a functional `Model` instance, "
            "but got a subclassed model instead.",
        ):
            self.run_correctness_test(
                distribution,
                use_numpy,
                use_validation_data,
                partial_last_batch="eval",
            )
if __name__ == "__main__":
    # Entry point goes through the multi-process runner (rather than a plain
    # test main) so the multi-worker combinations above can spawn workers.
    tf.__internal__.distribute.multi_process_runner.test_main()
| {
"content_hash": "2048a836bb62ab38a1169e4357698c02",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 106,
"avg_line_length": 37.748618784530386,
"alnum_prop": 0.6013172338090012,
"repo_name": "keras-team/keras",
"id": "a08b4c7c925eaf2ff0901e2cf68618bfbed22305",
"size": "14354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/distribute/keras_dnn_correctness_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "900"
},
{
"name": "Python",
"bytes": "11342063"
},
{
"name": "Shell",
"bytes": "11489"
},
{
"name": "Starlark",
"bytes": "273139"
}
],
"symlink_target": ""
} |
import os
from django.db import models
from django.conf import settings
class TunnelConfig(models.Model):
    """
    A configuration object for a specific tunnel. This object encapsulates the
    specific configuration file and options for its tunnel.
    """
    def get_tunnel_conf_upload_dir(instance, filename):
        """
        The upload path on which to store the :attr:`django_calabar.models.TunnelConfig.tunnel_conf_file`.

        Builds the location based on the CALABAR_TUNNEL_CONF_UPLOAD_DIR
        setting; the given `filename` is ignored in favor of
        ``<instance.name>.conf``.
        """
        path = os.path.join(
            settings.CALABAR_TUNNEL_CONF_UPLOAD_DIR,
            'tunnel_confs',
            "%s.conf" % instance.name,
        )
        return path

    name = models.CharField(max_length=100, unique=True)
    """The Tunnel's name. Corresponds to its [tunnel:xxx] entry in the calabar configuration"""

    tunnel_type = models.CharField(max_length=30)
    """Type of network tunnel this instance represents. Validators are based on this"""

    cal_conf = models.TextField()
    """The text for this Tunnels entry in its calabar.conf"""

    tunnel_conf = models.TextField()
    """The contents of the configuration file for this specific tunnel type"""

    tunnel_conf_file = models.FileField('tunnel configuration file',
                                        upload_to=get_tunnel_conf_upload_dir,
                                        max_length=255,
                                        blank=True, null=True)
    """The tunnel configuration file for this specific tunnel"""

    def __unicode__(self):
        # Python 2 display form, e.g. u"Tunnel:<corp-vpn>".
        return u"Tunnel:<%s>" % self.name
class CalabarConfig(models.Model):
    """
    A Calabar configuration object representing a set of tunnels and options.
    """
    def get_conf_upload_dir(instance, filename):
        """
        The upload path on which to store the :attr:`django_calabar.models.CalabarConfig.conf_file`.

        Builds the location based on the CALABAR_TUNNEL_CONF_UPLOAD_DIR
        setting; the given `filename` is ignored in favor of
        ``<instance.name>.conf``.
        """
        path = os.path.join(
            settings.CALABAR_TUNNEL_CONF_UPLOAD_DIR,
            "%s.conf" % instance.name,
        )
        return path

    name = models.CharField(max_length=100, unique=True)
    """This specific set of Tunnels' unique name"""

    global_conf = models.TextField()
    """The text for all non-tunnel configuration, like the ``[vpnc]`` section."""

    conf = models.TextField()
    """The entire contents of the configuration file from combining the global_conf
    and the configurations of each included tunnel"""

    conf_file = models.FileField('tunnel configuration file',
                                 upload_to=get_conf_upload_dir,
                                 max_length=255,
                                 blank=True, null=True)
    """The calabar configuration file."""

    tunnels = models.ManyToManyField(TunnelConfig)
    # The TunnelConfig objects whose entries are combined into `conf`.

    def __unicode__(self):
        # Python 2 display form, e.g. u"Calabar:<default>".
        return u"Calabar:<%s>" % self.name
| {
"content_hash": "4b036e7cba63929b7793dea48444a921",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 106,
"avg_line_length": 37.88461538461539,
"alnum_prop": 0.6148900169204737,
"repo_name": "winhamwr/django-calabar",
"id": "b02b338e2872b654136b60693164183e38e827ca",
"size": "2955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_calabar/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4837"
}
],
"symlink_target": ""
} |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
BASE_URL = 'http://veocube.cf'
class Scraper(scraper.Scraper):
    """SALTS scraper for the VeoCube movie site (Python 2 codebase)."""

    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # The base URL can be overridden per-scraper in the addon settings.
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        """Video types this scraper handles (movies only)."""
        return frozenset([VIDEO_TYPES.MOVIE])

    @classmethod
    def get_name(cls):
        """Display name, also used as the settings-key prefix."""
        return 'VeoCube'

    def get_sources(self, video):
        """Collect hoster dicts for `video` from its page and, for multi-part
        videos, from each linked part page."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=8)
        hosters += self.__get_sources(html, url)
        # Additional parts of the video are linked from a 'parts-middle' div.
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'parts-middle'})
        if fragment:
            for attrs, _content in dom_parser2.parse_dom(fragment[0].content, 'a', req='href'):
                url = scraper_utils.urljoin(self.base_url, attrs['href'])
                html = self._http_get(url, cache_limit=8)
                hosters += self.__get_sources(html, url)
        return hosters

    def __get_sources(self, html, page_url):
        """Extract source dicts from the iframes in one page's
        'video-content' div; `page_url` seeds the Referer header."""
        sources = []
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'video-content'})
        if fragment:
            referer = page_url
            iframes = dom_parser2.parse_dom(fragment[0].content, 'iframe', req='src')
            for attrs, _content in iframes:
                iframe_url = attrs['src']
                if self.base_url in iframe_url:
                    # Same-site iframe: fetch it and look for direct links.
                    headers = {'Referer': referer}
                    html = self._http_get(iframe_url, headers=headers, cache_limit=.5)
                    referer = iframe_url
                    links = scraper_utils.parse_sources_list(self, html)
                    if links:
                        for link, values in links.iteritems():
                            host = scraper_utils.get_direct_hostname(self, link)
                            if host == 'gvideo':
                                quality = scraper_utils.gv_get_quality(link)
                            else:
                                quality = values['quality']
                            source = {'multi-part': False, 'url': link, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
                            sources.append(source)
                    else:
                        # No direct links found: append any nested iframes to
                        # the list being iterated, so the loop also visits
                        # these newly discovered items.
                        iframes += dom_parser2.parse_dom(html, 'iframe', req='src')
                else:
                    # Off-site iframe: record it as a non-direct hosted source.
                    host = urlparse.urlparse(iframe_url).hostname
                    source = {'multi-part': False, 'url': iframe_url, 'host': host, 'class': self, 'quality': QUALITIES.HIGH, 'views': None, 'rating': None, 'direct': False}
                    sources.append(source)
        return sources

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        """Search the site for `title`; filter by `year` only when both the
        query year and the page's year are present."""
        results = []
        html = self._http_get(self.base_url, params={'s': title}, cache_limit=8)
        for _attrs, item in dom_parser2.parse_dom(html, 'div', {'class': 'movie-details'}):
            match_url = dom_parser2.parse_dom(item, 'a', req='href')
            match_title = dom_parser2.parse_dom(item, 'a')
            match_year = dom_parser2.parse_dom(item, 'span', {'class': 'movie-release'})
            if match_url and match_title:
                match_url = match_url[0].attrs['href']
                match_title = match_title[0].content
                if match_year:
                    match_year = match_year[0].content.strip()
                else:
                    match_year = ''

                if not year or not match_year or year == match_year:
                    result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
                    results.append(result)
        return results
| {
"content_hash": "ae0c178520d59d2a57efec63f25f7bb5",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 173,
"avg_line_length": 45.018181818181816,
"alnum_prop": 0.5718901453957996,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "df399b0335a00f6e51ee8428f2b03cf9034ba65b",
"size": "4952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin.video.salts/scrapers/veocube_scraper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import re, sys, collections

# Term-frequency: print the 25 most common non-stop-words of the input file.
STOP_WORDS = set(open('../stop_words.txt').read().split(','))
tokens = re.findall('[a-z]{2,}', open(sys.argv[1]).read().lower())
frequencies = collections.Counter(t for t in tokens if t not in STOP_WORDS)
for word, count in frequencies.most_common(25):
    print(word, '-', count)
| {
"content_hash": "1df1889f1713a84845aef270e13939fa",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 40.57142857142857,
"alnum_prop": 0.6408450704225352,
"repo_name": "kranthikumar/exercises-in-programming-style",
"id": "9da618ff787c7c5aba608317b54fd41d36876a3b",
"size": "371",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "07-code-golf/tf-07-pn.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3561"
},
{
"name": "Clojure",
"bytes": "1635"
},
{
"name": "Java",
"bytes": "4859"
},
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "86840"
},
{
"name": "Ruby",
"bytes": "249"
},
{
"name": "Scala",
"bytes": "2958"
},
{
"name": "Shell",
"bytes": "1994"
}
],
"symlink_target": ""
} |
from ohno.action.baseaction import BaseAction
class Search(BaseAction):
    """Action that searches the current tile's surroundings `times` times."""

    def __init__(self, ohno, times=1):
        super(Search, self).__init__(ohno)
        # Snapshot the tile we occupy when 's' is issued: the squares being
        # searched are the ones adjacent to it, not to wherever we stand next
        # turn (we might get teleported, for instance).
        self.curtile = self.ohno.dungeon.curtile
        self.times = times

    def get_command(self):
        """Return the command string, e.g. '3s' for three searches."""
        message = '[search] Getting command to search %d times..' % self.times
        self.ohno.logger.action(message)
        return '%ds' % self.times

    def done(self, messages):
        """Update adjacent squares with the amount of searching we've done"""
        tiles = [self.curtile] + list(self.curtile.adjacent())
        for tile in tiles:
            # TODO: What if we get interrupted?
            tile.searched += self.times
| {
"content_hash": "cea68ca21d2a83f3868ea970cf53a04e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 39.69565217391305,
"alnum_prop": 0.6276013143483024,
"repo_name": "helgefmi/ohno",
"id": "7163ac3cd535363802025b1797c1777ead69d320",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ohno/action/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335981"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: add a free-form `error_traceback` field to Droplet."""

    dependencies = [
        ('waldur_digitalocean', '0002_extend_description_limits'),
    ]

    operations = [
        migrations.AddField(
            model_name='droplet',
            name='error_traceback',
            # Optional text; blank=True keeps existing rows valid.
            field=models.TextField(blank=True),
        ),
    ]
| {
"content_hash": "83607dbdc2fd3be38e18204c2fe83416",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 22.5625,
"alnum_prop": 0.5955678670360111,
"repo_name": "opennode/waldur-mastermind",
"id": "224822bebe1f5efc9c69749ddc8c53087cf9c828",
"size": "411",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_digitalocean/migrations/0003_droplet_error_traceback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
} |
import unittest
from src.graph import Graph
from src.dynamic_programming import max_weighted_independent_set_in_path_graph, \
sequence_alignment, optimal_binary_search_tree, \
binomial_coefficient, maximum_monotone_sequence, min_coins, \
zig_zag, bad_neighbours, linear_partition, \
max_weighted_independent_set_in_tree, longest_common_subsequence
class DynamicProgrammingTest(unittest.TestCase):
    """Unit tests for the dynamic-programming implementations.

    NOTE: this is Python 2 code (`dict.iteritems`, `print` statement).
    """

    def test_max_weighted_independent_set_in_path_graph(self):
        weights = [1, 4, 5, 4]
        actual = max_weighted_independent_set_in_path_graph(weights)
        expected = [8, [4, 4]]
        self.assertEqual(actual, expected, 'should compute the max weight '
                         'of the independent set of vertices')

    def test_max_weighted_independent_set_in_path_graph_2(self):
        weights = [1, 4, 5, 4, 6, 3, 9]
        actual = max_weighted_independent_set_in_path_graph(weights)
        expected = [21, [1, 5, 6, 9]]
        self.assertEqual(actual, expected, 'should compute the max weight '
                         'of the independent set of vertices')

    def test_max_weighted_independed_set_in_tree(self):
        # (Method name keeps its original spelling so external references to
        # this test stay valid.)
        tree = Graph.build(directed=True, edges=[
            ('r', 'a'), ('r', 'b'), ('a', 'c'), ('a', 'd'), ('a', 'e'),
            ('d', 'h'), ('d', 'i'), ('b', 'f'), ('b', 'g'), ('g', 'j')
        ])
        values = {'r': 10, 'a': 5, 'c': 3, 'd': 4, 'e': 9, 'h': 2, 'i': 7,
                  'b': 8, 'f': 3, 'g': 11, 'j': 8}
        for (vertex, value) in values.iteritems():
            tree.set_vertex_value(vertex, value)

        (actual_max_weight, actual_vertex_set) = \
            max_weighted_independent_set_in_tree(tree)

        expected_max_weight = 40
        expected_vertex_set = set(['c', 'e', 'g', 'f', 'i', 'h', 'r'])
        self.assertEqual(actual_max_weight, expected_max_weight,
                         'should compute the correct max weight')
        self.assertEqual(actual_vertex_set, expected_vertex_set,
                         'should compute the correct included vertex set')

    def test_sequence_alignment(self):
        # Penalty scheme: every gap costs 10, every mismatch costs 5.
        def gap_penalty():
            return 10

        def mismatch_penalty(x, y):
            if x == y:
                return 0
            return 5

        X = 'ABC'
        Y = 'AC'
        (penalty, X_mod, Y_mod) = sequence_alignment(X, Y, mismatch_penalty, gap_penalty)
        self.assertEqual(penalty, 10, 'one gap')
        self.assertEqual(X_mod, 'ABC', 'same as input')
        self.assertEqual(Y_mod, 'A-C', 'added a gap for alignment')

        X = 'AGGGCT'
        Y = 'AGGCA'
        (penalty, X_mod, Y_mod) = sequence_alignment(X, Y, mismatch_penalty, gap_penalty)
        self.assertEqual(penalty, 15, 'one gap and one mismatch')
        self.assertEqual(X_mod, 'AGGGCT', 'same as input')
        self.assertEqual(Y_mod, 'A-GGCA', 'added a gap for alignment')

        X = 'AGGCA'
        Y = 'AGGCA'
        (penalty, X_mod, Y_mod) = sequence_alignment(X, Y, mismatch_penalty, gap_penalty)
        self.assertEqual(penalty, 0, 'strings are identical')
        self.assertEqual(X_mod, X, 'no changes')
        self.assertEqual(Y_mod, Y, 'no changes')

        X = 'AB'
        Y = 'CAB'
        (penalty, X_mod, Y_mod) = sequence_alignment(X, Y, mismatch_penalty, gap_penalty)
        self.assertEqual(penalty, 10, 'completely different')
        self.assertEqual(X_mod, '-AB', 'one gap are inserted in the front')
        self.assertEqual(Y_mod, 'CAB', 'no changes')

    def test_longest_common_subsequence(self):
        # FIX: removed a leftover debug `print` of `penalty`.
        # TODO: this test still asserts nothing; `expected_penalty` suggests
        # an assertion on `penalty` was intended but the expected value needs
        # to be confirmed against the implementation before enabling it.
        str1 = 'alexandru'
        str2 = 'topliceanu'
        (penalty, common) = longest_common_subsequence(str1, str2)
        expected_penalty = 12

    def test_maximum_monotone_subsequence(self):
        s = '243517698'
        (max_length, max_sequence) = maximum_monotone_sequence(s)
        self.assertEqual(max_length, 5, 'should be the max sequence')
        # FIX: a missing comma between '23578' and '24568' previously
        # string-concatenated them into '2357824568', silently dropping two
        # valid solutions from this list.
        possible_solutions = ['24568', '23569', '23579', '23578',
                              '24568', '24569', '24579', '24578']
        self.assertIn(max_sequence, possible_solutions,
                      'should be one of the solutions')

    def test_linear_partitions(self):
        values = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        num_partitions = 3
        expected_min_max_sum = 1700
        expected_partitions = [[100, 200, 300, 400, 500], [600, 700], [800, 900]]
        (min_max_sum, partitions) = linear_partition(values, num_partitions)
        self.assertEqual(min_max_sum, expected_min_max_sum,
                         'should compute the expected min max sum of partition values')
        self.assertEqual(partitions, expected_partitions,
                         'should return the correct partitions')

    def test_optimal_binary_search_tree(self):
        items = [('a', 0.1), ('b', 0.1), ('c', 0.8)]
        (optimal_search_cost, pre_order) = optimal_binary_search_tree(items)
        expected_optimal_cost = 1.3
        self.assertEqual(optimal_search_cost, expected_optimal_cost,
                         'should not return the best tree')
        expected_pre_order = [('c', 0.8), ('a', 0.1), ('b', 0.1)]
        self.assertEqual(pre_order, expected_pre_order,
                         'the vertices are in preorder so to easily to build an optimal BST')

    def test_optimal_binary_search_tree_2(self):
        items = [('1', 0.05), ('2', 0.4), ('3', 0.08), ('4', 0.04),
                 ('5', 0.1), ('6', 0.1), ('7', 0.223)]
        (optimal_search_cost, pre_order) = optimal_binary_search_tree(items)
        expected_optimal_cost = 2.166
        self.assertEqual(optimal_search_cost, expected_optimal_cost,
                         'should not return the best tree')
        expected_pre_order = [('2', 0.4), ('1', 0.05), ('7', 0.223),
                              ('5', 0.1), ('3', 0.08), ('4', 0.04), ('6', 0.1)]
        self.assertEqual(pre_order, expected_pre_order,
                         'the vertices are in preorder so to easily to build an optimal BST')

    def test_optimal_binary_search_tree_from_final_exam(self):
        items = [('1', 0.2), ('2', 0.05), ('3', 0.17), ('4', 0.1),
                 ('5', 0.2), ('6', 0.03), ('7', 0.25)]
        (optimal_search_cost, _) = optimal_binary_search_tree(items)
        expected_optimal_cost = 2.23
        self.assertEqual(optimal_search_cost, expected_optimal_cost,
                         'should not return the best tree')

    def test_binomial_coefficient(self):
        actual = binomial_coefficient(2, 4)
        expected = 6
        self.assertEqual(actual, expected, 'should compute the correct value')

    def test_min_coins(self):
        coins = [1, 3, 5]
        total = 11
        (min_num_coins, picked_coins) = min_coins(coins, total)
        self.assertEqual(min_num_coins, 3, 'only three coins')
        self.assertEqual(set(picked_coins), set([5, 5, 1]), 'picked the correct coins')

    def test_is_zig_zag(self):
        numbers = [1, 7, 4, 9, 2, 5]
        (max_length, subsequence) = zig_zag(numbers)
        self.assertEqual(max_length, 6, 'the whole sequence is zig-zag')
        self.assertEqual(subsequence, numbers, 'the whole sequence is zig-zag')

        numbers = [1, 4, 7, 2, 5, 1]
        (max_length, subsequence) = zig_zag(numbers)
        self.assertEqual(max_length, 5, 'the whole sequence is zig-zag')
        possibilities = [[1, 4, 2, 5, 1], [1, 7, 2, 5, 1]]
        self.assertIn(subsequence, possibilities, 'one of the subsequences')

        sets = [
            [1, 7, 4, 9, 2, 5],
            [1, 17, 5, 10, 13, 15, 10, 5, 16, 8],
            [44],
            [1, 2, 3, 4, 5, 6, 7, 8, 9],
            [70, 55, 13, 2, 99, 2, 80, 80, 80, 80, 100, 19,
             7, 5, 5, 5, 1000, 32, 32],
            [374, 40, 854, 203, 203, 156, 362, 279, 812, 955,
             600, 947, 978, 46, 100, 953, 670, 862, 568, 188,
             67, 669, 810, 704, 52, 861, 49, 640, 370, 908,
             477, 245, 413, 109, 659, 401, 483, 308, 609, 120,
             249, 22, 176, 279, 23, 22, 617, 462, 459, 244]
        ]
        expected = [6, 7, 1, 2, 8, 36]
        for index in range(len(sets)):
            (max_length, seq) = zig_zag(sets[index])
            self.assertEqual(max_length, expected[index], 'correct value')

    def test_bad_neighbours(self):
        sets = [
            {'data': [10, 3, 2, 5, 7, 8], 'max': 19, 'picked': [10, 2, 7]},
            {'data': [1, 2, 3, 4, 5, 1, 2, 3, 4, 5], 'max': 16, 'picked': [3, 5, 3, 5]}
        ]
        for i in range(len(sets)):
            (max_value, picked_values) = bad_neighbours(sets[i]['data'])
            self.assertEqual(max_value, sets[i]['max'],
                             'computes max possible obtainable donations')
            self.assertEqual(picked_values, sets[i]['picked'],
                             'computes the selected donations')
| {
"content_hash": "e99a40b4b7b0d734f01747c6c5c82452",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 89,
"avg_line_length": 43.79207920792079,
"alnum_prop": 0.5535835405833145,
"repo_name": "topliceanu/learn",
"id": "1b752d3775fcc0e33fd785e712bf24252d632165",
"size": "8871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/algo/test/test_dynamic_programming.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2259"
},
{
"name": "C",
"bytes": "50301"
},
{
"name": "CSS",
"bytes": "2376"
},
{
"name": "Clojure",
"bytes": "40105"
},
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "Dockerfile",
"bytes": "557"
},
{
"name": "Elm",
"bytes": "35974"
},
{
"name": "Forth",
"bytes": "128"
},
{
"name": "Go",
"bytes": "277187"
},
{
"name": "HTML",
"bytes": "750204"
},
{
"name": "Haskell",
"bytes": "57709"
},
{
"name": "JavaScript",
"bytes": "3327208"
},
{
"name": "Makefile",
"bytes": "3280"
},
{
"name": "OCaml",
"bytes": "157277"
},
{
"name": "PowerShell",
"bytes": "3022"
},
{
"name": "Procfile",
"bytes": "230"
},
{
"name": "Pug",
"bytes": "846"
},
{
"name": "Python",
"bytes": "1383229"
},
{
"name": "Racket",
"bytes": "7552"
},
{
"name": "Reason",
"bytes": "43"
},
{
"name": "Roff",
"bytes": "116"
},
{
"name": "Ruby",
"bytes": "134845"
},
{
"name": "Rust",
"bytes": "146828"
},
{
"name": "Shell",
"bytes": "9006"
},
{
"name": "Solidity",
"bytes": "1347"
},
{
"name": "TypeScript",
"bytes": "254"
},
{
"name": "Vue",
"bytes": "1504"
}
],
"symlink_target": ""
} |
class Port:
    """A mutable bag of named variables, some of which can be marked as
    boundaries.

    Variable values live as plain attributes on the instance; ``self.vars``
    tracks the names of regular variables and ``self.boundaries`` the names
    promoted to boundaries (their values stay as attributes).
    """

    def __init__(self):
        self.vars = []        # names of regular (non-boundary) variables
        self.boundaries = []  # names of variables marked as boundaries

    def add(self, var_dict):
        """
        add new variables to the port
        :param var_dict: dictionary of variables to add. Names as keys, values
            as values. Re-adding an existing name overwrites its value.
        :return:
        """
        for name in var_dict:
            setattr(self, name, var_dict[name])
            self.vars.append(name)
        # De-duplicate names (set() does not preserve insertion order).
        self.vars = list(set(self.vars))

    def get(self, var):
        """
        get a variable value
        :param var: variable name
        :return: variable value
        :raises AttributeError: if the variable was never added
        """
        return getattr(self, var)

    def remove(self, var):
        """
        remove a variable (regular or boundary) from the port; unknown names
        are ignored silently
        :param var: variable name
        :return:
        """
        try:
            self.__delattr__(var)
        except AttributeError:
            # FIX: was a bare `except:` that swallowed every error. Only a
            # missing attribute (never-added name) is expected here.
            return
        self.vars = [v for v in self.vars if v != var]
        self.boundaries = [v for v in self.boundaries if v != var]

    def set_value(self, var, val):
        """
        set a value to a variable (note: does not register the name in
        `self.vars`; use `add` for that)
        :param var: variable name
        :param val: value to assign
        :return:
        """
        setattr(self, var, val)

    def set_boundary(self, var):
        """
        set a variable as boundary, moving its name out of `self.vars`
        :param var: variable name
        :return:
        """
        # FIX: dropped a dead try/except KeyError -- neither the list
        # comprehension nor list.append can raise KeyError.
        self.vars = [v for v in self.vars if v != var]
        self.boundaries.append(var)

    def set_port_boundary(self):
        """
        set the entire port as boundary. Values of variables are unchanged
        :return:
        """
        # Iterate a copy: set_boundary rebinds self.vars on every call.
        for v in list(self.vars):
            self.set_boundary(v)
"content_hash": "c3e33b4af6076ffb06f35af393babd5a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 86,
"avg_line_length": 26.044776119402986,
"alnum_prop": 0.4997134670487106,
"repo_name": "adriendelsalle/unsysap",
"id": "68408be067d53b61d2df15448c88b5e07f2f4c49",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unsysap/ports_generic.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "29217"
}
],
"symlink_target": ""
} |
"""add fetch values predicate
Revision ID: 732f1c06bcbf
Revises: d6db5a5cdb5d
Create Date: 2017-03-03 09:15:56.800930
"""
# revision identifiers, used by Alembic.
revision = '732f1c06bcbf'
down_revision = 'd6db5a5cdb5d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add the fetch-values columns introduced by this revision. Both are
    # nullable so the migration needs no data backfill on existing rows.
    op.add_column('datasources', sa.Column('fetch_values_from', sa.String(length=100), nullable=True))
    op.add_column('tables', sa.Column('fetch_values_predicate', sa.String(length=1000), nullable=True))
def downgrade():
    # Drop the columns added by upgrade(), in reverse order.
    op.drop_column('tables', 'fetch_values_predicate')
    op.drop_column('datasources', 'fetch_values_from')
| {
"content_hash": "23b9f94f73872aed50aa979c67c65586",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 103,
"avg_line_length": 27.08695652173913,
"alnum_prop": 0.7319422150882825,
"repo_name": "dmigo/incubator-superset",
"id": "7ad56c7f23b6e1f1eceecef9bb7259d10a5f5067",
"size": "647",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superset/migrations/versions/732f1c06bcbf_add_fetch_values_predicate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99154"
},
{
"name": "HTML",
"bytes": "100560"
},
{
"name": "JavaScript",
"bytes": "1557840"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1075386"
},
{
"name": "Shell",
"bytes": "1557"
},
{
"name": "Smarty",
"bytes": "1048"
}
],
"symlink_target": ""
} |
"""
Django settings for homeinventory project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from django.urls import reverse_lazy
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# ROOT_DIR is three directory levels above this file (the repository root);
# APPS_DIR is the Django project package inside it.
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('homeinventory')

# Load operating system environment variables and then prepare to use them
env = environ.Env()

# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=True)

if READ_DOT_ENV_FILE:
    # Operating System Environment variables have precedence over variables
    # defined in the .env file; that is to say, variables from the .env file
    # will only be used if not defined as environment variables.
    env_file = str(ROOT_DIR.path('.env'))
    print('Loading : {}'.format(env_file))
    env.read_env(env_file)
    print('The .env file has been loaded. See base.py for more information')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in the repository; production should
# read it from the environment instead.
SECRET_KEY = 'xre(ium)2hf436&-fz6fbpdqn@^r-*_8a^22mw7+g*%t#*xi+6'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

# Built-in Django applications.
DJANGO_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

# Third-party applications.
THIRD_PARTY_APPS = [
    'widget_tweaks',
    'django_filters',
    'easy_thumbnails',
]

# Project-local applications.
LOCAL_APPS = [
    'homeinventory.inventory.apps.InventoryConfig',
    'homeinventory.core.apps.CoreConfig',
    'homeinventory.accounts.apps.AccountsConfig',
    'homeinventory.dashboard.apps.DashboardConfig',
]

INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # Simplified static file serving.
    # https://warehouse.python.org/project/whitenoise/
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [str(APPS_DIR.path('templates')), ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    # DATABASE_URL env var wins; falls back to a local postgres database.
    'default': env.db('DATABASE_URL', default='postgres:///homeinventory'),
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME':
        'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(str(environ.Path(__file__) - 2))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# Logging configuration
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': '[homeinventory] %(levelname)s %(asctime)s %(message)s'
        },
    },
    'handlers': {
        # Send all messages to console
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        # Send info messages to syslog
        # 'syslog': {
        #     'level': 'INFO',
        #     'class': 'logging.handlers.SysLogHandler',
        #     'facility': SysLogHandler.LOG_LOCAL2,
        #     'address': '/dev/log',
        #     'formatter': 'verbose',
        # },
        # Warning messages are sent to admin emails
        'mail_admins': {
            'level': 'WARNING',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler',
        },
        # critical errors are logged to sentry
        # 'sentry': {
        #     'level': 'ERROR',
        #     'filters': ['require_debug_false'],
        #     'class': 'raven.contrib.django.handlers.SentryHandler',
        # },
    },
    'loggers': {
        # This is the "catch all" logger
        '': {
            # 'handlers': ['console', 'syslog', 'mail_admins', 'sentry'],
            'handlers': ['console', 'mail_admins'],
            'level': 'DEBUG',
            'propagate': False,
        },
    }
}

# Post-authentication redirect targets.
LOGIN_REDIRECT_URL = reverse_lazy('dashboard')
LOGIN_URL = '/auth/login/'
LOGOUT_REDIRECT_URL = reverse_lazy('home')

# MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_ROOT = os.path.join(os.path.dirname(
    str(environ.Path(__file__) - 2)), 'media')
MEDIA_URL = '/media/'

# easy_thumbnails presets.
THUMBNAIL_ALIASES = {
    '': {
        'default': {'size': (50, 50), 'crop': True},
        'thumb100': {'size': (100, 100), 'crop': True},
    },
}

# UPGRADE DJANGO 3.2: pin the implicit primary key field type.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
| {
"content_hash": "ab6ccf9ee2f954c8869e8d9e237f433d",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 79,
"avg_line_length": 27.39430894308943,
"alnum_prop": 0.6380768660038582,
"repo_name": "le4ndro/homeinventory",
"id": "a831b97d1ec8544f4082ca0d8a549694da2071ac",
"size": "6739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "47067"
},
{
"name": "Makefile",
"bytes": "77"
},
{
"name": "Python",
"bytes": "49210"
}
],
"symlink_target": ""
} |
"""
Create a color strip file from a tsv file that has the sequence ids.
"""
import os
import sys
import argparse
def read_labels(lf, col, verbose=False):
    """
    Read the labels file and return a dict with tree labels and values.

    :param lf: labels (tsv) file
    :param col: the 0-indexed column to use as the value
    :param verbose: extra output (currently unused)
    :return: dict mapping the first column of each row to column ``col``
    """
    ret = {}
    with open(lf, 'r') as f:
        for l in f:
            p = l.strip().split("\t")
            # We need at least col+1 fields. The original test was
            # `len(p) < col`, which let rows with exactly `col` fields
            # through and crashed on p[col] below.
            if len(p) <= col:
                continue
            if not p[col]:
                continue
            ret[p[0]] = p[col]
    return ret
def write_output(data, colors, label, lshape, outputfile, verbose):
    """
    Write the iTOL colorstrip file.

    :param data: dict mapping leaf names to their value/category
    :param colors: list of hex colors, one per distinct value
    :param label: the label for the color strip
    :param lshape: the legend shape (a number, as a string)
    :param outputfile: the file to write
    :param verbose: more output (currently unused)
    :return:
    """
    # Sort the distinct values so the value -> color assignment is
    # deterministic; the original list(set(...)) order changed between
    # runs with hash randomization.
    vals = sorted(set(data.values()))
    if len(vals) > len(colors):
        sys.stderr.write("WARNING: NOT ENOUGH COLORS! We have {} values and {} colors\n".format(len(vals), len(colors)))
        sys.exit(-1)
    # Assign colors by position; enumerate avoids the O(n^2) vals.index().
    valcols = {v: colors[i] for i, v in enumerate(vals)}
    with open(outputfile, 'w') as out:
        out.write("DATASET_COLORSTRIP\n")
        out.write("SEPARATOR COMMA\n")
        out.write(f"DATASET_LABEL,{label}\n")
        out.write("COLOR,#ff0000\n")
        out.write(f"LEGEND_TITLE,{label}\n")
        out.write("LEGEND_COLORS,{}\n".format(",".join(valcols.values())))
        out.write("LEGEND_SHAPES,{}\n".format(",".join([lshape for v in valcols.values()])))
        out.write("LEGEND_LABELS,{}\n".format(",".join(vals)))
        out.write("STRIP_WIDTH,25\n")
        out.write("COLOR_BRANCHES,1\n")
        out.write("DATA\n")
        for d in data:
            out.write("{},{},{}\n".format(d, valcols[data[d]], data[d]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Create a color strip file")
    parser.add_argument('-f', help='The labeled leaves file from fastq2ids.py', required=True)
    parser.add_argument('-n', help='Column in the labeled leaves file to use. 0 indexed', required=True, type=int)
    parser.add_argument('-l', help='color strip legend (e.g. Kingdom, Fish, Species', required=True)
    parser.add_argument('-o', help='Output file', required=True)
    parser.add_argument('-s', help='Legend shape (a number). Default = 1', default="1", type=str)
    parser.add_argument('-c', help='Colors to use. These will be prepended to our default list', action='append')
    parser.add_argument('-v', help='verbose output', action="store_true")
    args = parser.parse_args()

    # Default palette of qualitative hex colors.
    colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999']
    if args.c:
        # User-supplied colors take precedence over the defaults.
        colors = args.c + colors

    data = read_labels(args.f, args.n, args.v)
    write_output(data, colors, args.l, args.s, args.o, args.v)
"content_hash": "e00025c69c6f882f3b19fbe011fa954e",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 120,
"avg_line_length": 36.670731707317074,
"alnum_prop": 0.6075823079481211,
"repo_name": "linsalrob/EdwardsLab",
"id": "82e2dc817517d353df7ea2a47d7b99fdec00a7fd",
"size": "3007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jplacer/create_colorstrip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "227276"
},
{
"name": "C++",
"bytes": "21508"
},
{
"name": "Jupyter Notebook",
"bytes": "490830"
},
{
"name": "Makefile",
"bytes": "936"
},
{
"name": "Perl",
"bytes": "280086"
},
{
"name": "Python",
"bytes": "1102051"
},
{
"name": "Shell",
"bytes": "13759"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
from os import access, getenv, X_OK
# Name of the bundled jar this wrapper launches; it lives next to the script.
jar_file = 'DamageProfiler-0.4.6.jar'
# JVM heap defaults applied when the caller passes no -Xm* option and
# _JAVA_OPTIONS is unset (see jvm_opts below).
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']

# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers $JAVA_HOME/bin/java when it exists and is executable,
    otherwise falls back to plain 'java' found via PATH.
    """
    java_home = getenv('JAVA_HOME')
    if not java_home:
        return 'java'
    candidate = os.path.join(java_home, 'bin', 'java')
    if access(candidate, X_OK):
        return candidate
    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.

    The return value is a 3-tuple of lists of strings of the form:
      (memory_options, prop_options, passthrough_options)
    """
    mem_opts = []
    prop_opts = []
    pass_args = []

    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            # Java system properties and HotSpot options (the two
            # original branches did the same thing, so they are merged).
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            # Heap options such as -Xms / -Xmx.
            mem_opts.append(arg)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    #   if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # Apply the defaults only when the caller supplied no memory option AND
    # _JAVA_OPTIONS is completely unset.  An *empty* _JAVA_OPTIONS value
    # still suppresses the defaults, so test specifically for None
    # (idiomatic `is None` replaces the original `== None`).
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args)
def main():
    """Resolve the JVM, assemble the java command line, and run the jar."""
    java = java_executable()
    jar_dir = real_dirname(sys.argv[0])
    (mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])

    # A first pass-through argument starting with 'eu' is treated as a
    # fully-qualified Java class name, so run with -cp instead of -jar.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    # Special flag: just report where the jar lives instead of running it.
    if '--jar_dir' in sys.argv[1:]:
        print(jar_path)
    else:
        # Propagate the JVM's exit status to our caller.
        sys.exit(subprocess.call(java_args))

if __name__ == '__main__':
    main()
| {
"content_hash": "51233a5e82a4fe02934003d312413de7",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 81,
"avg_line_length": 29.123456790123456,
"alnum_prop": 0.6146672318779144,
"repo_name": "rvalieris/bioconda-recipes",
"id": "24e4537c5e6027e0d5b605aad3ed3577d6ae3962",
"size": "2663",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "recipes/damageprofiler/damageprofiler.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "237"
},
{
"name": "C",
"bytes": "154"
},
{
"name": "CMake",
"bytes": "13967"
},
{
"name": "Java",
"bytes": "286"
},
{
"name": "M4",
"bytes": "726"
},
{
"name": "Perl",
"bytes": "99944"
},
{
"name": "Perl 6",
"bytes": "23942"
},
{
"name": "Prolog",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "393677"
},
{
"name": "Roff",
"bytes": "996"
},
{
"name": "Shell",
"bytes": "3720561"
}
],
"symlink_target": ""
} |
# Copyright (c) 2012 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import re
import glob
from HTMLParser import HTMLParser
import os
import sys
import tempfile
import urllib2
import bsettings
import command
import terminal
# Toolchain priority classes, best (0) to worst; PRIORITY_CALC marks
# "compute the priority from the filename" (see Toolchain.GetPriority).
(PRIORITY_FULL_PREFIX, PRIORITY_PREFIX_GCC, PRIORITY_PREFIX_GCC_PATH,
    PRIORITY_CALC) = range(4)
# Simple class to collect links from a page
class MyHTMLParser(HTMLParser):
    """Collect links to .xz archives from an HTML index page."""

    def __init__(self, arch):
        """Create a new parser

        After the parser runs, self.links will be set to a list of the links
        to .xz archives found in the page, and self.arch_link will be set to
        the one for the given architecture (or None if not found).

        Args:
            arch: Architecture to search for
        """
        HTMLParser.__init__(self)
        self.arch_link = None
        self.links = []
        # Toolchain tarball names embed the target as '_<arch>-'.
        self._match = '_%s-' % arch

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            # NOTE(review): the loop variable below shadows the 'tag'
            # parameter; harmless here since 'tag' is not used afterwards,
            # but renaming it would be clearer.
            for tag, value in attrs:
                if tag == 'href':
                    if value and value.endswith('.xz'):
                        self.links.append(value)
                        if self._match in value:
                            self.arch_link = value
class Toolchain:
    """A single toolchain

    Public members:
        gcc: Full path to C compiler
        path: Directory path containing C compiler
        cross: Cross compile string, e.g. 'arm-linux-'
        arch: Architecture of toolchain as determined from the first
                component of the filename. E.g. arm-linux-gcc becomes arm
        priority: Toolchain priority (0=highest, 20=lowest)
    """

    def __init__(self, fname, test, verbose=False, priority=PRIORITY_CALC,
                 arch=None):
        """Create a new toolchain object.

        Args:
            fname: Filename of the gcc component
            test: True to run the toolchain to test it
            verbose: True to print out the information
            priority: Priority to use for this toolchain, or PRIORITY_CALC to
                calculate it
            arch: Toolchain architecture, or None to derive it from fname
        """
        self.gcc = fname
        self.path = os.path.dirname(fname)

        # Find the CROSS_COMPILE prefix to use for U-Boot. For example,
        # 'arm-linux-gnueabihf-gcc' turns into 'arm-linux-gnueabihf-'.
        basename = os.path.basename(fname)
        pos = basename.rfind('-')
        self.cross = basename[:pos + 1] if pos != -1 else ''

        # The architecture is the first part of the name
        pos = self.cross.find('-')
        if arch:
            self.arch = arch
        else:
            # No '-' in the prefix means a native ('sandbox') toolchain.
            self.arch = self.cross[:pos] if pos != -1 else 'sandbox'

        env = self.MakeEnvironment(False)

        # As a basic sanity check, run the C compiler with --version
        cmd = [fname, '--version']
        if priority == PRIORITY_CALC:
            self.priority = self.GetPriority(fname)
        else:
            self.priority = priority
        if test:
            result = command.RunPipe([cmd], capture=True, env=env,
                                     raise_on_error=False)
            # The toolchain is usable iff gcc --version exits 0.
            self.ok = result.return_code == 0
            if verbose:
                print 'Tool chain test: ',
                if self.ok:
                    print "OK, arch='%s', priority %d" % (self.arch,
                                                          self.priority)
                else:
                    print 'BAD'
                    print 'Command: ', cmd
                    print result.stdout
                    print result.stderr
        else:
            # Caller asked us to skip testing; assume the toolchain works.
            self.ok = True

    def GetPriority(self, fname):
        """Return the priority of the toolchain.

        Toolchains are ranked according to their suitability by their
        filename prefix.

        Args:
            fname: Filename of toolchain

        Returns:
            Priority of toolchain, PRIORITY_CALC=highest, 20=lowest.
        """
        # NOTE(review): '-uclinux' appears twice in this list; only the
        # first occurrence can ever match.
        priority_list = ['-elf', '-unknown-linux-gnu', '-linux',
            '-none-linux-gnueabi', '-none-linux-gnueabihf', '-uclinux',
            '-none-eabi', '-gentoo-linux-gnu', '-linux-gnueabi',
            '-linux-gnueabihf', '-le-linux', '-uclinux']
        for prio in range(len(priority_list)):
            if priority_list[prio] in fname:
                return PRIORITY_CALC + prio
        # Fall-through reuses the final loop index: unmatched toolchains
        # get the lowest (worst) priority.
        return PRIORITY_CALC + prio

    def GetWrapper(self, show_warning=True):
        """Get toolchain wrapper from the setting file.

        NOTE(review): if several [toolchain-wrapper] entries exist, only the
        last one is returned; show_warning is currently unused.
        """
        value = ''
        for name, value in bsettings.GetItems('toolchain-wrapper'):
            if not value:
                print "Warning: Wrapper not found"
        if value:
            # Trailing space so it can be prepended to the compiler name.
            value = value + ' '

        return value

    def MakeEnvironment(self, full_path):
        """Returns an environment for using the toolchain.

        Thie takes the current environment and adds CROSS_COMPILE so that
        the tool chain will operate correctly. This also disables localized
        output and possibly unicode encoded output of all build tools by
        adding LC_ALL=C.

        Args:
            full_path: Return the full path in CROSS_COMPILE and don't set
                PATH
        """
        env = dict(os.environ)
        wrapper = self.GetWrapper()
        if full_path:
            env['CROSS_COMPILE'] = wrapper + os.path.join(self.path, self.cross)
        else:
            env['CROSS_COMPILE'] = wrapper + self.cross
            env['PATH'] = self.path + ':' + env['PATH']

        env['LC_ALL'] = 'C'

        return env
class Toolchains:
    """Manage a list of toolchains for building U-Boot

    We select one toolchain for each architecture type

    Public members:
        toolchains: Dict of Toolchain objects, keyed by architecture name
        prefixes: Dict of prefixes to check, keyed by architecture. This can
            be a full path and toolchain prefix, for example
            {'x86', 'opt/i386-linux/bin/i386-linux-'}, or the name of
            something on the search path, for example
            {'arm', 'arm-linux-gnueabihf-'}. Wildcards are not supported.
        paths: List of paths to check for toolchains (may contain wildcards)
    """

    def __init__(self):
        self.toolchains = {}
        self.prefixes = {}
        self.paths = []
        # Extra 'make' flags from the settings file, keyed by target name.
        self._make_flags = dict(bsettings.GetItems('make-flags'))

    def GetPathList(self, show_warning=True):
        """Get a list of available toolchain paths

        Args:
            show_warning: True to show a warning if there are no tool chains.

        Returns:
            List of strings, each a path to a toolchain mentioned in the
            [toolchain] section of the settings file.
        """
        toolchains = bsettings.GetItems('toolchain')
        if show_warning and not toolchains:
            print ("Warning: No tool chains. Please run 'buildman "
                "--fetch-arch all' to download all available toolchains, or "
                "add a [toolchain] section to your buildman config file "
                "%s. See README for details" %
                bsettings.config_fname)

        paths = []
        for name, value in toolchains:
            # Settings entries may be glob patterns.
            if '*' in value:
                paths += glob.glob(value)
            else:
                paths.append(value)
        return paths

    def GetSettings(self, show_warning=True):
        """Get toolchain settings from the settings file.

        Args:
            show_warning: True to show a warning if there are no tool chains.
        """
        self.prefixes = bsettings.GetItems('toolchain-prefix')
        self.paths += self.GetPathList(show_warning)

    def Add(self, fname, test=True, verbose=False, priority=PRIORITY_CALC,
            arch=None):
        """Add a toolchain to our list

        We select the given toolchain as our preferred one for its
        architecture if it is a higher priority than the others.

        Args:
            fname: Filename of toolchain's gcc driver
            test: True to run the toolchain to test it
            verbose: True to print extra information
            priority: Priority to use for this toolchain
            arch: Toolchain architecture, or None if not known
        """
        toolchain = Toolchain(fname, test, verbose, priority, arch)
        add_it = toolchain.ok
        if toolchain.arch in self.toolchains:
            # Lower number = better priority; keep the best one.
            add_it = (toolchain.priority <
                        self.toolchains[toolchain.arch].priority)
        if add_it:
            self.toolchains[toolchain.arch] = toolchain
        elif verbose:
            print ("Toolchain '%s' at priority %d will be ignored because "
                   "another toolchain for arch '%s' has priority %d" %
                   (toolchain.gcc, toolchain.priority, toolchain.arch,
                    self.toolchains[toolchain.arch].priority))

    def ScanPath(self, path, verbose):
        """Scan a path for a valid toolchain

        Args:
            path: Path to scan
            verbose: True to print out progress information

        Returns:
            Filename of C compiler if found, else None
        """
        fnames = []
        # Toolchain tarballs keep gcc in one of these subdirectories.
        for subdir in ['.', 'bin', 'usr/bin']:
            dirname = os.path.join(path, subdir)
            if verbose: print "      - looking in '%s'" % dirname
            for fname in glob.glob(dirname + '/*gcc'):
                if verbose: print "         - found '%s'" % fname
                fnames.append(fname)
        return fnames

    def ScanPathEnv(self, fname):
        """Scan the PATH environment variable for a given filename.

        Args:
            fname: Filename to scan for

        Returns:
            List of matching pathanames, or [] if none
        """
        pathname_list = []
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            pathname = os.path.join(path, fname)
            if os.path.exists(pathname):
                pathname_list.append(pathname)
        return pathname_list

    def Scan(self, verbose):
        """Scan for available toolchains and select the best for each arch.

        We look for all the toolchains we can file, figure out the
        architecture for each, and whether it works. Then we select the
        highest priority toolchain for each arch.

        Args:
            verbose: True to print out progress information
        """
        if verbose: print 'Scanning for tool chains'
        # First try explicitly-configured prefixes, best match first.
        for name, value in self.prefixes:
            if verbose: print "   - scanning prefix '%s'" % value
            if os.path.exists(value):
                self.Add(value, True, verbose, PRIORITY_FULL_PREFIX, name)
                continue
            fname = value + 'gcc'
            if os.path.exists(fname):
                self.Add(fname, True, verbose, PRIORITY_PREFIX_GCC, name)
                continue
            fname_list = self.ScanPathEnv(fname)
            for f in fname_list:
                self.Add(f, True, verbose, PRIORITY_PREFIX_GCC_PATH, name)
            if not fname_list:
                # Python 2 raise syntax, kept as-is.
                raise ValueError, ("No tool chain found for prefix '%s'" %
                                   value)
        # Then scan the configured search paths.
        for path in self.paths:
            if verbose: print "   - scanning path '%s'" % path
            fnames = self.ScanPath(path, verbose)
            for fname in fnames:
                self.Add(fname, True, verbose)

    def List(self):
        """List out the selected toolchains for each architecture"""
        col = terminal.Color()
        print col.Color(col.BLUE, 'List of available toolchains (%d):' %
                        len(self.toolchains))
        if len(self.toolchains):
            for key, value in sorted(self.toolchains.iteritems()):
                print '%-10s: %s' % (key, value.gcc)
        else:
            print 'None'

    def Select(self, arch):
        """Returns the toolchain for a given architecture

        Args:
            args: Name of architecture (e.g. 'arm', 'ppc_8xx')

        returns:
            toolchain object, or None if none found
        """
        # Aliases allow e.g. several ARM names to share one toolchain.
        for tag, value in bsettings.GetItems('toolchain-alias'):
            if arch == tag:
                for alias in value.split():
                    if alias in self.toolchains:
                        return self.toolchains[alias]

        if not arch in self.toolchains:
            raise ValueError, ("No tool chain found for arch '%s'" % arch)
        return self.toolchains[arch]

    def ResolveReferences(self, var_dict, args):
        """Resolve variable references in a string

        This converts ${blah} within the string to the value of blah.
        This function works recursively.

        Args:
            var_dict: Dictionary containing variables and their values
            args: String containing make arguments
        Returns:
            Resolved string

        >>> bsettings.Setup()
        >>> tcs = Toolchains()
        >>> tcs.Add('fred', False)
        >>> var_dict = {'oblique' : 'OBLIQUE', 'first' : 'fi${second}rst', \
                        'second' : '2nd'}
        >>> tcs.ResolveReferences(var_dict, 'this=${oblique}_set')
        'this=OBLIQUE_set'
        >>> tcs.ResolveReferences(var_dict, 'this=${oblique}_set${first}nd')
        'this=OBLIQUE_setfi2ndrstnd'
        """
        re_var = re.compile('(\$\{[-_a-z0-9A-Z]{1,}\})')

        # Substitute one ${name} per pass; unknown names expand to ''.
        while True:
            m = re_var.search(args)
            if not m:
                break
            lookup = m.group(0)[2:-1]
            value = var_dict.get(lookup, '')
            args = args[:m.start(0)] + value + args[m.end(0):]
        return args

    def GetMakeArguments(self, board):
        """Returns 'make' arguments for a given board

        The flags are in a section called 'make-flags'. Flags are named
        after the target they represent, for example snapper9260=TESTING=1
        will pass TESTING=1 to make when building the snapper9260 board.

        References to other boards can be added in the string also. For
        example:

        [make-flags]
        at91-boards=ENABLE_AT91_TEST=1
        snapper9260=${at91-boards} BUILD_TAG=442
        snapper9g45=${at91-boards} BUILD_TAG=443

        This will return 'ENABLE_AT91_TEST=1 BUILD_TAG=442' for snapper9260
        and 'ENABLE_AT91_TEST=1 BUILD_TAG=443' for snapper9g45.

        A special 'target' variable is set to the board target.

        Args:
            board: Board object for the board to check.
        Returns:
            'make' flags for that board, or '' if none
        """
        self._make_flags['target'] = board.target
        arg_str = self.ResolveReferences(self._make_flags,
                           self._make_flags.get(board.target, ''))
        args = arg_str.split(' ')
        # Drop empty strings left over from repeated spaces.
        i = 0
        while i < len(args):
            if not args[i]:
                del args[i]
            else:
                i += 1
        return args

    def LocateArchUrl(self, fetch_arch):
        """Find a toolchain available online

        Look in standard places for available toolchains. At present the
        only standard place is at kernel.org.

        Args:
            arch: Architecture to look for, or 'list' for all
        Returns:
            If fetch_arch is 'list', a tuple:
                Machine architecture (e.g. x86_64)
                List of toolchains
            else
                URL containing this toolchain, if avaialble, else None
        """
        arch = command.OutputOneLine('uname', '-m')
        base = 'https://www.kernel.org/pub/tools/crosstool/files/bin'
        versions = ['4.9.0', '4.6.3', '4.6.2', '4.5.1', '4.2.4']
        links = []
        # Try each known gcc version in turn, newest first.
        for version in versions:
            url = '%s/%s/%s/' % (base, arch, version)
            print 'Checking: %s' % url
            response = urllib2.urlopen(url)
            html = response.read()
            parser = MyHTMLParser(fetch_arch)
            parser.feed(html)
            if fetch_arch == 'list':
                links += parser.links
            elif parser.arch_link:
                return url + parser.arch_link
        if fetch_arch == 'list':
            return arch, links
        return None

    def Download(self, url):
        """Download a file to a temporary directory

        Args:
            url: URL to download
        Returns:
            Tuple:
                Temporary directory name
                Full path to the downloaded archive file in that directory,
                    or None if there was an error while downloading
        """
        print 'Downloading: %s' % url
        leaf = url.split('/')[-1]
        tmpdir = tempfile.mkdtemp('.buildman')
        response = urllib2.urlopen(url)
        fname = os.path.join(tmpdir, leaf)
        fd = open(fname, 'wb')
        # NOTE(review): getheaders() is the Python 2 urllib2 API.
        meta = response.info()
        size = int(meta.getheaders('Content-Length')[0])
        done = 0
        block_size = 1 << 16
        status = ''

        # Read the file in chunks and show progress as we go
        while True:
            buffer = response.read(block_size)
            if not buffer:
                print chr(8) * (len(status) + 1), '\r',
                break
            done += len(buffer)
            fd.write(buffer)
            status = r'%10d MiB  [%3d%%]' % (done / 1024 / 1024,
                                             done * 100 / size)
            # Backspace over the previous status line to update in place.
            status = status + chr(8) * (len(status) + 1)
            print status,
            sys.stdout.flush()
        fd.close()
        if done != size:
            print 'Error, failed to download'
            os.remove(fname)
            fname = None
        return tmpdir, fname

    def Unpack(self, fname, dest):
        """Unpack a tar file

        Args:
            fname: Filename to unpack
            dest: Destination directory
        Returns:
            Directory name of the first entry in the archive, without the
            trailing /
        """
        stdout = command.Output('tar', 'xvfJ', fname, '-C', dest)
        # First line of tar -v output is the top-level directory.
        return stdout.splitlines()[0][:-1]

    def TestSettingsHasPath(self, path):
        """Check if buildman will find this toolchain

        Returns:
            True if the path is in settings, False if not
        """
        paths = self.GetPathList(False)
        return path in paths

    def ListArchs(self):
        """List architectures with available toolchains to download"""
        host_arch, archives = self.LocateArchUrl('list')
        re_arch = re.compile('[-a-z0-9.]*_([^-]*)-.*')
        arch_set = set()
        for archive in archives:
            # Remove the host architecture from the start
            arch = re_arch.match(archive[len(host_arch):])
            if arch:
                arch_set.add(arch.group(1))
        return sorted(arch_set)

    def FetchAndInstall(self, arch):
        """Fetch and install a new toolchain

        arch:
            Architecture to fetch, or 'list' to list

        Returns:
            0 on success, non-zero exit code on failure.
        """
        # Fist get the URL for this architecture
        col = terminal.Color()
        print col.Color(col.BLUE, "Downloading toolchain for arch '%s'" % arch)
        url = self.LocateArchUrl(arch)
        if not url:
            print ("Cannot find toolchain for arch '%s' - use 'list' to list" %
                   arch)
            return 2
        home = os.environ['HOME']
        dest = os.path.join(home, '.buildman-toolchains')
        if not os.path.exists(dest):
            os.mkdir(dest)

        # Download the tar file for this toolchain and unpack it
        tmpdir, tarfile = self.Download(url)
        if not tarfile:
            return 1
        print col.Color(col.GREEN, 'Unpacking to: %s' % dest),
        sys.stdout.flush()
        path = self.Unpack(tarfile, dest)
        os.remove(tarfile)
        os.rmdir(tmpdir)
        print

        # Check that the toolchain works
        print col.Color(col.GREEN, 'Testing')
        dirpath = os.path.join(dest, path)
        compiler_fname_list = self.ScanPath(dirpath, True)
        if not compiler_fname_list:
            print 'Could not locate C compiler - fetch failed.'
            return 1
        if len(compiler_fname_list) != 1:
            print col.Color(col.RED, 'Warning, ambiguous toolchains: %s' %
                            ', '.join(compiler_fname_list))
        toolchain = Toolchain(compiler_fname_list[0], True, True)

        # Make sure that it will be found by buildman
        if not self.TestSettingsHasPath(dirpath):
            print ("Adding 'download' to config file '%s'" %
                   bsettings.config_fname)
            bsettings.SetItem('toolchain', 'download', '%s/*/*' % dest)
        return 0
| {
"content_hash": "31180a54a8f9e8bba803cf0c8df8581d",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 80,
"avg_line_length": 35.74652777777778,
"alnum_prop": 0.5521126760563381,
"repo_name": "guileschool/beagleboard",
"id": "2076323d5d39825f17b0460d09ce3dd383bc50b3",
"size": "20590",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "u-boot/tools/buildman/toolchain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "960094"
},
{
"name": "Awk",
"bytes": "269"
},
{
"name": "Batchfile",
"bytes": "3451"
},
{
"name": "C",
"bytes": "62720528"
},
{
"name": "C++",
"bytes": "5261365"
},
{
"name": "CSS",
"bytes": "8362"
},
{
"name": "GDB",
"bytes": "3642"
},
{
"name": "HTML",
"bytes": "237884"
},
{
"name": "Lex",
"bytes": "13917"
},
{
"name": "Makefile",
"bytes": "429363"
},
{
"name": "Objective-C",
"bytes": "370078"
},
{
"name": "Perl",
"bytes": "358570"
},
{
"name": "Python",
"bytes": "884691"
},
{
"name": "Roff",
"bytes": "9384"
},
{
"name": "Shell",
"bytes": "96042"
},
{
"name": "Tcl",
"bytes": "967"
},
{
"name": "XSLT",
"bytes": "445"
},
{
"name": "Yacc",
"bytes": "26163"
}
],
"symlink_target": ""
} |
from google.appengine.ext import ndb
def improve(result, identificador = ""):  # NOTE(review): was "explota con las sesiones" ("blows up with sessions") -- known failure mode, confirm before reuse
    """Recursively flatten an ndb entity dict for serialization (Python 2).

    Replaces every ``ndb.Key`` found in *result* (directly or inside list
    values) with the referenced entity's own dict, and stamps each dict with
    an ``"id"`` key. Mutates *result* in place and also returns it.

    NOTE(review): nested list recursion calls ``improve(item.get().to_dict())``
    without an identificador, so nested ids rely on the explicit
    ``value[i]["id"] = item.id()`` assignment that follows.
    """
    # Top-level id is always (re)written, even when identificador is "".
    result["id"] = identificador
    for key, value in result.iteritems():
        if type(value) is list:
            i = 0
            for item in value:
                if isinstance(item,ndb.Key):
                    # Dereference the key and expand the entity in place.
                    value[i] = improve(item.get().to_dict())
                    value[i]["id"] = item.id()
                elif type(item) is not unicode:
                    # Non-string items are assumed to be nested dicts.
                    value[i] = improve(result = item)
                i = i + 1
        if isinstance(value,ndb.Key):
            # Scalar key property: expand it and record the original key id.
            result[key] = improve(result[key].get().to_dict())
            result[key]["id"] = value.id()
    return result
"content_hash": "f1e3c4ea749d7f72836a5fe04d7b4b32",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 38,
"alnum_prop": 0.5013850415512465,
"repo_name": "hermagrini/pochoclo-system",
"id": "c8033f2d8a1aae64d788ba0638e05bb43e25dc6f",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/lib/custom_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11679"
},
{
"name": "HTML",
"bytes": "13592"
},
{
"name": "JavaScript",
"bytes": "13446"
},
{
"name": "Python",
"bytes": "288750"
}
],
"symlink_target": ""
} |
"""Tests for Tradfri setup."""
from unittest.mock import patch
from homeassistant.components import tradfri
from homeassistant.helpers import device_registry as dr
from . import GATEWAY_ID
from tests.common import MockConfigEntry
async def test_entry_setup_unload(hass, mock_api_factory):
    """Test config entry setup and unload.

    Verifies that setting up a Tradfri config entry forwards setup to every
    platform, registers the gateway device, and that unloading forwards
    unload to every platform and shuts the API factory down exactly once.
    """
    entry = MockConfigEntry(
        domain=tradfri.DOMAIN,
        data={
            tradfri.CONF_HOST: "mock-host",
            tradfri.CONF_IDENTITY: "mock-identity",
            tradfri.CONF_KEY: "mock-key",
            tradfri.CONF_IMPORT_GROUPS: True,
            tradfri.CONF_GATEWAY_ID: GATEWAY_ID,
        },
    )
    entry.add_to_hass(hass)
    # Patch platform forwarding so no real platform setup runs; we only
    # count how many times it is invoked.
    with patch.object(
        hass.config_entries, "async_forward_entry_setup", return_value=True
    ) as setup:
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        assert setup.call_count == len(tradfri.PLATFORMS)
    # Setup must have created a device registry entry for the gateway.
    dev_reg = dr.async_get(hass)
    dev_entries = dr.async_entries_for_config_entry(dev_reg, entry.entry_id)
    assert dev_entries
    dev_entry = dev_entries[0]
    assert dev_entry.identifiers == {
        (tradfri.DOMAIN, entry.data[tradfri.CONF_GATEWAY_ID])
    }
    assert dev_entry.manufacturer == tradfri.ATTR_TRADFRI_MANUFACTURER
    assert dev_entry.name == tradfri.ATTR_TRADFRI_GATEWAY
    assert dev_entry.model == tradfri.ATTR_TRADFRI_GATEWAY_MODEL
    # Unload path: every platform unloads and the API factory is torn down.
    with patch.object(
        hass.config_entries, "async_forward_entry_unload", return_value=True
    ) as unload:
        assert await hass.config_entries.async_unload(entry.entry_id)
        await hass.async_block_till_done()
        assert unload.call_count == len(tradfri.PLATFORMS)
        assert mock_api_factory.shutdown.call_count == 1
async def test_remove_stale_devices(hass, mock_api_factory):
    """Test remove stale device registry entries.

    Pre-seeds the device registry with a device that the gateway no longer
    reports, then checks that entry setup replaces it so only the gateway
    device remains.
    """
    entry = MockConfigEntry(
        domain=tradfri.DOMAIN,
        data={
            tradfri.CONF_HOST: "mock-host",
            tradfri.CONF_IDENTITY: "mock-identity",
            tradfri.CONF_KEY: "mock-key",
            tradfri.CONF_IMPORT_GROUPS: True,
            tradfri.CONF_GATEWAY_ID: GATEWAY_ID,
        },
    )
    entry.add_to_hass(hass)
    # Seed a stale device attached to this config entry before setup runs.
    dev_reg = dr.async_get(hass)
    dev_reg.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers={(tradfri.DOMAIN, "stale_device_id")},
    )
    dev_entries = dr.async_entries_for_config_entry(dev_reg, entry.entry_id)
    assert len(dev_entries) == 1
    dev_entry = dev_entries[0]
    assert dev_entry.identifiers == {(tradfri.DOMAIN, "stale_device_id")}
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    dev_entries = dr.async_entries_for_config_entry(dev_reg, entry.entry_id)
    # Check that only the gateway device entry remains.
    assert len(dev_entries) == 1
    dev_entry = dev_entries[0]
    assert dev_entry.identifiers == {
        (tradfri.DOMAIN, entry.data[tradfri.CONF_GATEWAY_ID])
    }
    assert dev_entry.manufacturer == tradfri.ATTR_TRADFRI_MANUFACTURER
    assert dev_entry.name == tradfri.ATTR_TRADFRI_GATEWAY
    assert dev_entry.model == tradfri.ATTR_TRADFRI_GATEWAY_MODEL
| {
"content_hash": "d6876ce96e1af39b12d703fbd2bfd826",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 76,
"avg_line_length": 35.20652173913044,
"alnum_prop": 0.6640938561284347,
"repo_name": "rohitranjan1991/home-assistant",
"id": "2a26391c43f84e1486d86b1ab19bc658a23dbd7b",
"size": "3239",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/tradfri/test_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
"""
Test package for features
"""
| {
"content_hash": "599e155983dac44b59f43277a7a294b2",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 25,
"avg_line_length": 11.333333333333334,
"alnum_prop": 0.6470588235294118,
"repo_name": "bossjones/scarlett",
"id": "866151bee385354bcf22f7456ca966fb360d6dd4",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/features/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1392"
},
{
"name": "Python",
"bytes": "200377"
},
{
"name": "Ruby",
"bytes": "3715"
},
{
"name": "Shell",
"bytes": "80483"
}
],
"symlink_target": ""
} |
# Thin entry-point stub: this command delegates to the shared implementation
# in dblookup.py via the project's `extern` helper.
from extern import extern
# NOTE(review): presumably `extern` locates and executes dblookup.py as the
# real command body -- confirm against extern.py before relying on this.
extern("dblookup.py")
| {
"content_hash": "9a7a6568bb165e0775202f95cc704ad1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 25,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.7755102040816326,
"repo_name": "esatterly/splunk-cassandra",
"id": "6d193c08fb9c030fed8d73173255be5aadf1a392",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/dblookup-command.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import gtaskpool
import random
from datetime import datetime
import json
class Interface(object):
    """Bundle of crawler callbacks handed to worker tasks.

    Simply stores the three injected callables (proxy selection, proxy
    feedback reporting, user-agent selection) as attributes; performs no
    validation of its own.
    """

    def __init__(self, get_proxy, proxy_feedback, get_useragent):
        # Store each callable verbatim under its parameter name.
        for attr_name, callback in (
            ("get_proxy", get_proxy),
            ("proxy_feedback", proxy_feedback),
            ("get_useragent", get_useragent),
        ):
            setattr(self, attr_name, callback)
def get_useragent_wrapper(useragents):
    """Build a zero-argument callable returning a random user agent.

    Parameters:
        useragents: sequence of user-agent strings; the closure reads it
            live, so later mutations of the sequence are picked up.

    Returns:
        A function that returns a uniformly random element of *useragents*,
        or ``None`` when the sequence is empty (same contract as before).
    """
    def get_useragent():
        # Empty pool means "no user agent available".
        if not useragents:
            return None
        # random.choice is the idiomatic equivalent of the previous
        # randint-based indexing and picks uniformly as well.
        return random.choice(useragents)
    return get_useragent
def get_proxy_wrapper(next_proxy):
    """Adapt a proxy-manager selector into a plain URL -> proxy function.

    *next_proxy* is expected to return an object exposing a ``proxy``
    attribute; the returned callable unwraps that attribute.
    """
    def _unwrap(url):
        record = next_proxy(url)
        return record.proxy
    return _unwrap
def get_interfaces(proxymgr, useragents):
    """Assemble an Interface from a proxy manager and a user-agent pool."""
    proxy_getter = get_proxy_wrapper(proxymgr.next_proxy)
    agent_getter = get_useragent_wrapper(useragents)
    return Interface(
        get_proxy=proxy_getter,
        proxy_feedback=proxymgr.feedback,
        get_useragent=agent_getter,
    )
def retry_task(task, task_log, max_try):
    """Run *task* until it reports success or the attempt budget is spent.

    Each attempt's result dict is annotated with ``try_idx`` (1-based) and
    ``last_try`` and written to *task_log* via log_task_result. Returns as
    soon as a result reports ``finish``.
    """
    attempt = 0
    # `!=` (not `<`) is deliberate: a negative max_try never equals the
    # counter, i.e. it means "retry forever".
    while attempt != max_try:
        outcome = task()
        outcome['try_idx'] = attempt + 1
        outcome['last_try'] = bool(attempt + 1 == max_try or outcome['finish'])
        log_task_result(outcome, task_log)
        if outcome['finish']:
            return
        attempt += 1
def log_task_result(result, filehandle):
    """Timestamp *result* (mutated in place) and append it as one JSON line.

    NOTE(review): Python 2 only -- `.encode('utf-8')` yields bytes, and
    ``jstr + "\\n"`` would raise TypeError on Python 3.
    """
    # Side effect: adds a 'ts' key to the caller's dict.
    result['ts'] = str(datetime.now())
    jstr = json.dumps(result, ensure_ascii=False).encode('utf-8')
    filehandle.write(jstr + "\n")
def runtasks(task_generator, task_log, max_try=10):
    """Wrap each task from *task_generator* in retry logic and run the pool.

    Python 2 only: uses ``task_generator.next()`` and the old
    ``except StopIteration, e`` syntax.
    """
    def gen_task():
        # Lazily convert raw tasks into pool Tasks that retry up to max_try.
        while True:
            try:
                task = task_generator.next()
            except StopIteration, e:  # noqa: e is unused; kept as-is
                return
            yield gtaskpool.Task(retry_task, [task, task_log, max_try])
    gtaskpool.runtasks(gen_task())
| {
"content_hash": "a5e40fc47b79997fd942a3493a229b8d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 71,
"avg_line_length": 27.78125,
"alnum_prop": 0.5961754780652418,
"repo_name": "WalnutATiie/google_search",
"id": "d14ca93c2d2b02cce47e6850643da039bbbd375a",
"size": "1815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gtaskpool/gtaskpool_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28608"
}
],
"symlink_target": ""
} |
"""
This allows a simple component to be moved around the screen.
"""
from enable.example_support import DemoFrame, demo_main
from traits.api import Float
from enable.api import Component, Pointer, Window
class Box(Component):
    """
    The box moves wherever the user clicks and drags.

    State machine: "normal" until a left-click switches to "moving"; mouse
    moves then drag the box, and releasing (or leaving the window) returns
    to "normal". Python 2 code (print statement in normal_key_pressed).
    """
    # Cursor shapes for the two interaction states.
    normal_pointer = Pointer("arrow")
    moving_pointer = Pointer("hand")
    # Click offset from the box origin, so dragging doesn't snap the corner
    # to the cursor.
    offset_x = Float
    offset_y = Float
    # Idle vs dragging fill colors (RGBA).
    fill_color = (0.8, 0.0, 0.1, 1.0)
    moving_color = (0.0, 0.8, 0.1, 1.0)
    resizable = ""
    def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
        # Fill color reflects the current interaction state.
        with gc:
            if self.event_state == "moving":
                gc.set_fill_color(self.moving_color)
            else:
                gc.set_fill_color(self.fill_color)
            dx, dy = self.bounds
            x, y = self.position
            gc.rect(x, y, dx, dy)
            gc.fill_path()
            # draw line around outer box
            gc.set_stroke_color((0,0,0,1))
            gc.rect(self.outer_x, self.outer_y, self.outer_width,
                    self.outer_height)
            gc.stroke_path()
        return
    def normal_key_pressed(self, event):
        # Debug aid: echo key presses received while idle.
        print "Key:", event.character
    def normal_left_down(self, event):
        # Enter drag mode and claim the mouse so we keep receiving moves.
        self.event_state = "moving"
        event.window.set_pointer(self.moving_pointer)
        event.window.set_mouse_owner(self, event.net_transform())
        self.offset_x = event.x - self.x
        self.offset_y = event.y - self.y
        event.handled = True
        return
    def moving_mouse_move(self, event):
        # Follow the cursor, preserving the initial click offset.
        self.position = [event.x-self.offset_x, event.y-self.offset_y]
        event.handled = True
        self.request_redraw()
        return
    def moving_left_up(self, event):
        # Leave drag mode and release the mouse grab.
        self.event_state = "normal"
        event.window.set_pointer(self.normal_pointer)
        event.window.set_mouse_owner(None)
        event.handled = True
        self.request_redraw()
        return
    def moving_mouse_leave(self, event):
        # Treat leaving the window as dropping the box.
        self.moving_left_up(event)
        event.handled = True
        return
class MyFrame(DemoFrame):
    """Demo frame hosting a single draggable Box."""
    def _create_window(self):
        # Box is placed away from the origin so the drag is visible.
        box = Box(bounds=[100,100], position=[50,50], padding=15)
        return Window(self, -1, component=box)
if __name__ == "__main__":
    # Save demo so that it doesn't get garbage collected when run within an
    # existing event loop (i.e. from ipython) -- the reference must outlive
    # this block.
    demo = demo_main(MyFrame, title="Click and drag to move the box")
| {
"content_hash": "9042ea607eafef965e5f79d02e474cf3",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 72,
"avg_line_length": 28.813953488372093,
"alnum_prop": 0.5907990314769975,
"repo_name": "tommy-u/enable",
"id": "f3fcf69e3cb6876097156a1bc2b5a4ab92cbe1f4",
"size": "2478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/enable/basic_move.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "240"
},
{
"name": "C",
"bytes": "5526949"
},
{
"name": "C++",
"bytes": "3058044"
},
{
"name": "DIGITAL Command Language",
"bytes": "35819"
},
{
"name": "Groff",
"bytes": "236"
},
{
"name": "Makefile",
"bytes": "58238"
},
{
"name": "Objective-C",
"bytes": "16551"
},
{
"name": "Python",
"bytes": "2202660"
},
{
"name": "Shell",
"bytes": "6286"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
    """Squashed initial migration for the proposals app.

    Combines 0001-0003: keyword vocabulary, three tagging through-tables
    (official / user / editor), a proxy for additional speakers, and the
    Proposal model itself. Migration bodies must stay byte-stable once
    applied; only comments are added here.
    """

    # Historical migrations this squash replaces.
    replaces = [('proposals', '0001_initial'), ('proposals', '0002_proposaladditionalspeakerthrough'), ('proposals', '0003_remove_under_represented_questions')]
    dependencies = [
        ('symposion_proposals', '0003_standardize_markdown_links'),
        ('symposion_proposals', '0001_initial'),
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        # Shared keyword vocabulary used by all three tagging tables below.
        migrations.CreateModel(
            name='ProposalKeyword',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True, verbose_name='Name')),
                ('slug', models.SlugField(max_length=100, unique=True, verbose_name='Slug')),
                ('official', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'Keyword',
                'verbose_name_plural': 'Keywords',
            },
        ),
        # Through-table for official keywords.
        migrations.CreateModel(
            name='TaggedProposal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.IntegerField(db_index=True, verbose_name='Object id')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='proposals_taggedproposal_tagged_items', to='contenttypes.ContentType', verbose_name='Content type')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='proposals_taggedproposal_items', to='proposals.ProposalKeyword')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Through-table for submitter-chosen keywords.
        migrations.CreateModel(
            name='UserTaggedProposal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.IntegerField(db_index=True, verbose_name='Object id')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='proposals_usertaggedproposal_tagged_items', to='contenttypes.ContentType', verbose_name='Content type')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='proposals_usertaggedproposal_items', to='proposals.ProposalKeyword')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Through-table for editor-assigned keywords.
        migrations.CreateModel(
            name='EditorTaggedProposal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.IntegerField(db_index=True, verbose_name='Object id')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='proposals_editortaggedproposal_tagged_items', to='contenttypes.ContentType', verbose_name='Content type')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='proposals_editortaggedproposal_items', to='proposals.ProposalKeyword')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Proxy over symposion's AdditionalSpeaker (no new table).
        migrations.CreateModel(
            name='ProposalAdditionalSpeakerThrough',
            fields=[
            ],
            options={
                'constraints': [],
                'proxy': True,
                'verbose_name': 'Additional Speaker',
                'indexes': [],
            },
            bases=('symposion_proposals.additionalspeaker',),
        ),
        # Concrete Proposal model, multi-table child of ProposalBase.
        migrations.CreateModel(
            name='Proposal',
            fields=[
                ('proposalbase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='symposion_proposals.ProposalBase')),
                ('audience_level', models.IntegerField(choices=[(1, 'Novice'), (3, 'Intermediate'), (2, 'Experienced')])),
                ('slides_url', models.URLField(blank=True, default='', help_text='Location of slides for this proposal (e.g. SlideShare, Google Drive).', max_length=2083, verbose_name='Slides')),
                ('code_url', models.URLField(blank=True, default='', help_text="Location of this proposal's code repository (e.g. Github).", max_length=2083, verbose_name='Repository')),
                ('first_time_at_pydata', models.CharField(blank=True, choices=[('', '----'), ('Y', 'Yes'), ('N', 'No'), ('O', 'Prefer not to say')], default='', max_length=1, verbose_name='Is this your first time speaking at a PyData event?')),
                ('affiliation', models.CharField(max_length=200)),
                ('phone_number', models.CharField(blank=True, default='', max_length=100, verbose_name='Phone number - to be used for last-minute schedule changes')),
                ('recording_release', models.BooleanField(default=True, help_text='By submitting your proposal, you agree to give permission to the conference organizers to record, edit, and release audio and/or video of your presentation. If you do not agree to this, please uncheck this box.')),
                ('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Created')),
                ('date_last_modified', models.DateTimeField(auto_now=True, null=True, verbose_name='Last Modified')),
                ('editor_keywords', taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='editor_tagged_proposals', through='proposals.EditorTaggedProposal', to='proposals.ProposalKeyword', verbose_name='Editor Keywords')),
                ('official_keywords', taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='official_tagged_proposals', through='proposals.TaggedProposal', to='proposals.ProposalKeyword', verbose_name='Official Keywords')),
                ('user_keywords', taggit.managers.TaggableManager(blank=True, help_text='Please add keywords as a comma-separated list.', related_name='user_tagged_proposals', through='proposals.UserTaggedProposal', to='proposals.ProposalKeyword', verbose_name='Additional Keywords')),
            ],
            bases=('symposion_proposals.proposalbase',),
        ),
    ]
| {
"content_hash": "97e79741b14eb84e022b5c9f22fe8544",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 297,
"avg_line_length": 68.0721649484536,
"alnum_prop": 0.6259276086627291,
"repo_name": "pydata/conf_site",
"id": "6bd12d844aa5e943685767b37a8a1a81b0dc6dc0",
"size": "6652",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "conf_site/proposals/migrations/0001_initial_squashed_0003_remove_under_represented_questions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24945"
},
{
"name": "HTML",
"bytes": "115341"
},
{
"name": "JavaScript",
"bytes": "244408"
},
{
"name": "Jinja",
"bytes": "901"
},
{
"name": "Python",
"bytes": "309825"
}
],
"symlink_target": ""
} |
from metrics_consumer import *
| {
"content_hash": "efa48e1a41d6917a2e5255de3f6c2629",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.8064516129032258,
"repo_name": "lmacken/moksha",
"id": "e8f4382ac90585671e9c212a2e397ba8aaad45a8",
"size": "31",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moksha/apps/metrics/moksha/apps/metrics/consumers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1347981"
},
{
"name": "Python",
"bytes": "653985"
},
{
"name": "Shell",
"bytes": "3879"
}
],
"symlink_target": ""
} |
from .videos import load, find_image, clip, duration, extract_frame | {
"content_hash": "97e827576274c16096afad4a788f52e9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 67,
"avg_line_length": 67,
"alnum_prop": 0.7910447761194029,
"repo_name": "groupe-sii/ogham",
"id": "0795efc19e81452ab0c8e6fd7de8d634b72bf820",
"size": "67",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".tools/showcase-recorder/showcase-launcher/utils/videos/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11322"
},
{
"name": "Dockerfile",
"bytes": "206"
},
{
"name": "FreeMarker",
"bytes": "34582"
},
{
"name": "Groovy",
"bytes": "478648"
},
{
"name": "HTML",
"bytes": "1651483"
},
{
"name": "Java",
"bytes": "3886709"
},
{
"name": "JavaScript",
"bytes": "14796"
},
{
"name": "Python",
"bytes": "33553"
},
{
"name": "SCSS",
"bytes": "28548"
},
{
"name": "Shell",
"bytes": "13021"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import sys
from collections import namedtuple
from turtle import TurtleScreen, RawTurtle, TK
from ninjaturtle.render import BaseRenderer
# Python 2/3 Tk import shim.
if sys.version_info[0] < 3:
    from Tkinter import Tk, mainloop
else:
    from tkinter import Tk
    # NOTE(review): on Python 3 `mainloop` is set to a False sentinel instead
    # of a function -- presumably callers truth-test it; confirm before use.
    mainloop = False
# Pairing of a turtle backend with its model data.
# NOTE(review): TkTurtle is not used within the visible class below.
TkTurtle = namedtuple('TkTurtle', 'turtle data')
class TkRenderer(BaseRenderer):
    """Tk-based renderer: mirrors model state onto `turtle` module turtles."""
    def __init__(self, width, height, title="NinjaTurtle"):
        self.width = width
        self.height = height
        self.window_title = title
        root = Tk()
        root.wm_title(self.window_title)
        window = TK.Canvas(master=root, width=self.width, height=self.height)
        window.pack()
        self.screen = TurtleScreen(window)
        # Disable auto-animation; render() drives explicit screen updates.
        self.screen.tracer(0, 0)
        # Maps model.id -> model (each model carries its Tk backend).
        self.turtles = dict()
    def create_turtle(self, model, init=None, shape='classic'):
        # TODO use init
        # NOTE(review): the `shape` parameter is also unused here.
        backend = RawTurtle(canvas=self.screen)
        model.backend = backend
        self.turtles[model.id] = model
    def render(self):
        """Push each model's position/heading to its Tk turtle, then repaint."""
        for model in self.turtles.values():
            data = model.data
            turtle = model.backend
            # data[0]/data[1] are x/y, data[4] is heading (per the setpos /
            # setheading calls); only touch the turtle when values changed.
            if data[0] != turtle.xcor() or data[1] != turtle.ycor():
                turtle.setpos(data[0], data[1])
            if turtle.heading() != data[4]:
                turtle.setheading(data[4])
        self.screen.update()
| {
"content_hash": "e2f674b70642cc944793d7186d4895d1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 29.1875,
"alnum_prop": 0.6216987865810135,
"repo_name": "AllTheWayDown/ninjaturtle",
"id": "d328d72758f6c7f9bf6e482b15a1341eb66e2295",
"size": "1401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ninjaturtle/tk_renderer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23000"
}
],
"symlink_target": ""
} |
import sys
import os
# NOTE(review): `shlex` appears unused in this configuration -- candidate
# for removal.
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get project root and add it to the PYTHONPATH so that we can import the package
# and obtain version information from it directly.
project_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, project_root)
# Imported for its __version__, used below for `version`/`release`.
import djangocms_contact
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'djangocms-contact'
copyright = u'2015, Authors name'
author = u'Authors name'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djangocms_contact.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read the Docs injects its own theme; only apply sphinx_rtd_theme when
# building locally (READTHEDOCS env var is absent or not 'True').
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'djangocms-contactdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'djangocms-contact.tex', u'djangocms-contact Documentation',
u'', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page: (source start file, name, description,
# authors, manual section). The name was left empty by the project
# template, which would produce a nameless man page; use the project
# name, consistent with htmlhelp_basename and texinfo_documents.
man_pages = [
    (master_doc, 'djangocms-contact', u'djangocms-contact Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'djangocms-contact', u'djangocms-contact Documentation',
author, 'djangocms-contact', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "3ab5d963fe2587f7d7f833e063dfa978",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 81,
"avg_line_length": 32.19655172413793,
"alnum_prop": 0.7067580593338332,
"repo_name": "aaronsgithub/djangocms-contact",
"id": "fa3bc433a6ccbfba54aa4ba4d18a564113c373df",
"size": "9755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1334"
},
{
"name": "Python",
"bytes": "1880"
}
],
"symlink_target": ""
} |
# NOTE(review): DEBUG must be False in production; TEMPLATE_DEBUG mirrors it.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Ben Parisi', 'bparisi@live.unc.edu'),
)
MANAGERS = ADMINS
# SECURITY(review): database credentials are committed here in plain text;
# move them to environment variables or an untracked local settings file
# and rotate the exposed password.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'crime',
        'USER': 'ben',
        'PASSWORD': 'nerRaWhB',
        # Unix-socket path doubles as HOST for local MySQL connections.
        'HOST': '/var/run/mysqld/mysqld.sock',
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# NOTE(review): hardcoded per-user absolute path; consider deriving it from
# os.path.dirname(__file__) so the project is relocatable.
STATIC_ROOT = '/home/vaughn.hagerty/django/PoliceReportScraper/odnc_police/odnc_police/collectedstatic/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # Bug fix: the trailing comma below is required. Without it the
    # parentheses are plain grouping and STATICFILES_DIRS becomes a *string*,
    # which Django rejects (the setting must be a tuple or list of paths).
    "/home/vaughn.hagerty/django/PoliceReportScraper/odnc_police/odnc_police/static/",
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY(review): this key is committed to version control; rotate it and
# load it from the environment instead of hardcoding it here.
SECRET_KEY = '9+gb)l!y71yrd!j3(i@hprj4o-e&w2=-vehxwowd4r=hqsr-nt'
# List of callables that know how to import templates from various sources.
# NOTE(review): TEMPLATE_LOADERS is a pre-Django-1.8 setting; newer Django
# versions expect this inside the TEMPLATES['OPTIONS'] dict.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# Request/response middleware, applied in order (pre-1.10 MIDDLEWARE_CLASSES
# style setting).
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'odnc_police.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'odnc_police.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # Bug fix: the path was missing its leading '/' and was therefore
    # relative, unlike every other path in this file (all rooted at
    # /home/vaughn.hagerty), so the templates directory was never found.
    '/home/vaughn.hagerty/django/PoliceReportScraper/odnc_police/odnc_police/templates',
)
# Applications enabled for this project; the local 'odnc_police' app last.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    'django.contrib.admindocs',
    'odnc_police',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# NOTE(review): the LOGGING dict below is disabled — it is wrapped in a bare
# string literal, so Django falls back to its default logging configuration.
"""
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
"""
| {
"content_hash": "8239719c610caa2a5fd3d3f1dac79fc1",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 105,
"avg_line_length": 33.7515923566879,
"alnum_prop": 0.701641819211172,
"repo_name": "OpenData-NC/PoliceReportScraper",
"id": "c1c74ddb1e25da4566f68063b5b269b53d9d1342",
"size": "5343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odnc_police/odnc_police/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6697"
},
{
"name": "CSS",
"bytes": "35928"
},
{
"name": "HTML",
"bytes": "1247983"
},
{
"name": "JavaScript",
"bytes": "118237"
},
{
"name": "Makefile",
"bytes": "6931"
},
{
"name": "Python",
"bytes": "121695"
},
{
"name": "Shell",
"bytes": "6195"
}
],
"symlink_target": ""
} |
import numpy as np
import random
import itertools
import scipy.misc
import matplotlib.pyplot as plt
class gameOb():
    """Plain record describing one grid-world object (hero, goal or fire)."""

    def __init__(self, coordinates, size, intensity, channel, reward, name):
        # Unpack the (x, y) grid position.
        self.x, self.y = coordinates[0], coordinates[1]
        # Rendering attributes: footprint, brightness, and RGB channel index.
        self.size = size
        self.intensity = intensity
        self.channel = channel
        # Reward granted when the hero lands on this object (None for hero).
        self.reward = reward
        self.name = name
class gameEnv():
    """Grid-world environment: a hero collects 'goal' cells (+1) and should
    avoid 'fire' cells (-1) on a size x size board rendered as an RGB array.
    """
    def __init__(self,partial,size):
        # partial: when True, observations are a 3x3 window around the hero
        # rather than the full board.
        self.sizeX = size
        self.sizeY = size
        self.actions = 4
        self.objects = []
        self.partial = partial
        a = self.reset()
        # Side effect: draws the initial state into the current pyplot figure.
        plt.imshow(a,interpolation="nearest")
    def reset(self):
        """Re-populate the board (1 hero, 4 goals, 2 fires) at random free
        cells and return the rendered initial observation."""
        self.objects = []
        # The hero must be appended first: moveChar assumes objects[0] is it.
        hero = gameOb(self.newPosition(),1,1,2,None,'hero')
        self.objects.append(hero)
        bug = gameOb(self.newPosition(),1,1,1,1,'goal')
        self.objects.append(bug)
        hole = gameOb(self.newPosition(),1,1,0,-1,'fire')
        self.objects.append(hole)
        bug2 = gameOb(self.newPosition(),1,1,1,1,'goal')
        self.objects.append(bug2)
        hole2 = gameOb(self.newPosition(),1,1,0,-1,'fire')
        self.objects.append(hole2)
        bug3 = gameOb(self.newPosition(),1,1,1,1,'goal')
        self.objects.append(bug3)
        bug4 = gameOb(self.newPosition(),1,1,1,1,'goal')
        self.objects.append(bug4)
        state = self.renderEnv()
        self.state = state
        return state
    def moveChar(self,direction):
        # 0 - up, 1 - down, 2 - left, 3 - right
        hero = self.objects[0]
        heroX = hero.x
        heroY = hero.y
        penalize = 0.
        # Bounds checks keep the hero inside the sizeX x sizeY field.
        if direction == 0 and hero.y >= 1:
            hero.y -= 1
        if direction == 1 and hero.y <= self.sizeY-2:
            hero.y += 1
        if direction == 2 and hero.x >= 1:
            hero.x -= 1
        if direction == 3 and hero.x <= self.sizeX-2:
            hero.x += 1
        if hero.x == heroX and hero.y == heroY:
            # NOTE(review): bumping a wall currently costs 0.0, so the
            # penalty mechanism is effectively disabled — confirm intended.
            penalize = 0.0
        self.objects[0] = hero
        return penalize
    def newPosition(self):
        """Return a random (x, y) cell not occupied by any existing object."""
        iterables = [ range(self.sizeX), range(self.sizeY)]
        points = []
        for t in itertools.product(*iterables):
            points.append(t)
        currentPositions = []
        for objectA in self.objects:
            if (objectA.x,objectA.y) not in currentPositions:
                currentPositions.append((objectA.x,objectA.y))
        # Remove occupied cells, then sample uniformly from the remainder.
        for pos in currentPositions:
            points.remove(pos)
        location = np.random.choice(range(len(points)),replace=False)
        return points[location]
    def checkGoal(self):
        """Return (reward, done) for the hero's current cell; a consumed
        goal/fire respawns elsewhere so object counts stay constant."""
        others = []
        for obj in self.objects:
            if obj.name == 'hero':
                hero = obj
            else:
                others.append(obj)
        # NOTE(review): 'ended' is never set True, so done is always False
        # and episodes never terminate via this method.
        ended = False
        for other in others:
            if hero.x == other.x and hero.y == other.y:
                self.objects.remove(other)
                if other.reward == 1:
                    self.objects.append(gameOb(self.newPosition(),1,1,1,1,'goal'))
                else:
                    self.objects.append(gameOb(self.newPosition(),1,1,0,-1,'fire'))
                return other.reward,False
        if ended == False:
            return 0.0,False
    def renderEnv(self):
        """Render the board as an 84x84x3 uint8-style image: white border,
        black field, each object drawn into its colour channel."""
        #a = np.zeros([self.sizeY,self.sizeX,3])
        a = np.ones([self.sizeY+2,self.sizeX+2,3])
        a[1:-1,1:-1,:] = 0
        hero = None
        for item in self.objects:
            # +1 offsets account for the one-pixel white border.
            a[item.y+1:item.y+item.size+1,item.x+1:item.x+item.size+1,item.channel] = item.intensity
            if item.name == 'hero':
                hero = item
        if self.partial == True:
            # Crop a 3x3 neighbourhood; the border offset makes
            # hero.y:hero.y+3 the window centred on the hero.
            a = a[hero.y:hero.y+3,hero.x:hero.x+3,:]
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this
        # code requires an old SciPy (with Pillow installed) to run.
        b = scipy.misc.imresize(a[:,:,0],[84,84,1],interp='nearest')
        c = scipy.misc.imresize(a[:,:,1],[84,84,1],interp='nearest')
        d = scipy.misc.imresize(a[:,:,2],[84,84,1],interp='nearest')
        a = np.stack([b,c,d],axis=2)
        return a
    def step(self,action):
        """Apply one action and return (state, reward, done)."""
        penalty = self.moveChar(action)
        reward,done = self.checkGoal()
        state = self.renderEnv()
        if reward == None:
            # Debug trace for the unexpected None-reward case (checkGoal
            # should always return a float reward).
            print(done)
            print(reward)
            print(penalty)
            return state,(reward+penalty),done
        else:
            return state,(reward+penalty),done
"content_hash": "334b445a0be827a7d98469ed91398949",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 100,
"avg_line_length": 34,
"alnum_prop": 0.5342371323529411,
"repo_name": "awjuliani/DeepRL-Agents",
"id": "2ff25f662ea2db5e414f2cfd46679f2ca628db82",
"size": "4352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gridworld.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "228202"
},
{
"name": "Python",
"bytes": "8709"
}
],
"symlink_target": ""
} |
"""Utility to provide submission and comment statistics in a subreddit."""
from __future__ import print_function
import codecs
import re
import sys
import time
import webbrowser
from collections import defaultdict
from datetime import datetime
from praw import Reddit
from praw.errors import ExceptionList, RateLimitExceeded
from praw.helpers import flatten_tree
from praw.objects import Redditor
from requests.exceptions import HTTPError
from six import iteritems, itervalues, text_type as tt
from update_checker import update_check
from . import __version__
from .helpers import arg_parser
# One day expressed in seconds; used for all date-window arithmetic below.
DAYS_IN_SECONDS = 60 * 60 * 24
# Maximum self-post body length accepted by reddit; posts are shrunk to fit.
MAX_BODY_SIZE = 40000
def safe_title(submission):
    """Return the submission's title with newlines collapsed to spaces and
    surrounding whitespace removed."""
    flattened = submission.title.replace('\n', ' ')
    return flattened.strip()
class SubRedditStats(object):
    """Contain all the functionality of the subreddit_stats command."""

    # Markdown fragments used to assemble the stats self-post.
    post_prefix = tt('Subreddit Stats:')
    post_header = tt('---\n###{0}\n')
    post_footer = tt('>Generated with [BBoe](/u/bboe)\'s [Subreddit Stats]'
                     '(https://github.com/praw-dev/prawtools) \n{0}'
                     'SRS Marker: {1}')
    # Matches the trailing "SRS Marker: <timestamp>" line of a previous post.
    re_marker = re.compile(r'SRS Marker: (\d+)')
    @staticmethod
    def _previous_max(submission):
        """Extract the max_date timestamp embedded in a previous stats post.

        Exits the process if the marker is missing.
        """
        try:
            val = SubRedditStats.re_marker.findall(submission.selftext)[-1]
            return float(val)
        except (IndexError, TypeError):
            print('End marker not found in previous submission. Aborting')
            sys.exit(1)
    @staticmethod
    def _permalink(permalink):
        """Return a short relative markdown link for a permalink URL.

        Token index 6 is the submission id and token 8 the comment id in a
        reddit permalink split on '/'.
        """
        tokens = permalink.split('/')
        if tokens[8] == '': # submission
            return tt('/{0}').format(tokens[6])
        else: # comment
            return tt('/comments/{0}/_/{1}?context=999').format(tokens[6],
                                                                tokens[8])
    @staticmethod
    def _pts(points):
        """Return a human-readable point count ('1 pt' / 'N pts')."""
        return '1 pt' if points == 1 else '{0} pts'.format(points)
    @staticmethod
    def _user(user):
        """Return a markdown representation of a redditor (or '_deleted_')."""
        if user is None:
            return '_deleted_'
        elif isinstance(user, Redditor):
            user = str(user)
        # NOTE(review): the format string only uses {1} (the raw name); the
        # underscore-escaped {0} argument is computed but discarded — confirm
        # whether markdown escaping was intended here.
        return tt('u/{1}').format(user.replace('_', r'\_'), user)
    @staticmethod
    def _submit(func, *args, **kwargs):
        """Call func, sleeping and retrying whenever reddit rate-limits us."""
        def sleep(sleep_time):
            print('\tSleeping for {0} seconds'.format(sleep_time))
            time.sleep(sleep_time)
        while True:
            try:
                return func(*args, **kwargs)
            except RateLimitExceeded as error:
                sleep(error.sleep_time)
            except ExceptionList as exception_list:
                # Only retry when the bundled errors include a rate limit;
                # anything else is re-raised.
                for error in exception_list.errors:
                    if isinstance(error, RateLimitExceeded):
                        sleep(error.sleep_time)
                        break
                else:
                    raise
    def __init__(self, subreddit, site, verbosity, distinguished):
        """Initialize the SubRedditStats instance with config options."""
        self.reddit = Reddit(str(self), site, disable_update_check=True, api_request_delay=1)
        self.subreddit = self.reddit.get_subreddit(subreddit)
        self.verbosity = verbosity
        self.distinguished = distinguished
        self.submissions = []
        self.comments = []
        # author name -> list of submissions / comments by that author.
        self.submitters = defaultdict(list)
        self.commenters = defaultdict(list)
        self.min_date = 0
        # Exclude the most recent three days so scores have settled.
        self.max_date = time.time() - DAYS_IN_SECONDS * 3
        self.prev_srs = None
    def login(self, id, secret):
        """Login and provide debugging output if so wanted."""
        # NOTE(review): parameter 'id' shadows the builtin of the same name.
        if self.verbosity > 0:
            print('Logging in using OAuth...')
        if self.verbosity > 1:
            print('Client ID: ' + id)
        self.reddit.set_oauth_app_info(client_id=id, client_secret=secret,
                                       redirect_uri='http://127.0.0.1:65010/authorize_callback')
        # Open the authorize URL in a browser and read the code interactively.
        url = self.reddit.get_authorize_url('srs', ['identity', 'read', 'submit'], True)
        webbrowser.open(url)
        sys.stdout.write('Code: ')
        sys.stdout.flush()
        code = sys.stdin.readline().strip()
        access_info = self.reddit.get_access_information(code)
        self.reddit.set_access_credentials(**access_info)
        print("Logged in as " + self.reddit.get_me().name + '.')
    def msg(self, msg, level, overwrite=False):
        """Output a message to the screen if the verbosity is sufficient."""
        if self.verbosity and self.verbosity >= level:
            sys.stdout.write(msg)
            if overwrite:
                # Carriage return so the next msg overwrites this line.
                sys.stdout.write('\r')
                sys.stdout.flush()
            else:
                sys.stdout.write('\n')
    def prev_stat(self, prev_url):
        """Load the previous subreddit stats page."""
        submission = self.reddit.get_submission(prev_url)
        self.min_date = self._previous_max(submission)
        self.prev_srs = prev_url
    def fetch_recent_submissions(self, max_duration, after, exclude_self,
                                 exclude_link, since_last=True):
        """Fetch recent submissions in subreddit with boundaries.
        Does not include posts within the last three days as their scores may
        not be representative.
        :param max_duration: When set, specifies the number of days to include
        :param after: When set, fetch all submission after this submission id.
        :param exclude_self: When true, don't include self posts.
        :param exclude_link: When true, don't include links.
        :param since_last: When true use info from last submission to determine
            the stop point
        :returns: True if any submissions were found.
        """
        if exclude_self and exclude_link:
            raise TypeError('Cannot set both exclude_self and exclude_link.')
        if max_duration:
            self.min_date = self.max_date - DAYS_IN_SECONDS * max_duration
        params = {'after': after} if after else None
        self.msg('DEBUG: Fetching submissions', 1)
        for submission in self.subreddit.get_new(limit=None, params=params):
            # Listing is newest-first: skip too-new, stop at too-old.
            if submission.created_utc > self.max_date:
                continue
            if submission.created_utc <= self.min_date:
                break
            if since_last and str(submission.author) == str(self.reddit.user) \
                    and submission.title.startswith(self.post_prefix):
                # Use info in this post to update the min_date
                # And don't include this post
                self.msg(tt('Found previous: {0}')
                         .format(safe_title(submission)), 2)
                if self.prev_srs is None:  # Only use the most recent
                    self.min_date = max(self.min_date,
                                        self._previous_max(submission))
                    self.prev_srs = submission.permalink
                continue
            if exclude_self and submission.is_self:
                continue
            if exclude_link and not submission.is_self:
                continue
            self.submissions.append(submission)
        num_submissions = len(self.submissions)
        self.msg('DEBUG: Found {0} submissions'.format(num_submissions), 1)
        if num_submissions == 0:
            return False
        # Update real min and max dates
        self.submissions.sort(key=lambda x: x.created_utc)
        self.min_date = self.submissions[0].created_utc
        self.max_date = self.submissions[-1].created_utc
        return True
    def fetch_top_submissions(self, top, exclude_self, exclude_link):
        """Fetch top 1000 submissions by some top value.
        :param top: One of week, month, year, all
        :param exclude_self: When true, don't include self posts.
        :param exclude_link: When true, include only self posts
        :returns: True if any submissions were found.
        """
        if exclude_self and exclude_link:
            raise TypeError('Cannot set both exclude_self and exclude_link.')
        if top not in ('day', 'week', 'month', 'year', 'all'):
            raise TypeError('{0!r} is not a valid top value'.format(top))
        self.msg('DEBUG: Fetching submissions', 1)
        params = {'t': top}
        for submission in self.subreddit.get_top(limit=None, params=params):
            if exclude_self and submission.is_self:
                continue
            if exclude_link and not submission.is_self:
                continue
            self.submissions.append(submission)
        num_submissions = len(self.submissions)
        self.msg('DEBUG: Found {0} submissions'.format(num_submissions), 1)
        if num_submissions == 0:
            return False
        # Update real min and max dates
        self.submissions.sort(key=lambda x: x.created_utc)
        self.min_date = self.submissions[0].created_utc
        self.max_date = self.submissions[-1].created_utc
        return True
    def process_submitters(self):
        """Group submissions by author."""
        self.msg('DEBUG: Processing Submitters', 1)
        for submission in self.submissions:
            # Skip deleted authors; skip distinguished posts unless enabled.
            if submission.author and (self.distinguished or
                                      submission.distinguished is None):
                self.submitters[str(submission.author)].append(submission)
    def process_commenters(self):
        """Group comments by author."""
        num = len(self.submissions)
        self.msg('DEBUG: Processing Commenters on {0} submissions'.format(num),
                 1)
        for i, submission in enumerate(self.submissions):
            # Explicitly fetch as many comments as possible by top sort
            # Note that this is the first time the complete submission object
            # is obtained. Only a partial object was returned when getting the
            # subreddit listings.
            try:
                submission = self.reddit.get_submission(submission.permalink,
                                                        comment_limit=None,
                                                        comment_sort='top')
            except HTTPError as exc:
                print('Ignoring comments on {0} due to HTTP status {1}'
                      .format(submission.url, exc.response.status_code))
                continue
            self.msg('{0}/{1} submissions'.format(i + 1, num), 2,
                     overwrite=True)
            if submission.num_comments == 0:
                continue
            skipped = submission.replace_more_comments()
            if skipped:
                skip_num = sum(x.count for x in skipped)
                print('Ignored {0} comments ({1} MoreComment objects)'
                      .format(skip_num, len(skipped)))
            comments = [x for x in flatten_tree(submission.comments) if
                        self.distinguished or x.distinguished is None]
            self.comments.extend(comments)
            # pylint: disable=W0212
            for orphans in itervalues(submission._orphaned):
                self.comments.extend(orphans)
            # pylint: enable=W0212
        for comment in self.comments:
            if comment.author:
                self.commenters[str(comment.author)].append(comment)
    def basic_stats(self):
        """Return a markdown representation of simple statistics."""
        sub_score = sum(x.score for x in self.submissions)
        comm_score = sum(x.score for x in self.comments)
        sub_duration = self.max_date - self.min_date
        # 86400 = seconds per day; guard against a zero-length window.
        sub_rate = (86400. * len(self.submissions) / sub_duration
                    if sub_duration else len(self.submissions))
        # Compute comment rate
        if self.comments:
            self.comments.sort(key=lambda x: x.created_utc)
            duration = (self.comments[-1].created_utc -
                        self.comments[0].created_utc)
            comm_rate = (86400. * len(self.comments) / duration
                         if duration else len(self.comments))
        else:
            comm_rate = 0
        values = [('Total', len(self.submissions), len(self.comments)),
                  ('Rate (per day)', '{0:.2f}'.format(sub_rate),
                   '{0:.2f}'.format(comm_rate)),
                  ('Unique Redditors', len(self.submitters),
                   len(self.commenters)),
                  ('Combined Score', sub_score, comm_score)]
        retval = 'Period: {0:.2f} days\n\n'.format(sub_duration / 86400.)
        retval += '||Submissions|Comments|\n:-:|--:|--:\n'
        for quad in values:
            # pylint: disable=W0142
            retval += '__{0}__|{1}|{2}\n'.format(*quad)
            # pylint: enable=W0142
        return retval + '\n'
    def top_submitters(self, num, num_submissions):
        """Return a markdown representation of the top submitters."""
        num = min(num, len(self.submitters))
        if num <= 0:
            return ''
        # Rank by total score, breaking ties on submission count.
        top_submitters = sorted(iteritems(self.submitters), reverse=True,
                                key=lambda x: (sum(y.score for y in x[1]),
                                               len(x[1])))[:num]
        retval = self.post_header.format('Top Submitters\' Top Submissions')
        for (author, submissions) in top_submitters:
            retval += '0. {0}, {1} submission{2}: {3}\n'.format(
                self._pts(sum(x.score for x in submissions)), len(submissions),
                's' if len(submissions) > 1 else '', self._user(author))
            for sub in sorted(submissions, reverse=True,
                              key=lambda x: x.score)[:num_submissions]:
                title = safe_title(sub)
                if sub.permalink != sub.url:
                    # Link posts link out; self posts have no external URL.
                    retval += tt('  0. [{0}]({1})').format(title, sub.url)
                else:
                    retval += tt('  0. {0}').format(title)
                retval += ' ({0}, [{1} comment{2}]({3}))\n'.format(
                    self._pts(sub.score), sub.num_comments,
                    's' if sub.num_comments > 1 else '',
                    self._permalink(sub.permalink))
            retval += '\n'
        return retval
    def top_commenters(self, num):
        """Return a markdown representation of the top commenters."""
        score = lambda x: x.score
        num = min(num, len(self.commenters))
        if num <= 0:
            return ''
        top_commenters = sorted(iteritems(self.commenters), reverse=True,
                                key=lambda x: (sum(score(y) for y in x[1]),
                                               len(x[1])))[:num]
        retval = self.post_header.format('Top Commenters')
        for author, comments in top_commenters:
            retval += '0. {0} ({1}, {2} comment{3})\n'.format(
                self._user(author), self._pts(sum(score(x) for x in comments)),
                len(comments), 's' if len(comments) > 1 else '')
        return '{0}\n'.format(retval)
    def top_submissions(self, num):
        """Return a markdown representation of the top submissions."""
        num = min(num, len(self.submissions))
        if num <= 0:
            return ''
        top_submissions = sorted(
            [x for x in self.submissions if self.distinguished or
             x.distinguished is None],
            reverse=True, key=lambda x: x.score)[:num]
        if not top_submissions:
            return ''
        retval = self.post_header.format('Top Submissions')
        for sub in top_submissions:
            title = safe_title(sub)
            if sub.permalink != sub.url:
                retval += tt('0. [{0}]({1})').format(title, sub.url)
            else:
                retval += tt('0. {0}').format(title)
            retval += ' by {0} ({1}, [{2} comment{3}]({4}))\n'.format(
                self._user(sub.author), self._pts(sub.score), sub.num_comments,
                's' if sub.num_comments > 1 else '',
                self._permalink(sub.permalink))
        return tt('{0}\n').format(retval)
    def top_comments(self, num):
        """Return a markdown representation of the top comments."""
        score = lambda x: x.score
        num = min(num, len(self.comments))
        if num <= 0:
            return ''
        top_comments = sorted(self.comments, reverse=True,
                              key=score)[:num]
        retval = self.post_header.format('Top Comments')
        for comment in top_comments:
            title = safe_title(comment.submission)
            retval += tt('0. {0}: {1}\'s [comment]({2}) in {3}\n').format(
                self._pts(score(comment)), self._user(comment.author),
                self._permalink(comment.permalink), title)
        return tt('{0}\n').format(retval)
    def publish_results(self, subreddit, submitters, commenters, submissions,
                        comments, top, debug=False):
        """Submit the results to the subreddit. Has no return value (None)."""
        def timef(timestamp, date_only=False):
            """Return a suitable string representation of the timestamp."""
            dtime = datetime.fromtimestamp(timestamp)
            if date_only:
                retval = dtime.strftime('%Y-%m-%d')
            else:
                retval = dtime.strftime('%Y-%m-%d %H:%M PDT')
            return retval
        if self.prev_srs:
            prev = '[Prev SRS]({0})  \n'.format(self._permalink(self.prev_srs))
        else:
            prev = ''
        basic = self.basic_stats()
        t_commenters = self.top_commenters(commenters)
        t_submissions = self.top_submissions(submissions)
        t_comments = self.top_comments(comments)
        footer = self.post_footer.format(prev, self.max_date)
        body = ''
        # Shrink the per-submitter submission list until the post fits
        # within reddit's MAX_BODY_SIZE limit (but never below 2 entries).
        num_submissions = 10
        while body == '' or len(body) > MAX_BODY_SIZE and num_submissions > 2:
            t_submitters = self.top_submitters(submitters, num_submissions)
            body = (basic + t_submitters + t_commenters + t_submissions +
                    t_comments + footer)
            num_submissions -= 1
        if len(body) > MAX_BODY_SIZE:
            print('The resulting message is too big. Not submitting.')
            debug = True
        # Set the initial title
        base_title = '{0} {1} {2}posts from {3} to {4}'.format(
            self.post_prefix, str(self.subreddit),
            'top ' if top else '', timef(self.min_date, True),
            timef(self.max_date))
        submitted = False
        while not debug and not submitted:
            if subreddit:  # Verify the user wants to submit to the subreddit
                msg = ('You are about to submit to subreddit {0!r} as {1!r}.\n'
                       'Are you sure? yes/[no]: '
                       .format(subreddit, str(self.reddit.user)))
                sys.stdout.write(msg)
                sys.stdout.flush()
                if sys.stdin.readline().strip().lower() not in ['y', 'yes']:
                    subreddit = None
            elif not subreddit:  # Prompt for the subreddit to submit to
                msg = ('Please enter a subreddit to submit to (press return to'
                       ' abort): ')
                sys.stdout.write(msg)
                sys.stdout.flush()
                subreddit = sys.stdin.readline().strip()
                if not subreddit:
                    print('Submission aborted\n')
                    debug = True
            # Vary the title depending on where posting
            if str(self.subreddit) == subreddit:
                title = '{0} {1}posts from {2} to {3}'.format(
                    self.post_prefix, 'top ' if top else '',
                    timef(self.min_date, True), timef(self.max_date))
            else:
                title = base_title
            if subreddit:
                # Attempt to make the submission
                try:
                    res = self._submit(self.reddit.submit, subreddit, title,
                                       text=body)
                    print(res.permalink)
                    submitted = True
                except Exception as error:  # pylint: disable=W0703
                    print('The submission failed:' + str(error))
                    subreddit = None
        if not submitted:
            # Fall back to printing the report to stdout.
            print(base_title)
            print(body)
    def save_csv(self, filename):
        """Create csv file containing comments and submissions by author."""
        redditors = set(self.submitters.keys()).union(self.commenters.keys())
        # Map lowercased names back to the canonical casing for sorting.
        mapping = dict((x.lower(), x) for x in redditors)
        with codecs.open(filename, 'w', encoding='utf-8') as outfile:
            outfile.write('username, type, permalink, score\n')
            for _, redditor in sorted(mapping.items()):
                for submission in self.submitters.get(redditor, []):
                    outfile.write(u'{0}, submission, {1}, {2}\n'
                                  .format(redditor, submission.permalink,
                                          submission.score))
                for comment in self.commenters.get(redditor, []):
                    outfile.write(u'{0}, comment, {1}, {2}\n'
                                  .format(redditor, comment.permalink,
                                          comment.score))
def main():
    """Provide the entry point to the subreddit_stats command.

    :returns: 0 on success, 1 otherwise
    """
    parser = arg_parser(usage='usage: %prog [options] [SUBREDDIT]')
    parser.add_option('-s', '--submitters', type='int', default=5,
                      help='Number of top submitters to display '
                      '[default %default]')
    # Typo fix in user-facing help: "submittions" -> "submissions".
    parser.add_option('-q', '--topsubmissions', type='int', default=10,
                      help='Number of top submissions to display '
                      '[default %default]')
    parser.add_option('-c', '--commenters', type='int', default=10,
                      help='Number of top commenters to display '
                      '[default %default]')
    parser.add_option('-w', '--topcomments', type='int', default=10,
                      help='Number of top comments to display '
                      '[default %default]')
    parser.add_option('-a', '--after',
                      help='Submission ID to fetch after')
    parser.add_option('-d', '--days', type='int', default=32,
                      help=('Number of previous days to include submissions '
                            'from. Use 0 for unlimited. Default: %default'))
    parser.add_option('-D', '--debug', action='store_true',
                      help='Enable debugging mode. Does not post stats.')
    parser.add_option('-R', '--submission-reddit',
                      help=('Subreddit to submit to. If not present, '
                            'submits to the subreddit processed'))
    parser.add_option('-t', '--top',
                      help=('Run on top submissions either by day, week, '
                            'month, year, or all'))
    parser.add_option('', '--distinguished', action='store_true',
                      help=('Include distinguished submissions and '
                            'comments (default: False). Note that regular '
                            'comments of distinguished submissions will still '
                            'be included.'))
    parser.add_option('', '--no-self', action='store_true',
                      help=('Do not include self posts (and their comments) in'
                            ' the calculation.'))
    parser.add_option('', '--no-link', action='store_true',
                      help=('Only include self posts (and their comments) in '
                            'the calculation.'))
    parser.add_option('', '--prev',
                      help='Statically provide the URL of previous SRS page.')
    parser.add_option('', '--include-prev', action='store_true',
                      help='Don\'t try to avoid overlap with a previous SRS.')
    parser.add_option('-o', '--output',
                      help='Save result csv to named file.')
    options, args = parser.parse_args()
    # Fall back to an interactive prompt when the subreddit was not given
    # as a positional argument.
    if len(args) != 1:
        sys.stdout.write('Enter subreddit name: ')
        sys.stdout.flush()
        subject_reddit = sys.stdin.readline().strip()
        if not subject_reddit:
            parser.error('No subreddit name entered')
    else:
        subject_reddit = args[0]
    if not options.disable_update_check:  # Check for updates
        update_check('prawtools', __version__)
    print('You chose to analyze this subreddit: {0}'.format(subject_reddit))
    # --no-link together with --no-self would exclude every submission.
    if options.no_link and options.no_self:
        parser.error('You are choosing to exclude self posts but also only '
                     'include self posts. Consider checking your arguments.')
    if options.submission_reddit:
        submission_reddit = options.submission_reddit
    else:
        submission_reddit = subject_reddit
    srs = SubRedditStats(subject_reddit, options.site, options.verbose,
                         options.distinguished)
    srs.login(options.client_id, options.client_secret)
    if options.prev:
        srs.prev_stat(options.prev)
    if options.top:
        found = srs.fetch_top_submissions(options.top, options.no_self,
                                          options.no_link)
    else:
        since_last = not options.include_prev
        found = srs.fetch_recent_submissions(max_duration=options.days,
                                             after=options.after,
                                             exclude_self=options.no_self,
                                             exclude_link=options.no_link,
                                             since_last=since_last)
    if not found:
        print('No submissions were found.')
        return 1
    srs.process_submitters()
    if options.commenters > 0:
        srs.process_commenters()
    if options.output:
        srs.save_csv(options.output)
    srs.publish_results(submission_reddit, options.submitters,
                        options.commenters, options.topsubmissions,
                        options.topcomments, options.top, options.debug)
    # Explicit success code so the documented contract (0 on success) holds;
    # previously the function implicitly returned None (same exit status via
    # sys.exit, but inconsistent with the docstring).
    return 0
| {
"content_hash": "83e56f6c10ad65a66558d172371838ca",
"timestamp": "",
"source": "github",
"line_count": 594,
"max_line_length": 116,
"avg_line_length": 44.02693602693603,
"alnum_prop": 0.548676965432854,
"repo_name": "amici-ursi/prawtools",
"id": "b2e8cf51727c8cd850c77d260241301d61a76f6f",
"size": "26152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "prawtools/stats.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "48669"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
{
'!=': '!=',
'!langcode!': 'it',
'!langname!': 'Italiano',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". Non si può fare "update" o "delete" dei risultati di un JOIN ',
'%(nrows)s records found': '%(nrows)s record trovati',
'%d seconds ago': '%d secondi fa',
'%s %%{row} deleted': '%s righe ("record") cancellate',
'%s %%{row} updated': '%s righe ("record") modificate',
'%s selected': '%s selezionato',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'@markmin\x01Number of entries: **%s**': 'Numero di entità: **%s**',
'About': 'About',
'Access Control': 'Controllo Accessi',
'Add': 'Aggiungi',
'Administrative Interface': 'Interfaccia Amministrativa',
'Administrative interface': 'Interfaccia amministrativa',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please %s the page': 'È stato rilevato un errore, prego %s la pagina',
'And': 'E',
'appadmin is disabled because insecure channel': 'Amministrazione (appadmin) disabilitata: comunicazione non sicura',
'Are you sure you want to delete this object?': 'Sicuro di voler cancellare questo oggetto ?',
'Available Databases and Tables': 'Database e tabelle disponibili',
'Back': 'Indietro',
'Buy this book': 'Compra questo libro',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Non può essere vuoto',
'Change password': 'Cambia Password',
'change password': 'Cambia password',
'Check to delete': 'Seleziona per cancellare',
'Clear': 'Resetta',
'Clear CACHE?': 'Resetta CACHE?',
'Clear DISK': 'Resetta DISK',
'Clear RAM': 'Resetta RAM',
'Client IP': 'Client IP',
'Close': 'Chiudi',
'Cognome': 'Cognome',
'Community': 'Community',
'Components and Plugins': 'Componenti e Plugin',
'contains': 'contiene',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Creato Da',
'Created On': 'Creato Il',
'CSV': 'CSV',
'CSV (hidden cols)': 'CSV (hidden cols)',
'Current request': 'Richiesta (request) corrente',
'Current response': 'Risposta (response) corrente',
'Current session': 'Sessione (session) corrente',
'customize me!': 'Personalizzami!',
'data uploaded': 'dati caricati',
'Database': 'Database',
'Database %s select': 'Database %s select',
'db': 'db',
'DB Model': 'Modello di DB',
'Delete': 'Cancella',
'Delete:': 'Cancella:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Descrizione',
'design': 'progetta',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentazione',
"Don't know what to do?": 'Non sai cosa fare?',
'done!': 'fatto!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Modifica',
'Edit current record': 'Modifica record corrente',
'edit profile': 'modifica profilo',
'Edit This App': 'Modifica questa applicazione',
'Email and SMS': 'Email e SMS',
'Email non valida': 'Email non valida',
'enter a number between %(min)g and %(max)g': 'enter a number between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'inserisci un intero tra %(min)g e %(max)g',
'Errors': 'Errori',
'Errors in form, please check it out.': 'Errori nel form, ricontrollalo',
'export as csv file': 'esporta come file CSV',
'Export:': 'Esporta:',
'FAQ': 'FAQ',
'First name': 'Nome',
'Forgot username?': 'Dimenticato lo username?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'ID Gruppo',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Groups': 'Groups',
'hello': 'hello',
'hello world': 'salve mondo',
'Hello World': 'Salve Mondo',
'Hello World in a flash!': 'Salve Mondo in un flash!',
'Home': 'Home',
'How did you get here?': 'Come sei arrivato qui?',
'HTML': 'HTML',
'import': 'importa',
'Import/Export': 'Importa/Esporta',
'Index': 'Indice',
'insert new': 'inserisci nuovo',
'insert new %s': 'inserisci nuovo %s',
'Internal State': 'Stato interno',
'Introduction': 'Introduzione',
'Invalid email': 'Email non valida',
'Invalid login': 'Login non valido',
'Invalid Query': 'Richiesta (query) non valida',
'invalid request': 'richiesta non valida',
'Is Active': "E' attivo",
'Key': 'Chiave',
'Last name': 'Cognome',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Loggato',
'Logged out': 'Disconnesso',
'login': 'accesso',
'Login': 'Login',
'logout': 'uscita',
'Logout': 'Logout',
'Lost Password': 'Password Smarrita',
'Lost password?': 'Password smarrita?',
'lost password?': 'dimenticato la password?',
'Main Menu': 'Menu principale',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu Modelli',
'Modified By': 'Modificato da',
'Modified On': 'Modificato il',
'My Sites': 'My Sites',
'Name': 'Nome',
'New': 'Nuovo',
'New password': 'Nuova password',
'New Record': 'Nuovo elemento (record)',
'new record inserted': 'nuovo record inserito',
'next 100 rows': 'prossime 100 righe',
'No databases in this application': 'Nessun database presente in questa applicazione',
'No records found': 'Nessun record trovato',
'Nome': 'Nome',
'Non può essere vuoto': 'Non può essere vuoto',
'not authorized': 'non autorizzato',
'Object or table name': 'Oggetto o nome tabella',
'Old password': 'Vecchia password',
'Online examples': 'Vedere gli esempi',
'Or': 'O',
'or import from csv file': 'oppure importa da file CSV',
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
"Password fields don't match": 'I campi password non sono uguali',
'please input your password again': 'per favore reimmetti la tua password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': '100 righe precedenti',
'Profile': 'Profilo',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Richiesta (query):',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'Record',
'record does not exist': 'il record non esiste',
'Record ID': 'Record ID',
'Record id': 'Record id',
'Register': 'Registrati',
'register': 'registrazione',
'Registration identifier': 'Registration identifier',
'Registration key': 'Chiave di Registazione',
'Registration successful': 'Registrazione avvenuta',
'reload': 'reload',
'Remember me (for 30 days)': 'Ricordami (per 30 giorni)',
'Request reset password': 'Richiedi il reset della password',
'Reset Password key': 'Resetta chiave Password ',
'Role': 'Ruolo',
'Rows in Table': 'Righe nella tabella',
'Rows selected': 'Righe selezionate',
'Save model as...': 'Salva modello come...',
'Save profile': 'Salva profilo',
'Search': 'Ricerca',
'Semantic': 'Semantic',
'Services': 'Servizi',
'Size of cache:': 'Size of cache:',
'starts with': 'comincia con',
'state': 'stato',
'Statistics': 'Statistics',
'Stylesheet': 'Foglio di stile (stylesheet)',
'submit': 'Invia',
'Submit': 'Invia',
'Support': 'Support',
'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?',
'Table': 'tabella',
'Table name': 'Nome tabella',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'L\'output del file è un "dictionary" che è stato visualizzato dalla vista %s',
'The Views': 'The Views',
'This App': 'This App',
'This is a copy of the scaffolding application': "Questa è una copia dell'applicazione di base (scaffold)",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Ora (timestamp)',
'too short': 'troppo corto',
'Traceback': 'Traceback',
'TSV (Excel compatible)': 'TSV (Excel compatibile)',
'TSV (Excel compatible, hidden cols)': 'TSV (Excel compatibile, hidden cols)',
'Twitter': 'Twitter',
'unable to parse csv file': 'non riesco a decodificare questo file CSV',
'Update': 'Aggiorna',
'Update:': 'Aggiorna:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'ID Utente',
'value already in database or empty': 'valore già presente nel database o vuoto',
'Verify Password': 'Verifica Password',
'Videos': 'Videos',
'View': 'Vista',
'Welcome': 'Welcome',
'Welcome %s': 'Benvenuto %s',
'Welcome to web2py': 'Benvenuto su web2py',
'Welcome to web2py!': 'Benvenuto in web2py!',
'Which called the function %s located in the file %s': 'che ha chiamato la funzione %s presente nel file %s',
'XML': 'XML',
'You are successfully running web2py': 'Stai eseguendo web2py con successo',
'You can modify this application and adapt it to your needs': 'Puoi modificare questa applicazione adattandola alle tue necessità',
'You visited the url %s': "Hai visitato l'URL %s",
}
| {
"content_hash": "7e978b6c8868e29995191b8435f2ef74",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 316,
"avg_line_length": 39.73877551020408,
"alnum_prop": 0.6772801972062449,
"repo_name": "vtslab/sensafety-midterm",
"id": "8ba02131dc7aa4928cd6dee8e4b61978593a18ca",
"size": "9765",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "WebMonitor/languages/it.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19219688"
},
{
"name": "C++",
"bytes": "4757546"
},
{
"name": "CSS",
"bytes": "26571"
},
{
"name": "Clean",
"bytes": "6801"
},
{
"name": "Component Pascal",
"bytes": "1754"
},
{
"name": "D",
"bytes": "2232"
},
{
"name": "Erlang",
"bytes": "169"
},
{
"name": "FORTRAN",
"bytes": "10216"
},
{
"name": "JavaScript",
"bytes": "62412"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Objective-C",
"bytes": "59128"
},
{
"name": "PHP",
"bytes": "19144"
},
{
"name": "Perl",
"bytes": "3028"
},
{
"name": "Python",
"bytes": "1625891"
},
{
"name": "Ruby",
"bytes": "273"
},
{
"name": "Shell",
"bytes": "815331"
},
{
"name": "VCL",
"bytes": "4153"
},
{
"name": "XC",
"bytes": "17615"
},
{
"name": "XSLT",
"bytes": "179445"
}
],
"symlink_target": ""
} |
from random import choice, randint
from xierpa3.adapters.adapter import Adapter
from xierpa3.contributions.filibuster.blurb import Blurb
class BlurbAdapter(Adapter):
    u"""
    The Adapter classes connect the templates to Blurb content.
    Note that an adapter always returns plain text/content elements (which can include plain HTML), not components. The conversion needs to be done by the calling component.
    """
    # Pool of stock image URLs used as random article posters.
    IMAGES = [
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/bahrain.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/bush.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/chinatown.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/earthquake.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/egypt.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/electrical-storm.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/felix-graph-3.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/googleglass.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/googleglass2.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/googleglass3.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/googleglass4.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/guaguacrater.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/katrina.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/kiss.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/libya.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/little_italy.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/news.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/newspaper.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/nkorealaunch.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/obama.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/obama2.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/obama03.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/olympicpast.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/olympicpast2.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/perfect_storm1_large.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/polarbears.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/rahm.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/rolyaluk2.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/royaluk.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/shuttle.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/storm-nbpier.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/swarmybigshot.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/tibet.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/transport.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/travel.jpg',
        '//lib.xierpaweb.com.s3.amazonaws.com/_images/newspaper/images/news/verticalfashionshow.jpg',
    ]
    def __init__(self):
        Adapter.__init__(self)
        # Filibuster blurb generator that produces all placeholder texts.
        self.blurb = Blurb()
    def getMessage(self, count=1, **kwargs):
        u"""Answer the fixed site-wide notification message."""
        return self.newArticle(text=u'English is not native. For corrections on disaster misspellings please contact buro (at) petr.com')
    def getSocialMedia(self, count=1, **kwargs):
        u"""Answer a blurb text for the social media slot."""
        return self.newArticle(text=self.blurb.getBlurb('design_theory_title'))
    def getTagCloud(self, count=10, **kwargs):
        u"""Answer *count* tag cloud entries, each with a random emphasis."""
        # Answer count tagCloud list entries as tuple (word, emphasisNumber)
        cloud = []
        # Bug fix: the loop was hard-coded to range(10), ignoring `count`.
        for _ in range(count):
            cloud.append(self.newArticle(text=self.blurb.getBlurb('design_magazines'), emphasis=randint(10, 24)))
        return cloud
    def getArticleIds(self, start=0, count=1, selector=None, order=None, **kwargs):
        u"""Answer `count` blurb article ids, beginning at offset `start`."""
        # Bug fix: range(start, count) produced only count-start ids, and no
        # ids at all whenever start >= count.
        ids = []
        for index in range(start, start + count):
            ids.append(self.blurb.getBlurb('news_headline', 10))
        return ids
    def getArticle(self, id=None, index=0, selector=None, order=None, **kwargs):
        u"""Answer a random blurb article with headline, poster, ankeiler and text."""
        article = self.newArticle(
            headline=self.blurb.getBlurb('news_headline', 10),
            poster=choice(self.IMAGES),
            ankeiler=self.blurb.getBlurb('article_ankeiler', 30),
            text=self.blurb.getBlurb('article'),
        )
        return article
    def getChapter(self, index=0, **kwargs):
        u"""Answer a blurb article as chapter."""
        return self.getArticle()
    # P A G E S T U F F
    def getFooter(self, count=1, **kwargs):
        u"""Answer a blurb footer line."""
        return self.newArticle(text='Footer: ' + self.blurb.getBlurb('events_headline'))
    def getLogo(self, **kwargs):
        u"""Answer the fixed logo image article."""
        return self.newArticle(url='//petr.com/_images/contact.png')
    def getPageTitle(self, **kwargs):
        u"""Answer a blurb headline usable as page title."""
        return self.newArticle(text=self.blurb.getBlurb('news_headline'))
    def getDescription(self, **kwargs):
        u"""Answer a blurb description of the site."""
        return self.newArticle(text=self.blurb.getBlurb('article_ankeiler', 40))
    def getKeyWords(self, **kwargs):
        u"""Answer a blurb set of keywords of the site, comma-space separated."""
        return self.newArticle(text=self.blurb.getBlurb('news_headline', 60).replace(' ', ', '))
| {
"content_hash": "d3c24b5c75e29998fe71cd84bbde83bd",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 173,
"avg_line_length": 58.77450980392157,
"alnum_prop": 0.6862385321100918,
"repo_name": "petrvanblokland/Xierpa3",
"id": "780f2ee5be3178f423b87955694d092a58deffc2",
"size": "6358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xierpa3/adapters/blurbadapter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41394"
},
{
"name": "JavaScript",
"bytes": "1507"
},
{
"name": "Python",
"bytes": "1349828"
}
],
"symlink_target": ""
} |
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SkiaBuildbotDesktopPage(page_module.Page):
  """Desktop telemetry page that navigates to a URL and then waits 25s.

  Uses the 'desktop' user agent and the shared skia_youtube_desktop WPR
  archive.
  """

  def __init__(self, url, page_set):
    super(SkiaBuildbotDesktopPage, self).__init__(
        url=url,
        page_set=page_set,
        credentials_path='data/credentials.json')
    self.user_agent_type = 'desktop'
    self.archive_data_file = 'data/skia_youtube_desktop.json'

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # Fixed 25 second wait after navigation — presumably to let the video
    # start playing before capture; TODO confirm intent.
    action_runner.Wait(25)
class SkiaYoutubeDesktopPageSet(page_set_module.PageSet):
  """ Pages designed to represent the median, not highly optimized web """

  def __init__(self):
    super(SkiaYoutubeDesktopPageSet, self).__init__(
        user_agent_type='desktop',
        archive_data_file='data/skia_youtube_desktop.json')
    # Why: #3 (Alexa global)
    page_urls = [
        'http://www.youtube.com/watch?v=PC57z-oDPLs',
    ]
    for page_url in page_urls:
      self.AddUserStory(SkiaBuildbotDesktopPage(page_url, self))
| {
"content_hash": "2a14aa5ea78fd4aafc5e80215c12e8ac",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.6899521531100479,
"repo_name": "scroggo/skia",
"id": "335dc68fa108c83ce67a3e678219e553812db3d7",
"size": "1239",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tools/skp/page_sets/skia_youtube_desktop.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10339"
},
{
"name": "C",
"bytes": "579203"
},
{
"name": "C++",
"bytes": "25772025"
},
{
"name": "CSS",
"bytes": "2042"
},
{
"name": "Go",
"bytes": "677"
},
{
"name": "HTML",
"bytes": "24562"
},
{
"name": "Java",
"bytes": "24340"
},
{
"name": "JavaScript",
"bytes": "7593"
},
{
"name": "Lua",
"bytes": "25531"
},
{
"name": "Makefile",
"bytes": "8694"
},
{
"name": "Objective-C",
"bytes": "22720"
},
{
"name": "Objective-C++",
"bytes": "107323"
},
{
"name": "Python",
"bytes": "321701"
},
{
"name": "Shell",
"bytes": "41399"
}
],
"symlink_target": ""
} |
from sensomatic.sources.utils import data_source
@data_source
class FileContent:
    """
    Example of source. Provides file content for the file path specified in
    the constructor. Iterating yields the current (stripped) content of the
    file on every step.
    """
    provides = 'file_content'
    def __init__(self, path):
        # Path of the file re-read on every iteration step.
        self.path = path
    def __next__(self):
        # Re-open on every call so external updates are picked up; the
        # context manager guarantees the handle is closed (the original
        # left the file object open, leaking a descriptor per step).
        with open(self.path) as source:
            return source.read().strip()
    def __iter__(self):
        return self
| {
"content_hash": "5ee08c275e04678c9b50e2d5f56c6520",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 92,
"avg_line_length": 21.88888888888889,
"alnum_prop": 0.6243654822335025,
"repo_name": "rrader/sens-o-matic",
"id": "71982913233673ffdd95ec0f4bcdf92eab60ee25",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sensomatic/sources/file_content.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "896"
},
{
"name": "HTML",
"bytes": "1792"
},
{
"name": "JavaScript",
"bytes": "3946"
},
{
"name": "Python",
"bytes": "18984"
}
],
"symlink_target": ""
} |
import json
import subprocess
import re
import sys
def addToKnownHosts(hostname):
    """
    Sets all hosts given in hostlist as hosts known to Ansible by editing the /etc/ansible/hosts
    :param hostname: A hostname to add to the list of known hosts for ansible
    :type hostname: str
    """
    # Pass the arguments as a list without a shell: the original formatted
    # the hostname into a shell=True command string, which allowed shell
    # injection through a crafted hostname.
    subprocess.call(['ansible-playbook',
                     '/etc/insalata/template/ansible/host.yml',
                     '--extra-vars', 'host={0}'.format(hostname)])
#Run an arbitrary ansible adhoc command
def runAnsibleCommand(host, module):
    """Run an Ansible ad-hoc module against *host* and return the parsed results.

    :param host: host pattern to run against
    :param module: name of the Ansible module to execute
    :returns: list of per-host dicts as produced by parseAnsibleCommand()
    """
    addToKnownHosts(host)
    process = subprocess.Popen(["ansible", host, "-m", module], stdout=subprocess.PIPE)
    output = process.communicate()[0]
    # sys.stdout.encoding is None when stdout is not a terminal (e.g. piped),
    # which would make decode() raise; fall back to UTF-8 in that case.
    encoding = sys.stdout.encoding or 'utf-8'
    return parseAnsibleCommand(output.decode(encoding))
#return the json from an ansible module output
def parseAnsibleCommand(out):
    """Parse the textual output of an ansible ad-hoc command.

    :param out: raw decoded stdout of an ``ansible ... -m module`` run
    :returns: list of ``{'host': ..., 'status': ..., 'json': ...}`` dicts,
        one per host block found in the output
    """
    # Mark the end of each per-host JSON payload, then flatten to one line
    # so a single regex can pick out every "<host> | <status> => {...}" run.
    flattened = out.replace("\n}\n", "}--").replace("\n", "")
    results = []
    for chunk in re.findall('.*?{.*?}--', flattened):
        chunk = chunk.replace("}--", "}")
        parts = re.split(r' \| | => ', chunk)
        results.append({'host': parts[0],
                        'status': parts[1],
                        'json': json.loads(parts[2])})
    return results
def copyFile(user, filename):
    """Copy *filename* to the home directory of *user*'s host pattern via the
    Ansible ``copy`` module, with mode 744.

    :param user: Ansible host pattern / inventory name to target
    :param filename: local path of the file to copy
    """
    # Bug fixes relative to the original string concatenation:
    # - the ``user`` argument was ignored (the literal host pattern "user"
    #   was always targeted);
    # - the missing space before "dest=" produced "src=<file>dest=.",
    #   an invalid module argument string.
    # Using an argument list (no shell) also avoids quoting/injection issues.
    subprocess.call(["ansible", user, "-m", "copy",
                     "-a", "src={0} dest=. mode=744".format(filename)])
"content_hash": "b32e7fb636f2998460efb9152ef7fb86",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 132,
"avg_line_length": 34.8421052631579,
"alnum_prop": 0.6518126888217523,
"repo_name": "tumi8/INSALATA",
"id": "1882a01c2f49b2c9a61a35ae02681e1f282a9431",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/insalata/helper/ansibleWrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "368324"
},
{
"name": "Shell",
"bytes": "718"
}
],
"symlink_target": ""
} |
import subprocess
from sys import platform
def ping(server, count=10, timeout_in_seconds=1):
    '''Ping a server, dispatching to the platform-specific helper.

    Returns:
        1: server is alive (True)
        0: server is not alive (False)
        -1: error
    '''
    if not platform.lower().startswith('win'):
        return linux_ping(server, count=count,
                          timeout_in_seconds=timeout_in_seconds)
    # Windows ping expresses the timeout in milliseconds.
    return windows_ping(server, count=count,
                        timeout_in_miliseconds=timeout_in_seconds * 1000)
def windows_ping(server, count=1, timeout_in_miliseconds=1000):
    ''' ping a server on windows
    Returns:
        1: server is alive (True)
        0: server is not alive (False)
        -1: error
    '''
    try:
        output = subprocess.Popen(
            ['ping', '-n', str(count), '-w', str(timeout_in_miliseconds),
             server], stdout=subprocess.PIPE).stdout.read()
        # '100%' in the output means total packet loss -> host considered down.
        return 0 if '100%' in str(output) else 1
    except Exception:
        # The original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers missing binary/OS errors.
        return -1
def linux_ping(server, count=1, timeout_in_seconds=1):
    ''' ping a server on linux / macOS
    Returns:
        1: server is alive (True)
        0: server is not alive (False)
        -1: error
    '''
    # This helper is only reached on non-Windows platforms (see ping()), so
    # the original ``platform.startswith('win')`` test could never be true.
    # BSD/macOS ping takes its overall deadline via ``-t``, while GNU/Linux
    # ping uses ``-w``.
    timeout_token = '-t' if platform.startswith('darwin') else '-w'
    try:
        output = subprocess.Popen(
            ['ping', '-c', str(count), timeout_token,
             str(timeout_in_seconds), server],
            stdout=subprocess.PIPE).stdout.read()
        # '100%' packet loss in the output means the host did not answer.
        return 0 if '100%' in str(output) else 1
    except Exception:
        # Narrowed from a bare ``except:`` that also swallowed SystemExit
        # and KeyboardInterrupt.
        return -1
if __name__ == "__main__":
msg= ''' ping a server on any platform
Returns:
1: server is alive (True)
0: server is not alive (False)
-1: error
'''
print(msg)
print('Ping response: {}'.format(ping('www.google.com', count=20, timeout_in_seconds=3)))
| {
"content_hash": "1923f1607dbfc904eb8e7f114c26601a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 141,
"avg_line_length": 33.716981132075475,
"alnum_prop": 0.5769445998880806,
"repo_name": "coderholic/pyradio",
"id": "ef80e45b936067e56eefc1668a9cecc1476803c9",
"size": "1787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyradio/ping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6852"
},
{
"name": "HTML",
"bytes": "73121"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Python",
"bytes": "1278623"
},
{
"name": "Roff",
"bytes": "75253"
},
{
"name": "Shell",
"bytes": "22299"
}
],
"symlink_target": ""
} |
import paramiko
import getpass
def ssh_execute(hostname, username, password, cmds=[], port=22):
    """Run each shell command in *cmds* on a remote host over SSH.

    :param cmds: iterable of command strings (read-only; the mutable
        default is never modified)
    :returns: list of ``[command, stdout_lines, stderr_lines]`` triples
    """
    rt_list = []
    # Create the remote SSH client object.
    ssh = paramiko.SSHClient()
    # Automatically accept unknown host keys (no known_hosts verification).
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Connect to the server.
    ssh.connect(hostname, port, username, password)
    try:
        for cmd in cmds:
            stdin, stdout, stderr = ssh.exec_command(cmd)
            rt_list.append([cmd, stdout.readlines(), stderr.readlines()])
    finally:
        # Close the connection even if a command raises; the original
        # leaked the connection on error.
        ssh.close()
    return rt_list
def upload(hostname, username, password, files=[], port=22):
    """Upload files to a remote host via SFTP.

    :param files: iterable of ``(local_path, remote_path)`` pairs
    """
    transport = paramiko.Transport((hostname, port))
    transport.connect(username, password)
    try:
        sftp = paramiko.SFTPClient.from_transport(transport)
        for local_path, remote_path in files:
            sftp.put(local_path, remote_path)
    finally:
        # Close the transport even when a put() fails; the original leaked
        # the connection on error.
        transport.close()
if __name__=="__main__":
host='192.168.0.101'
username='root'
files=['log.txt','/tmp/zz']
#password=getpass.getpass('请输入密码:')
password='321100'
for cmd,stdout,stderr in ssh_execute(host,username,password,cmds=['pwd','id']):
print (cmd,stdout,stderr)
#upload(hostname=host,username=username,password=password,files=files,port=22)
| {
"content_hash": "a1d4e4622e5e748efea8fee59a6f3b2d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 83,
"avg_line_length": 26.11904761904762,
"alnum_prop": 0.6709206927985415,
"repo_name": "51reboot/actual_09_homework",
"id": "27d299180b7f8ef44e72d2a7eedc8a7cc6a2be5d",
"size": "1155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "11/zhouyang/ssh_remote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4623850"
},
{
"name": "HTML",
"bytes": "90670692"
},
{
"name": "JavaScript",
"bytes": "31827839"
},
{
"name": "Nginx",
"bytes": "1073"
},
{
"name": "PHP",
"bytes": "349512"
},
{
"name": "Python",
"bytes": "1705997"
},
{
"name": "Shell",
"bytes": "10001"
},
{
"name": "Smarty",
"bytes": "342164"
}
],
"symlink_target": ""
} |
from optimus.utils import display_settings
class DummySettings(object):
    """
    Bare settings stand-in with no predefined attributes; tests attach
    whatever settings they need directly to an instance.
    """
    def __init__(self):
        pass
def test_basic(caplog):
    """
    display_settings() logs every requested setting name, reporting missing
    ones as "NOT SET".
    """
    settings = DummySettings()
    settings.STRING = "ok"
    settings.INTEGER = 42
    settings.SEQ = [1, "hello", 42]
    settings.MAP = {"hello": "world"}
    settings.LOOSE = "meh"

    display_settings(settings, ["STRING", "INTEGER", "SEQ", "MAP", "NOPE"])

    # One debug record per requested name; LOOSE was not requested and NOPE
    # does not exist on the settings object.
    expected = [
        ("optimus", 10, " - Settings.STRING = ok"),
        ("optimus", 10, " - Settings.INTEGER = 42"),
        ("optimus", 10, " - Settings.SEQ = [1, 'hello', 42]"),
        ("optimus", 10, " - Settings.MAP = {'hello': 'world'}"),
        ("optimus", 10, " - Settings.NOPE = NOT SET"),
    ]
    assert caplog.record_tuples == expected
| {
"content_hash": "6bb9f8696694190ceb427c41dc798d29",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 24.941176470588236,
"alnum_prop": 0.5495283018867925,
"repo_name": "sveetch/Optimus",
"id": "c92fd7f7ddcd94ac83d53a72a22847708de6dd65",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/0100_utils/05_display_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14380"
},
{
"name": "HTML",
"bytes": "16553"
},
{
"name": "JavaScript",
"bytes": "101904"
},
{
"name": "Makefile",
"bytes": "1564"
},
{
"name": "Python",
"bytes": "245913"
},
{
"name": "Ruby",
"bytes": "855"
},
{
"name": "Smarty",
"bytes": "8827"
}
],
"symlink_target": ""
} |
import unittest
import lighter.main as lighter
import lighter.secretary as secretary
import lighter.util as util
class SecretaryTest(unittest.TestCase):
    """Tests for lighter's secretary (secrets management) integration.

    Fixes applied: deprecated ``assertEquals`` replaced with ``assertEqual``,
    and the Python 2-only ``e.message`` replaced with ``str(e)`` (equivalent
    on Python 2, also works on Python 3, and consistent with
    ``testNonStringValue`` below).
    """

    def testAddMasterKey(self):
        # Parsing a service with secrets surfaces the master public key.
        service = lighter.parse_service('src/resources/yaml/staging/myservice.yml')
        self.assertEqual(service.config['env']['MASTER_PUBLIC_KEY'], 'pq01FdTbzF7q29HiX8f01oDfQyHgVFw03vEZes7OtnQ=')

    def testAddDeployKey(self):
        # A deploy keypair is generated and exposed in the service env.
        service = lighter.parse_service('src/resources/yaml/staging/myservice.yml')
        self.assertIsNotNone(service.config['env']['DEPLOY_PUBLIC_KEY'])
        self.assertIsNotNone(service.config['env']['DEPLOY_PRIVATE_KEY'])

    def testRedeployWithoutChange(self):
        # Two parses of the same file yield different deploy keys (so the
        # configs differ) but identical checksums, so an unchanged service
        # is not considered modified.
        service1 = lighter.parse_service('src/resources/yaml/staging/myservice-servicekey.yml')
        service2 = lighter.parse_service('src/resources/yaml/staging/myservice-servicekey.yml')
        self.assertNotEqual(service1.config, service2.config)

        checksum1 = util.rget(service1.config, 'labels', 'com.meltwater.lighter.checksum')
        self.assertIsNotNone(checksum1)
        self.assertEqual(checksum1, util.rget(service2.config, 'labels', 'com.meltwater.lighter.checksum'))

        self.assertNotEqual(service1.config['env']['DEPLOY_PUBLIC_KEY'], service2.config['env']['DEPLOY_PUBLIC_KEY'])
        self.assertNotEqual(service1.config['env']['DEPLOY_PRIVATE_KEY'], service2.config['env']['DEPLOY_PRIVATE_KEY'])

    def testServiceWithoutSecrets(self):
        # Services without encrypted values must not get secretary env vars.
        service = lighter.parse_service('src/resources/yaml/staging/myservice-nosecret.yml')
        self.assertFalse('SECRETARY_URL' in service.config['env'])
        self.assertFalse('MASTER_PRIVATE_KEY' in service.config['env'])
        self.assertFalse('DEPLOY_PUBLIC_KEY' in service.config['env'])
        self.assertFalse('DEPLOY_PRIVATE_KEY' in service.config['env'])

    def testServiceWithEmbeddedSecret(self):
        # An encrypted value embedded inside a URL still triggers key setup.
        service = lighter.parse_service('src/resources/yaml/staging/myservice-embedded-encrypted-url.yml')
        self.assertTrue('SECRETARY_URL' in service.config['env'])
        self.assertTrue('DEPLOY_PUBLIC_KEY' in service.config['env'])
        self.assertTrue('DEPLOY_PRIVATE_KEY' in service.config['env'])

    def testExtractEnvelopes(self):
        # Well-formed ENC[...] envelopes are extracted in order.
        envelopes = secretary.extractEnvelopes("amqp://ENC[NACL,uSr123+/=]:ENC[NACL,pWd123+/=]@rabbit:5672/")
        self.assertEqual(2, len(envelopes))
        self.assertEqual(["ENC[NACL,uSr123+/=]", "ENC[NACL,pWd123+/=]"], envelopes)

        envelopes = secretary.extractEnvelopes("amqp://ENC[NACL,uSr123+/=]:ENC[NACL,pWd123+/=]@rabbit:5672/ENC[KMS,123abc+/=]")
        self.assertEqual(3, len(envelopes))
        self.assertEqual(["ENC[NACL,uSr123+/=]", "ENC[NACL,pWd123+/=]", "ENC[KMS,123abc+/=]"], envelopes)

        # Malformed envelopes (empty payload, unterminated bracket, bad
        # prefix) are ignored; only the valid ones are returned.
        envelopes = secretary.extractEnvelopes("amqp://ENC[NACL,]:ENC[NACL,pWd123+/=]@rabbit:5672/")
        self.assertEqual(1, len(envelopes))
        self.assertEqual(["ENC[NACL,pWd123+/=]"], envelopes)

        envelopes = secretary.extractEnvelopes("amqp://ENC[NACL,:ENC[NACL,pWd123+/=]@rabbit:5672/")
        self.assertEqual(1, len(envelopes))
        self.assertEqual(["ENC[NACL,pWd123+/=]"], envelopes)

        envelopes = secretary.extractEnvelopes("amqp://NC[NACL,]:ENC[NACL,pWd123+/=]@rabbit:5672/")
        self.assertEqual(1, len(envelopes))
        self.assertEqual(["ENC[NACL,pWd123+/=]"], envelopes)

        envelopes = secretary.extractEnvelopes("amqp://ENC[NACL,abc:ENC[NACL,pWd123+/=]@rabbit:5672/")
        self.assertEqual(1, len(envelopes))
        self.assertEqual(["ENC[NACL,pWd123+/=]"], envelopes)

    def testServiceWithEnvvarDots(self):
        # Env var names containing dots are not valid shell identifiers and
        # must be rejected with a descriptive error.
        try:
            lighter.parse_service('src/resources/yaml/staging/myservice-encrypted-dots.yml')
        except RuntimeError as e:
            # str(e) instead of the Python 2-only e.message attribute.
            self.assertEqual(
                "The env var 'database.uri' has an encrypted value but its name is not a valid shell script identifier and not supported by Secretary. " +
                "Only alphanumeric characters and underscores are supported, starting with an alphabetic or underscore character." +
                "Please check https://github.com/meltwater/lighter#secrets-management .", str(e))
        else:
            self.fail("Expected exception RuntimeError")

    def testNonStringValue(self):
        # Non-string input to extractEnvelopes raises a descriptive error.
        try:
            secretary.extractEnvelopes({1: 2})
        except ValueError as e:
            self.assertEqual("Input must be str or unicode, was dict({1: 2})", str(e))
        else:
            self.fail("Expected exception ValueError")
| {
"content_hash": "865c0f624b108df5ed8961ddc7670edb",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 154,
"avg_line_length": 54.734939759036145,
"alnum_prop": 0.6823684789786485,
"repo_name": "meltwater/lighter",
"id": "a75c875b291d1c006ec4c83562ac9a4908218247",
"size": "4543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lighter/test/secretary_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "272"
},
{
"name": "Makefile",
"bytes": "247"
},
{
"name": "Python",
"bytes": "101880"
},
{
"name": "Shell",
"bytes": "3259"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class OfferTermInfo(Model):
    """Describes the offer term.

    Polymorphic base: msrest deserializes into the concrete subclass
    selected by the wire value of 'name' (see _subtype_map).

    :param effective_date: Indicates the date from which the offer term is
     effective.
    :type effective_date: datetime
    :param name: Polymorphic Discriminator
    :type name: str
    """

    # 'name' is the polymorphic discriminator and must always be present.
    _validation = {
        'name': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'effective_date': {'key': 'EffectiveDate', 'type': 'iso-8601'},
        'name': {'key': 'Name', 'type': 'str'},
    }

    # Wire discriminator value -> concrete model class name.
    _subtype_map = {
        'name': {'Monetary Credit': 'MonetaryCredit', 'Monetary Commitment': 'MonetaryCommitment', 'Recurring Charge': 'RecurringCharge'}
    }

    def __init__(self, effective_date=None):
        # NOTE(review): does not call super().__init__(); confirm this is
        # acceptable for the msrest.serialization.Model base class in use.
        self.effective_date = effective_date
        # Set by the concrete subclass (polymorphic discriminator).
        self.name = None
| {
"content_hash": "10c8aa057d5d7785a587b6891678309b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 137,
"avg_line_length": 27.413793103448278,
"alnum_prop": 0.6125786163522012,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "58267f0882ed2ea6806a2d3f55515712d6483367",
"size": "1269",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-commerce/azure/mgmt/commerce/models/offer_term_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
__all__ = ('MapManager')
from pocketthrone.tools.maploader import MapLoader
from pocketthrone.managers.pipe import L
from pocketthrone.managers.filemanager import FileManager
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.entities.event import *
from pocketthrone.entities.enum import TileLandscape, TileBiome, Compass
class MapManager:
    """Loads, post-processes and tracks state of the game's tile map.

    Responsibilities: loading a TileMap via MapLoader, optional post-load
    image overrides (land elevation, terrain bridges), tile selection and
    scrolling-offset caching. Registers itself on the EventManager and
    reacts to MapScrolledEvent.

    Fixes applied: unselect_tile() previously assigned to ``self.selected``
    (a typo) leaving ``selected_tile`` stale; Python 2-only tuple-parameter
    unpacking replaced with call-compatible single-tuple parameters
    (PEP 3113).
    """
    _tag = "[MapManager] "
    initialized = False

    # Tile selection state.
    # NOTE(review): the has_selected_tile attribute is shadowed by the
    # method of the same name defined below, and select_tile_at() re-shadows
    # it per-instance with a bool — consider renaming one of them.
    has_selected_tile = False
    selected_tile = None

    tilemap = None

    # Post-loading feature toggles.
    enable_postloading = True
    enable_land_elevation = True
    enable_terrain_bridges = False

    # actual scrolling of the map
    viewport = []
    tiles_in_viewport = {}
    tiles_in_viewport_incomplete = False
    last_scrolling = {"x": None, "y": None}
    scrolling = {"x": 0, "y": 0}

    # number of tiles that are fitting in the map size
    grid_width = 0
    grid_height = 0

    def __init__(self, map_name=None):
        '''register on the EventManager; load and post-process the map
        when a name is given, otherwise abort with a log line'''
        # register in EventManager
        EventManager.register(self)
        # set self._map; abort when none
        if map_name is None:
            print(self._tag + "ABORT map name to load = None")
        else:
            self.load_map(map_name)
            self.postload_map()

    def initialize(self):
        '''flag as initialized'''
        self.initialized = True

    def abort_when_uninitialized(self):
        '''abort method when uninitialized'''
        print(self._tag + "ABORT. ")

    def load_map(self, map_name):
        '''loads a map by its name; fires MapLoadedEvent on success.
        No-op when the manager is already initialized.'''
        if self.initialized:
            return
        # get selected mod & map
        # NOTE(review): selected_mod is never used below — kept in case
        # get_selected_mod() has required side effects; confirm and drop.
        selected_mod = L.ModManager.get_selected_mod()
        tilemap = MapLoader(map_name=map_name).get_tilemap()
        # set map in Locator and fire MapLoadedEvent
        if tilemap:
            print(self._tag + "SUCCESS MAP " + map_name + " loaded. Is now initialized.")
            # TileMap postloading
            self.tilemap = tilemap
            L.TileMap = tilemap
            # flag WidgetManager as initialized return TileMap
            self.initialized = True
            EventManager.fire(MapLoadedEvent(tilemap))

    def postload_map(self):
        '''tweak the TileMap after loading it with MapLoader (not necessary).

        Applies land-elevation and terrain-bridge image overrides to tiles
        when the corresponding feature flags are enabled.'''
        # set terrain bridges in map
        self.tilemap._initialize_neighbors()
        continent_lds = ["G", "F", "M"]
        bridge_directions = [Compass.DIRECTION_NORTH, Compass.DIRECTION_SOUTH]
        # override image paths on bridge tiles
        if self.enable_postloading:
            elev_counter = 0
            bridge_counter = 0
            print(self._tag + "POSTLOADING is on")
            for tile in self.tilemap.tiles:
                # LAND ELEVATION
                if self.enable_land_elevation:
                    water_tiles = ["W", "=", "H"]
                    counter = 0
                    if tile.get_landscape() == TileLandscape.LANDSCAPE_WATER:
                        print(self._tag + "self " + tile.get_landscape())
                        neighbor_north = tile.get_neighbor(Compass.DIRECTION_NORTH)
                        # NOTE(review): this compares a neighbor *tile*
                        # against landscape code strings, which looks like it
                        # is always True for any neighbor — confirm intent.
                        if neighbor_north and neighbor_north not in water_tiles:
                            print(self._tag + tile.get_neighbor(Compass.DIRECTION_NORTH))
                            tile.image_override = "tile_w_north_g"
                            elev_counter += 1
                        else:
                            print("water on water")
                # TERRAIN BRIDGES
                if self.enable_terrain_bridges:
                    if tile.get_landscape() == TileLandscape.LANDSCAPE_SNOW:
                        if tile.get_neighbor(Compass.DIRECTION_NORTH) != TileLandscape.LANDSCAPE_SNOW:
                            tile.image_override = "tile_s_north_g"
                        elif tile.get_neighbor(Compass.DIRECTION_SOUTH) != TileLandscape.LANDSCAPE_SNOW:
                            tile.image_override = "tile_s_south_g"
            print(self._tag + str(elev_counter) + " water elevation tiles")

    def get_tilemap(self):
        '''returns the TileMap instance of this game'''
        return self.tilemap

    def get_scrolling(self):
        '''returns the actual scrolling offset'''
        return self.scrolling

    def scroll(self, rel_pos):
        '''scrolls by relative (rel_x, rel_y) position tuple'''
        # Single tuple parameter replaces the Python 2-only tuple-unpacking
        # signature; callers still pass one (x, y) tuple.
        rel_x, rel_y = rel_pos
        mapwidget = L.WidgetManager.get_widget("mapwidget")
        mapwidget.scroll((rel_x, rel_y))

    def scroll_at(self, grid_pos):
        '''scrolls at given (grid_x, grid_y) grid position tuple'''
        grid_x, grid_y = grid_pos
        mapwidget = L.WidgetManager.get_widget("mapwidget")
        mapwidget.scroll_at((grid_x, grid_y))

    def select_tile_at(self, pos):
        '''set tile at given (pos_x, pos_y) position tuple as selected;
        returns the selected tile or None when outside the map'''
        pos_x, pos_y = pos
        self.selected_tile = self.get_tile_at((pos_x, pos_y))
        # return None when tile isn't in map
        if self.selected_tile is None:
            self.has_selected_tile = False
            return None
        # set has_selected_tile flag
        self.has_selected_tile = True
        # fire TileUnselectedEvent
        EventManager.fire(TileUnselectedEvent())
        # fire TileSelectedEvent
        EventManager.fire(TileSelectedEvent(self.selected_tile, (pos_x, pos_y)))
        # return selected tile
        return self.selected_tile

    def get_tile_at(self, pos):
        '''returns tile at given (pos_x, pos_y) position tuple'''
        return self.tilemap.get_tile_at(pos)

    def get_selected_tile(self):
        '''returns selected tile'''
        return self.selected_tile

    def has_selected_tile(self):
        '''returns whether a tile is selected'''
        return self.selected_tile is not None

    def unselect_tile(self):
        '''unselects selected tile'''
        self.has_selected_tile = False
        # BUG FIX: previously assigned to self.selected (a typo), which left
        # self.selected_tile pointing at the stale tile after unselection.
        self.selected_tile = None
        EventManager.fire(TileUnselectedEvent())

    def on_event(self, event):
        '''event handler; keeps the scrolling caches in sync'''
        # map was scrolled after user input
        if isinstance(event, MapScrolledEvent):
            # update previous scrolling cache
            prev_scrolling = {"x": int(event.prev_x), "y": int(event.prev_y)}
            new_scrolling = {"x": int(event.new_x), "y": int(event.new_y)}
            # update scrolling cache
            self.prev_scrolling = prev_scrolling
            self.scrolling = new_scrolling
| {
"content_hash": "32f2cd841e5759a3d36544b072e39d83",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 86,
"avg_line_length": 32.024096385542165,
"alnum_prop": 0.6980812641083521,
"repo_name": "herrschr/prey-game",
"id": "b4bf1339ca61d039037c787506fd86004616c463",
"size": "5316",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pocketthrone/managers/mapmanager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "109218"
},
{
"name": "Shell",
"bytes": "22"
}
],
"symlink_target": ""
} |
"""Self-test suite for Cryptodome.PublicKey.RSA"""
__revision__ = "$Id$"
import os
import pickle
from pickle import PicklingError
from Cryptodome.Util.py3compat import *
import unittest
from Cryptodome.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
class RSATest(unittest.TestCase):
    """Self-tests for RSA key generation, construction and raw primitives.

    Fixes applied: deprecated unittest aliases replaced (``failUnless`` ->
    ``assertTrue``, ``assertEquals`` -> ``assertEqual``) and Python 2-only
    long literals (``0x11L`` etc.) replaced with plain ints, which are
    value-identical on Python 2 and make the module parseable on Python 3.
    """

    # Test vectors from "RSA-OAEP and RSA-PSS test vectors (.zip file)"
    # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
    # See RSADSI's PKCS#1 page at
    # http://www.rsa.com/rsalabs/node.asp?id=2125

    # from oaep-int.txt

    # TODO: PyCryptodome treats the message as starting *after* the leading "00"
    # TODO: That behaviour should probably be changed in the future.
    plaintext = """
           eb 7a 19 ac e9 e3 00 63 50 e3 29 50 4b 45 e2
        ca 82 31 0b 26 dc d8 7d 5c 68 f1 ee a8 f5 52 67
        c3 1b 2e 8b b4 25 1f 84 d7 e0 b2 c0 46 26 f5 af
        f9 3e dc fb 25 c9 c2 b3 ff 8a e1 0e 83 9a 2d db
        4c dc fe 4f f4 77 28 b4 a1 b7 c1 36 2b aa d2 9a
        b4 8d 28 69 d5 02 41 21 43 58 11 59 1b e3 92 f9
        82 fb 3e 87 d0 95 ae b4 04 48 db 97 2f 3a c1 4f
        7b c2 75 19 52 81 ce 32 d2 f1 b7 6d 4d 35 3e 2d
    """

    ciphertext = """
        12 53 e0 4d c0 a5 39 7b b4 4a 7a b8 7e 9b f2 a0
        39 a3 3d 1e 99 6f c8 2a 94 cc d3 00 74 c9 5d f7
        63 72 20 17 06 9e 52 68 da 5d 1c 0b 4f 87 2c f6
        53 c1 1d f8 23 14 a6 79 68 df ea e2 8d ef 04 bb
        6d 84 b1 c3 1d 65 4a 19 70 e5 78 3b d6 eb 96 a0
        24 c2 ca 2f 4a 90 fe 9f 2e f5 c9 c1 40 e5 bb 48
        da 95 36 ad 87 00 c8 4f c9 13 0a de a7 4e 55 8d
        51 a7 4d df 85 d8 b5 0d e9 68 38 d6 06 3e 09 55
    """

    modulus = """
        bb f8 2f 09 06 82 ce 9c 23 38 ac 2b 9d a8 71 f7
        36 8d 07 ee d4 10 43 a4 40 d6 b6 f0 74 54 f5 1f
        b8 df ba af 03 5c 02 ab 61 ea 48 ce eb 6f cd 48
        76 ed 52 0d 60 e1 ec 46 19 71 9d 8a 5b 8b 80 7f
        af b8 e0 a3 df c7 37 72 3e e6 b4 b7 d9 3a 25 84
        ee 6a 64 9d 06 09 53 74 88 34 b2 45 45 98 39 4e
        e0 aa b1 2d 7b 61 a5 1f 52 7a 9a 41 f6 c1 68 7f
        e2 53 72 98 ca 2a 8f 59 46 f8 e5 fd 09 1d bd cb
    """

    e = 0x11    # public exponent

    prime_factor = """
        c9 7f b1 f0 27 f4 53 f6 34 12 33 ea aa d1 d9 35
        3f 6c 42 d0 88 66 b1 d0 5a 0f 20 35 02 8b 9d 86
        98 40 b4 16 66 b4 2e 92 ea 0d a3 b4 32 04 b5 cf
        ce 33 52 52 4d 04 16 a5 a4 41 e7 00 af 46 15 03
    """

    def setUp(self):
        # Imported lazily and published as module globals because several
        # helper methods below reference them.
        global RSA, Random, bytes_to_long
        from Cryptodome.PublicKey import RSA
        from Cryptodome import Random
        from Cryptodome.Util.number import bytes_to_long, inverse

        self.n = bytes_to_long(a2b_hex(self.modulus))
        self.p = bytes_to_long(a2b_hex(self.prime_factor))

        # Compute q, d, and u from n, e, and p
        self.q = self.n // self.p
        self.d = inverse(self.e, (self.p - 1) * (self.q - 1))
        self.u = inverse(self.p, self.q)    # u = e**-1 (mod q)

        self.rsa = RSA

    def test_generate_1arg(self):
        """RSA (default implementation) generated key (1 argument)"""
        rsaObj = self.rsa.generate(1024)
        self._check_private_key(rsaObj)
        self._exercise_primitive(rsaObj)
        pub = rsaObj.publickey()
        self._check_public_key(pub)
        self._exercise_public_primitive(rsaObj)

    def test_generate_2arg(self):
        """RSA (default implementation) generated key (2 arguments)"""
        rsaObj = self.rsa.generate(1024, Random.new().read)
        self._check_private_key(rsaObj)
        self._exercise_primitive(rsaObj)
        pub = rsaObj.publickey()
        self._check_public_key(pub)
        self._exercise_public_primitive(rsaObj)

    def test_generate_3args(self):
        # Explicit public exponent must be honoured.
        rsaObj = self.rsa.generate(1024, Random.new().read, e=65537)
        self._check_private_key(rsaObj)
        self._exercise_primitive(rsaObj)
        pub = rsaObj.publickey()
        self._check_public_key(pub)
        self._exercise_public_primitive(rsaObj)
        self.assertEqual(65537, rsaObj.e)

    def test_construct_2tuple(self):
        """RSA (default implementation) constructed key (2-tuple)"""
        pub = self.rsa.construct((self.n, self.e))
        self._check_public_key(pub)
        self._check_encryption(pub)

    def test_construct_3tuple(self):
        """RSA (default implementation) constructed key (3-tuple)"""
        rsaObj = self.rsa.construct((self.n, self.e, self.d))
        self._check_encryption(rsaObj)
        self._check_decryption(rsaObj)

    def test_construct_4tuple(self):
        """RSA (default implementation) constructed key (4-tuple)"""
        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p))
        self._check_encryption(rsaObj)
        self._check_decryption(rsaObj)

    def test_construct_5tuple(self):
        """RSA (default implementation) constructed key (5-tuple)"""
        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q))
        self._check_private_key(rsaObj)
        self._check_encryption(rsaObj)
        self._check_decryption(rsaObj)

    def test_construct_6tuple(self):
        """RSA (default implementation) constructed key (6-tuple)"""
        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q, self.u))
        self._check_private_key(rsaObj)
        self._check_encryption(rsaObj)
        self._check_decryption(rsaObj)

    def test_construct_bad_key2(self):
        # e must be > 1
        tup = (self.n, 1)
        self.assertRaises(ValueError, self.rsa.construct, tup)

        # An even modulus is wrong
        tup = (self.n + 1, self.e)
        self.assertRaises(ValueError, self.rsa.construct, tup)

    def test_construct_bad_key3(self):
        # d inconsistent with (n, e)
        tup = (self.n, self.e, self.d + 1)
        self.assertRaises(ValueError, self.rsa.construct, tup)

    def test_construct_bad_key5(self):
        # p == q, p not a factor of n, and e not invertible mod lambda(n)
        # must all be rejected.
        tup = (self.n, self.e, self.d, self.p, self.p)
        self.assertRaises(ValueError, self.rsa.construct, tup)

        tup = (self.p * self.p, self.e, self.p, self.p)
        self.assertRaises(ValueError, self.rsa.construct, tup)

        tup = (self.p * self.p, 3, self.p, self.q)
        self.assertRaises(ValueError, self.rsa.construct, tup)

    def test_construct_bad_key6(self):
        # Wrong CRT coefficient u (must be p**-1 mod q, not q**-1 mod p).
        tup = (self.n, self.e, self.d, self.p, self.q, 10)
        self.assertRaises(ValueError, self.rsa.construct, tup)

        from Cryptodome.Util.number import inverse
        tup = (self.n, self.e, self.d, self.p, self.q, inverse(self.q, self.p))
        self.assertRaises(ValueError, self.rsa.construct, tup)

    def test_factoring(self):
        # Given (n, e, d) the factors p and q must be recovered.
        rsaObj = self.rsa.construct([self.n, self.e, self.d])
        self.assertTrue(rsaObj.p == self.p or rsaObj.p == self.q)
        self.assertTrue(rsaObj.q == self.p or rsaObj.q == self.q)
        self.assertTrue(rsaObj.q * rsaObj.p == self.n)

        self.assertRaises(ValueError, self.rsa.construct, [self.n, self.e, self.n - 1])

    def test_repr(self):
        # repr() must not raise.
        rsaObj = self.rsa.construct((self.n, self.e, self.d, self.p, self.q))
        repr(rsaObj)

    def test_serialization(self):
        """RSA keys are unpickable"""
        rsa_key = self.rsa.generate(1024)
        self.assertRaises(PicklingError, pickle.dumps, rsa_key)

    def test_raw_rsa_boundary(self):
        # The argument of every RSA raw operation (encrypt/decrypt) must be
        # positive and no larger than the modulus
        rsa_obj = self.rsa.generate(1024)

        self.assertRaises(ValueError, rsa_obj._decrypt, rsa_obj.n)
        self.assertRaises(ValueError, rsa_obj._encrypt, rsa_obj.n)

        self.assertRaises(ValueError, rsa_obj._decrypt, 0)
        self.assertRaises(ValueError, rsa_obj._encrypt, 0)

    def test_size(self):
        pub = self.rsa.construct((self.n, self.e))
        self.assertEqual(pub.size_in_bits(), 1024)
        self.assertEqual(pub.size_in_bytes(), 128)

    def _check_private_key(self, rsaObj):
        from Cryptodome.Math.Numbers import Integer

        # Check capabilities
        self.assertEqual(1, rsaObj.has_private())

        # Sanity check key data
        self.assertEqual(rsaObj.n, rsaObj.p * rsaObj.q)     # n = pq
        lcm = int(Integer(rsaObj.p - 1).lcm(rsaObj.q - 1))
        self.assertEqual(1, rsaObj.d * rsaObj.e % lcm)      # ed = 1 (mod LCM(p-1, q-1))
        self.assertEqual(1, rsaObj.p * rsaObj.u % rsaObj.q) # pu = 1 (mod q)
        self.assertEqual(1, rsaObj.p > 1)   # p > 1
        self.assertEqual(1, rsaObj.q > 1)   # q > 1
        self.assertEqual(1, rsaObj.e > 1)   # e > 1
        self.assertEqual(1, rsaObj.d > 1)   # d > 1

    def _check_public_key(self, rsaObj):
        ciphertext = a2b_hex(self.ciphertext)

        # Check capabilities
        self.assertEqual(0, rsaObj.has_private())

        # Check rsaObj.[ne] -> rsaObj.[ne] mapping
        self.assertEqual(rsaObj.n, rsaObj.n)
        self.assertEqual(rsaObj.e, rsaObj.e)

        # Check that private parameters are all missing
        self.assertEqual(0, hasattr(rsaObj, 'd'))
        self.assertEqual(0, hasattr(rsaObj, 'p'))
        self.assertEqual(0, hasattr(rsaObj, 'q'))
        self.assertEqual(0, hasattr(rsaObj, 'u'))

        # Sanity check key data
        self.assertEqual(1, rsaObj.e > 1)   # e > 1

        # Public keys should not be able to sign or decrypt
        self.assertRaises(TypeError, rsaObj._decrypt,
                          bytes_to_long(ciphertext))

        # Check __eq__ and __ne__
        self.assertEqual(rsaObj.publickey() == rsaObj.publickey(), True)   # assert_
        self.assertEqual(rsaObj.publickey() != rsaObj.publickey(), False)  # failIf

    def _exercise_primitive(self, rsaObj):
        # Since we're using a randomly-generated key, we can't check the test
        # vector, but we can make sure encryption and decryption are inverse
        # operations.
        ciphertext = bytes_to_long(a2b_hex(self.ciphertext))

        # Test decryption
        plaintext = rsaObj._decrypt(ciphertext)

        # Test encryption (2 arguments)
        new_ciphertext2 = rsaObj._encrypt(plaintext)
        self.assertEqual(ciphertext, new_ciphertext2)

    def _exercise_public_primitive(self, rsaObj):
        plaintext = a2b_hex(self.plaintext)

        # Test encryption (2 arguments)
        new_ciphertext2 = rsaObj._encrypt(bytes_to_long(plaintext))

    def _check_encryption(self, rsaObj):
        plaintext = a2b_hex(self.plaintext)
        ciphertext = a2b_hex(self.ciphertext)

        # Test encryption
        new_ciphertext2 = rsaObj._encrypt(bytes_to_long(plaintext))
        self.assertEqual(bytes_to_long(ciphertext), new_ciphertext2)

    def _check_decryption(self, rsaObj):
        plaintext = bytes_to_long(a2b_hex(self.plaintext))
        ciphertext = bytes_to_long(a2b_hex(self.ciphertext))

        # Test plain decryption
        new_plaintext = rsaObj._decrypt(ciphertext)
        self.assertEqual(plaintext, new_plaintext)
def get_tests(config={}):
    """Return the list of test cases exported by this module."""
    # Single test class; materialize a fresh list for the caller.
    return list(list_test_cases(RSATest))
if __name__ == '__main__':
    # unittest resolves defaultTest='suite' as a name in this module.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
| {
"content_hash": "1b0961654760b1aa4ec2340a8507abf2",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 85,
"avg_line_length": 38.13745704467354,
"alnum_prop": 0.6206523697963597,
"repo_name": "chronicwaffle/PokemonGo-DesktopMap",
"id": "8067a6647cc5c75f3eccccbc8f37208cee7f7df2",
"size": "12218",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/pylibs/osx64/Cryptodome/SelfTest/PublicKey/test_RSA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "29260"
},
{
"name": "JavaScript",
"bytes": "52980"
},
{
"name": "Python",
"bytes": "11998498"
},
{
"name": "Shell",
"bytes": "4097"
}
],
"symlink_target": ""
} |
""" The abstract interface for all pyface dialogs. """
# Enthought library imports.
from enthought.traits.api import Bool, Enum, Int, Str, Unicode
# Local imports.
from constant import OK
from i_window import IWindow
class IDialog(IWindow):
    """ The abstract interface for all pyface dialogs.

    Usage: Sub-class this class and either override '_create_contents' or
    more simply, just override the two methods that do the real work:-

    1) '_create_dialog_area' creates the main content of the dialog.
    2) '_create_buttons' creates the dialog buttons.

    Note: the methods below are interface declarations only (docstring
    bodies); toolkit-specific subclasses supply the implementations.
    """

    #### 'IDialog' interface ##################################################

    # The label for the 'cancel' button. The default is toolkit specific.
    cancel_label = Unicode

    # The context sensitive help Id (the 'Help' button is only shown iff this
    # is set).
    help_id = Str

    # The label for the 'help' button. The default is toolkit specific.
    help_label = Unicode

    # The label for the 'ok' button. The default is toolkit specific.
    ok_label = Unicode

    # Is the dialog resizeable?
    resizeable = Bool(True)

    # The return code after the window is closed to indicate whether the dialog
    # was closed via 'Ok' or 'Cancel').
    return_code = Int(OK)

    # The dialog style (is it modal or not).
    # FIXME v3: It doesn't seem possible to use non-modal dialogs. (How do you
    # get access to the buttons?)
    style = Enum('modal', 'nonmodal')

    ###########################################################################
    # 'IDialog' interface.
    ###########################################################################

    def open(self):
        """ Opens the dialog.

        If the dialog is modal then the dialog's event loop is entered and the
        dialog closed afterwards. The 'return_code' trait is updated according
        to the button the user pressed and this value is also returned.

        If the dialog is non-modal 'OK' is returned.
        """

    ###########################################################################
    # Protected 'IDialog' interface.
    ###########################################################################

    def _create_buttons(self, parent):
        """ Create and return the buttons.

        parent is the parent control.
        """

    def _create_contents(self, parent):
        """ Create the dialog contents.

        parent is the parent control.
        """

    def _create_dialog_area(self, parent):
        """ Create and return the main content of the dialog.

        parent is the parent control.
        """

    def _show_modal(self):
        """ Opens the dialog as a modal dialog and returns the return code. """
class MDialog(object):
    """ The mixin class that contains common code for toolkit specific
    implementations of the IDialog interface.

    Implements: open()
    Reimplements: _add_event_listeners(), _create()
    """

    ###########################################################################
    # 'IDialog' interface.
    ###########################################################################

    def open(self):
        """ Opens the dialog. """
        # Build the widget hierarchy lazily, on first open.
        if self.control is None:
            self._create()

        if self.style != 'modal':
            # Non-modal: just show the dialog and report success.
            self.show(True)
            self.return_code = OK
        else:
            # Modal: block in the toolkit event loop, then tear down.
            self.return_code = self._show_modal()
            self.close()

        return self.return_code

    ###########################################################################
    # Protected 'IWidget' interface.
    ###########################################################################

    def _create(self):
        """ Creates the window's widget hierarchy. """
        # Let the base class build the shell, then populate it.
        super(MDialog, self)._create()
        self._create_contents(self.control)

    ###########################################################################
    # Protected 'IWindow' interface.
    ###########################################################################

    def _add_event_listeners(self):
        """ Adds any event listeners required by the window. """
        # Dialogs manage their own lifecycle; nothing to hook up here.
        pass
#### EOF ######################################################################
| {
"content_hash": "553c52e0244abefdb93cba4d1320defb",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 31.08695652173913,
"alnum_prop": 0.49184149184149184,
"repo_name": "enthought/traitsgui",
"id": "78107bcfc7bd45227581de03ffa52dd053be6348",
"size": "4930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/i_dialog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1196658"
}
],
"symlink_target": ""
} |
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import service
from oslo_utils import importutils
import osprofiler.notifier
from osprofiler import profiler
import osprofiler.web
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import base as objects_base
from cinder import rpc
from cinder import version
from cinder import wsgi
# Module-level logger.
LOG = logging.getLogger(__name__)

# Options controlling service state reporting, periodic task scheduling and
# the OpenStack Volume API listener. Registered in the default config group.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='Interval, in seconds, between nodes reporting state '
                    'to datastore'),
    cfg.IntOpt('periodic_interval',
               default=60,
               help='Interval, in seconds, between running periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='Range, in seconds, to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.StrOpt('osapi_volume_listen',
               default="0.0.0.0",
               help='IP address on which OpenStack Volume API listens'),
    cfg.IntOpt('osapi_volume_listen_port',
               default=8776,
               min=1, max=65535,
               help='Port on which OpenStack Volume API listens'),
    cfg.IntOpt('osapi_volume_workers',
               help='Number of workers for OpenStack Volume API service. '
                    'The default is equal to the number of CPUs available.'), ]

# Options gating the OSProfiler integration; registered under [profiler].
profiler_opts = [
    cfg.BoolOpt("profiler_enabled", default=False,
                help=_('If False fully disable profiling feature.')),
    cfg.BoolOpt("trace_sqlalchemy", default=False,
                help=_("If False doesn't trace SQL requests."))
]

CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.register_opts(profiler_opts, group="profiler")
def setup_profiler(binary, host):
    """Enable or disable OSProfiler tracing based on [profiler] config.

    :param binary: name of the service binary used to tag trace messages
    :param host: host name used to tag trace messages
    """
    if CONF.profiler.profiler_enabled:
        # Route profiling traces through the messaging notifier so they can
        # be collected centrally.
        _notifier = osprofiler.notifier.create(
            "Messaging", messaging, context.get_admin_context().to_dict(),
            rpc.TRANSPORT, "cinder", binary, host)
        osprofiler.notifier.set(_notifier)
        # Operator-facing warning about the security implications of
        # enabling tracing; text must remain stable for log scrapers.
        LOG.warning(
            _LW("OSProfiler is enabled.\nIt means that person who knows "
                "any of hmac_keys that are specified in "
                "/etc/cinder/api-paste.ini can trace his requests. \n"
                "In real life only operator can read this file so there "
                "is no security issue. Note that even if person can "
                "trigger profiler, only admin user can retrieve trace "
                "information.\n"
                "To disable OSprofiler set in cinder.conf:\n"
                "[profiler]\nenabled=false"))
    else:
        # Profiling disabled: also switch off the WSGI trace middleware.
        osprofiler.web.disable()
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
it state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, *args, **kwargs):
super(Service, self).__init__()
if not rpc.initialized():
rpc.init(CONF)
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
manager_class = importutils.import_class(self.manager_class_name)
manager_class = profiler.trace_cls("rpc")(manager_class)
self.manager = manager_class(host=self.host,
service_name=service_name,
*args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.basic_config_check()
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
setup_profiler(binary, host)
self.rpcserver = None
def start(self):
version_string = version.version_string()
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
self.manager.init_host()
ctxt = context.get_admin_context()
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
LOG.debug("Creating RPC server for service %s", self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
serializer = objects_base.CinderObjectSerializer()
self.rpcserver = rpc.get_server(target, endpoints, serializer)
self.rpcserver.start()
self.manager.init_host_with_rpc()
if self.report_interval:
pulse = loopingcall.FixedIntervalLoopingCall(
self.report_state)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
self.timers.append(pulse)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
def basic_config_check(self):
"""Perform basic config checks before starting service."""
# Make sure report interval is less than service down time
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
def _create_service_ref(self, context):
zone = CONF.storage_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': zone})
self.service_id = service_ref['id']
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_interval=None,
periodic_fuzzy_delay=None, service_name=None):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'cinder-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary
if not manager:
subtopic = topic.rpartition('cinder-')[2]
manager = CONF.get('%s_manager' % subtopic, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay,
service_name=service_name)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
db.service_destroy(context.get_admin_context(), self.service_id)
except exception.NotFound:
LOG.warning(_LW('Service killed that has no database entry'))
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.rpcserver.stop()
except Exception:
pass
for x in self.timers:
try:
x.stop()
except Exception:
pass
self.timers = []
super(Service, self).stop()
def wait(self):
for x in self.timers:
try:
x.wait()
except Exception:
pass
if self.rpcserver:
self.rpcserver.wait()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
    def report_state(self):
        """Update the state of this service in the datastore."""
        if not self.manager.is_working():
            # NOTE(dulek): If manager reports a problem we're not sending
            # heartbeats - to indicate that service is actually down.
            LOG.error(_LE('Manager for service %(binary)s %(host)s is '
                          'reporting problems, not sending heartbeat. '
                          'Service will appear "down".'),
                      {'binary': self.binary,
                       'host': self.host})
            return
        ctxt = context.get_admin_context()
        zone = CONF.storage_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                # Row can vanish (e.g. manual cleanup); recreate it so the
                # heartbeat update below has something to act on.
                LOG.debug('The service database object disappeared, '
                          'recreating it.')
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)
            state_catalog['report_count'] = service_ref['report_count'] + 1
            # Only write the zone when it changed, keeping the update small.
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone
            db.service_update(ctxt,
                              self.service_id, state_catalog)
            # TODO(termie): make this pattern be more elegant.
            # 'model_disconnected' latches DB failures so each outage is
            # logged once on the way down and once on recovery.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_LE('Recovered model server connection!'))
        except db_exc.DBConnectionError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('model server went away'))
        # NOTE(jsbryant) Other DB errors can happen in HA configurations.
        # such errors shouldn't kill this thread, so we handle them here.
        except db_exc.DBError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('DBError encountered: '))
        except Exception:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('Exception encountered: '))
class WSGIService(service.ServiceBase):
    """Provides ability to launch API from a 'paste' configuration."""
    def __init__(self, name, loader=None):
        """Initialize, but do not start the WSGI server.
        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None
        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        # Listen address/port come from per-service config options named
        # '<name>_listen' and '<name>_listen_port'.
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        configured_workers = getattr(CONF, '%s_workers' % name, None)
        self.workers = configured_workers or processutils.get_worker_count()
        if self.workers and self.workers < 1:
            worker_name = '%s_workers' % name
            msg = (_("%(worker_name)s value of %(workers)d is invalid, "
                     "must be greater than 0.") %
                   {'worker_name': worker_name,
                    'workers': self.workers})
            raise exception.InvalidInput(msg)
        setup_profiler(name, self.host)
        self.server = wsgi.Server(name, self.app, host=self.host,
                                  port=self.port)
    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.
        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.
        :returns: a Manager instance, or None.
        """
        manager_opt = '%s_manager' % self.name
        if manager_opt not in CONF:
            return None
        manager_class_name = CONF.get(manager_opt, None)
        if not manager_class_name:
            return None
        return importutils.import_class(manager_class_name)()
    def start(self):
        """Start serving this service using loaded configuration.
        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.
        :returns: None
        """
        if self.manager:
            self.manager.init_host()
        self.server.start()
        # Record the port actually bound (relevant when 0 was requested).
        self.port = self.server.port
    def stop(self):
        """Stop serving this API.
        :returns: None
        """
        self.server.stop()
    def wait(self):
        """Wait for the service to stop serving this API.
        :returns: None
        """
        self.server.wait()
    def reset(self):
        """Reset server greenpool size to default.
        :returns: None
        """
        self.server.reset()
def process_launcher():
    """Return a new ProcessLauncher bound to the global CONF."""
    return service.ProcessLauncher(CONF)
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
    """Launch *server* via the module-global launcher.

    May only be called once per process; a second call raises RuntimeError.
    :param workers: worker count forwarded to service.launch.
    """
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))
    _launcher = service.launch(CONF, server, workers=workers)
def wait():
    """Log the effective configuration, then block on the global launcher."""
    LOG.debug('Full set of CONF:')
    for flag in CONF:
        flag_get = CONF.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        is_secret = "_password" in flag or "_key" in flag
        if not is_secret and flag == "sql_connection":
            is_secret = "mysql:" in flag_get or "postgresql:" in flag_get
        if is_secret:
            LOG.debug('%s : FLAG SET ', flag)
        else:
            LOG.debug('%(flag)s : %(flag_get)s',
                      {'flag': flag, 'flag_get': flag_get})
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()
    rpc.cleanup()
class Launcher(object):
    """Thin launcher facade mapping launch_service/wait to serve()/wait()."""
    def __init__(self):
        # Delegate to the module-level helpers so callers see the same
        # interface as the launcher returned by process_launcher().
        self.launch_service = serve
        self.wait = wait
def get_launcher():
    """Return the platform-appropriate service launcher."""
    # Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows
    # due to missing support of non-blocking I/O pipes. For this reason, the
    # service must be spawned differently on Windows, using the ServiceLauncher
    # class instead.
    if os.name == 'nt':
        return Launcher()
    return process_launcher()
| {
"content_hash": "67670a3f3c81fc3883af080047293939",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 79,
"avg_line_length": 36.949367088607595,
"alnum_prop": 0.5768528034715085,
"repo_name": "tlakshman26/cinder-new-branch",
"id": "54eadb66ce4604cf717e74a71461ec4ed1de3c19",
"size": "18284",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12371447"
},
{
"name": "Shell",
"bytes": "8172"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext as _
from django.utils.log import getLogger
from django.contrib.auth.decorators import login_required, permission_required
from django.views.decorators.http import require_POST
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from TeaScrum.product.models import Product
from TeaScrum.backlog.models import Backlog, Task
from TeaScrum.utils import render, get_active_product, error_response, get_velocity
from models import Sprint
from forms import SprintEditForm
logger = getLogger('TeaScrum')
class SprintListView(ListView):
    """List every sprint belonging to a product."""
    context_object_name = "sprint_list"
    template_name = "sprint/sprint_list.html"
    def get_queryset(self):
        """ Get a queryset of all sprints for a product.
        Product ID can be specified with either url kwargs['pid'] or GET parameter ?pid=n.
        If not given, current active product in session is used.
        """
        pid = self.kwargs.get('pid', None) or self.request.GET.get('pid', None)
        if pid:
            self.product = get_object_or_404(Product, pk=pid)
        else:
            self.product = get_active_product(self.request)
        return self.product.sprint_set.get_query_set()
    def get_context_data(self, **kwargs):
        # Expose the resolved product alongside the sprint list.
        ctx = super(SprintListView, self).get_context_data(**kwargs)
        ctx['product'] = self.product
        return ctx
class SprintBacklogView(ListView):
    """Show the backlog items assigned to one sprint."""
    context_object_name = "backlog"
    template_name = "sprint/sprint_backlog.html"
    def get_queryset(self):
        # Resolve the sprint from the URL and keep it for the context.
        self.sprint = get_object_or_404(Sprint, pk=self.kwargs['pk'])
        return Backlog.objects.filter(sprint=self.sprint)
    def get_context_data(self, **kwargs):
        ctx = super(SprintBacklogView, self).get_context_data(**kwargs)
        ctx.update({'sprint': self.sprint,
                    'product': self.sprint.product,
                    'velocity': get_velocity(self.sprint.product)})
        return ctx
@permission_required('sprint.add_sprint')
def edit_sprint(request, sid=None, pid=None):
    """ Add a new Sprint entity or edit an existing one.

    :param sid: primary key of the Sprint to edit, or None to create one.
    :param pid: primary key of the owning Product; defaults to the active one.
    """
    if pid:
        product = get_object_or_404(Product, pk=pid)
    else:
        product = get_active_product(request)
    if not product.owner_or_master(request.user):
        return render(request, 'error', {'err':_('Permission denied')})
    if sid:
        sprint = get_object_or_404(Sprint, pk=sid)
        if sprint.product != product:
            logger.error('edit_sprint, sprint %s not belong to active product'%sid)
            return render(request, 'error', {'err':_('Not active product')})
    else:
        sprint = None
    if request.method == 'POST':
        form = SprintEditForm(request.POST, instance=sprint)
        if form.is_valid():
            sp = form.save(commit=False)
            if not hasattr(sp, 'product'):
                setattr(sp, 'product', product)
            if not hasattr(sp, 'master'):
                setattr(sp, 'master', request.user)
            sp.save()
            form.save_m2m()
            # BUG FIX: '%' binds tighter than 'or', so the original
            # "'/sprint/%s' % sid or form.cleaned_data['pk']" always
            # produced '/sprint/None' for newly created sprints.  Fall
            # back to the freshly saved object's primary key instead.
            return HttpResponseRedirect('/sprint/%s' % (sid or sp.pk))
        logger.debug('edit_sprint invalid form')
    else:
        form = SprintEditForm(instance=sprint)
    params = {'form':form, 'sid':sid, 'product':product, 'sprint':sprint}
    return render(request, 'sprint_edit', params, 'sprint/')
@permission_required('sprint.delete_sprint')
def remove_sprint(request, sid):
    """ Remove Sprint entity by ID.
    User permission: delete_sprint, and must also be scrum master for this sprint.
    """
    if not sid:
        return render(request, 'error', {'err':'No sid'})
    product = get_active_product(request)
    if not product.owner_or_master(request.user):
        return render(request, 'error', {'err':_('Permission denied')})
    try:
        target = get_object_or_404(Sprint, pk=sid)
        if target.product != product:
            logger.error('remove_sprint, sprint %s not belong to active product'%sid)
            return render(request, 'error', {'err':_('Not active product')})
        if target.master != request.user:
            return render(request, 'error', {'err':_('Permission denied')})
        target.delete()
    except Exception as e:
        # Deletion failures are logged but still redirect to the list view.
        logger.error('sprint.views.remove_sprint(%s) error: %s' % (sid, e))
    return HttpResponseRedirect('/sprint/')
@permission_required('sprint.change_sprint')
def select_backlog(request, sid):
    """ Select a group of backlog items to fit in the sprint timebox based on the team velocity.
    Velocity is passed as argument or fetched from the team or global settings.
    """
    # Velocity may come in as the 'v' GET parameter; otherwise use the
    # configured velocity of the currently active product.
    vs = request.GET.get('v', None)
    if not vs:
        velocity = get_velocity(get_active_product(request))
    else:
        velocity = float(vs)
    est = 0.0    # running total of accepted estimates
    items = []   # backlog items that fit within the velocity budget
    sp = get_object_or_404(Sprint, pk=sid)
    # collect existing sprint backlog items
    for itm in sp.backlog_set.get_query_set():
        est += itm.estimate
        if est > velocity:
            # remove if overruns velocity
            itm.sprint = None
            itm.save()
        else:
            items.append(itm)
    if est < velocity:
        # add more if velocity allows
        # NOTE(review): 'est' still includes the estimates of items removed
        # above, so the budget here is effectively reduced — confirm intended.
        for itm in Backlog.objects.filter(sprint=None):
            est += itm.estimate
            if est > velocity:
                break
            items.append(itm)
            itm.sprint = sp
            itm.save()
    data = {'backlog':items, 'sprint':sp, 'product':sp.product}
    return render(request, 'sprint_backlog', data, 'sprint/')
@permission_required('sprint.change_sprint')
def include_backlog(request, sid, bid):
    """ Include a backlog item into this sprint.
    If this item is assigned to another sprint, returns an error.
    User permission: change_sprint and must be scrum master of the sprint.
    """
    try:
        bitem = get_object_or_404(Backlog, pk=bid)
        if bitem.sprint:
            # BUG FIX: the format string has three placeholders but was fed
            # the single value 'bitem.sprint', which raised TypeError at
            # runtime and masked the real "already assigned" log message.
            logger.error('include_backlog(sid=%s,bid=%s), item already assigned to %s'
                         % (sid, bid, bitem.sprint))
            return error_response(_('This item is already assigned to another sprint'))
        sprint = get_object_or_404(Sprint, pk=sid)
        if sprint.master != request.user:
            logger.error('include_backlog() user Not sprint master')
            return error_response(_('Permission denied'))
        bitem.sprint = sprint
        bitem.save()
    except Exception as e:
        logger.error('include_backlog(sid=%s,bid=%s) failed:%s' % (sid,bid,e))
        return error_response(_('Error saving backlog item in the sprint.'))
    return HttpResponseRedirect('/sprint/%s/backlog'%sid)
@permission_required('sprint.change_sprint')
def exclude_backlog(request, sid, bid):
    """ Remove a backlog item from this sprint.
    User permission: change_sprint, and must be scrum master of the sprint.
    """
    try:
        bitem = get_object_or_404(Backlog, pk=bid)
        allowed = (str(bitem.sprint.pk) == sid and
                   request.user == bitem.sprint.master)
        if allowed:
            bitem.sprint = None
            bitem.save()
        else:
            logger.error('remove_backlog(sid=%s,bid=%s),Backlog.sprint.id=%s,master=%s,request.user=%s'%(sid,bid,bitem.sprint.pk,bitem.sprint.master,request.user))
            return render(request, 'error', {'err':_('Permission denied')})
    except Exception as e:
        # An unassigned item (sprint is None) lands here via AttributeError.
        logger.error('sprint.remove_backlog: %s'%e)
    return HttpResponseRedirect('/sprint/%s/backlog'%sid)
@login_required
def sprint_tasks(request, sid):
    """ Show a list of all tasks grouped by backlog items for the sprint.
    """
    sprint = get_object_or_404(Sprint, pk=sid)
    task_list = Task.objects.filter(item__sprint=sprint)
    data = {'tasks': task_list, 'sprint': sprint, 'product': sprint.product}
    return render(request, 'sprint_tasks', data, 'sprint/')
def jsonstr(st):
    """ Convert double quote marks into the HTML entity &quot;

    The result is embedded inside a double-quoted, hand-built JSON string
    literal (see submit_retro), so unescaped quotes would corrupt the JSON.
    BUG FIX: the original body replaced '"' with '"' — a no-op that
    contradicted this docstring; it now substitutes the entity.
    """
    return st.replace('"', '&quot;')
@permission_required('sprint.change_sprint')
def submit_retro(request):
    """ The scrum master submits the retrospectives for the sprint review.
    request.path: /sprint/retro/submit
    This is done through Ajax call and pass data in JSON.
    User permission: change_sprint
    """
    if request.method == 'POST':
        good = request.POST.get('good','')
        bad = request.POST.get('bad','')
        advice = request.POST.get('next','')
        sid = request.POST.get('sid')
        try:
            sp = Sprint.objects.get(pk=sid)
            # Only the sprint's own scrum master may submit the retro.
            if sp.master != request.user:
                return render(request, 'error', {'err':_('Permission denied')}, fmt='json')
            # The retrospective is stored as a hand-built JSON object;
            # jsonstr escapes embedded double quotes to keep it well-formed.
            sp.retro = '{"good":"%s","bad":"%s","next":"%s"}' % (jsonstr(good), jsonstr(bad), jsonstr(advice))
            sp.save()
            return HttpResponse('{"status":"OK"}')
        except Exception,e:
            logger.error('submit_retro failed: %s' % e)
            return error_response(_('Error saving submits, retry later.'), fmt='json')
    else:
        return error_response(_('POST only'))
| {
"content_hash": "49fba4b21f43778cadf5d180a3015dbc",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 163,
"avg_line_length": 41.681614349775785,
"alnum_prop": 0.628294782140936,
"repo_name": "tedwen/tea-scrum",
"id": "051279ee11db622f9334288d3ab1e249e74323d5",
"size": "9320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TeaScrum/sprint/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "107669"
},
{
"name": "Python",
"bytes": "141487"
}
],
"symlink_target": ""
} |
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
# Integration test: a retained QoS-0 message must be delivered to a client
# that subscribes after the publish.
rc = 1  # process exit code: stays 1 (failure) until the round-trip succeeds
keepalive = 60
mid = 16  # message id shared by the SUBSCRIBE/SUBACK pair
# Pre-build the raw MQTT packets exchanged with the broker.
connect_packet = mosq_test.gen_connect("retain-qos0-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
publish_packet = mosq_test.gen_publish("retain/qos0/test", qos=0, payload="retained message", retain=True)
subscribe_packet = mosq_test.gen_subscribe(mid, "retain/qos0/test", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
broker = subprocess.Popen(['../../src/mosquitto', '-p', '1888'], stderr=subprocess.PIPE)
try:
    time.sleep(0.5)  # give the broker time to start listening
    sock = mosq_test.do_client_connect(connect_packet, connack_packet)
    sock.send(publish_packet)
    sock.send(subscribe_packet)
    if mosq_test.expect_packet(sock, "suback", suback_packet):
        # The retained message must be re-delivered upon subscription.
        if mosq_test.expect_packet(sock, "publish", publish_packet):
            rc = 0
    sock.close()
finally:
    broker.terminate()
    broker.wait()
    if rc:
        # On failure, dump the broker's stderr for diagnosis.
        (stdo, stde) = broker.communicate()
        print(stde)
exit(rc)
| {
"content_hash": "f0ac429a153fa3e9be585f6972679b47",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 129,
"avg_line_length": 29.795454545454547,
"alnum_prop": 0.6979405034324943,
"repo_name": "thejinchao/moboair_mosquitto",
"id": "6b4823285428cda3d81749b5fcc6df0707f556b6",
"size": "1413",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "test/broker/04-retain-qos0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "726027"
},
{
"name": "C++",
"bytes": "35238"
},
{
"name": "JavaScript",
"bytes": "8597"
},
{
"name": "Perl",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "261812"
},
{
"name": "Shell",
"bytes": "3927"
},
{
"name": "XSLT",
"bytes": "1189"
}
],
"symlink_target": ""
} |
"""Runs a JAXline experiment to perform robust adversarial training."""
import functools
from absl import app
from absl import flags
from jaxline import platform
import tensorflow.compat.v2 as tf
from adversarial_robustness.jax import experiment
if __name__ == '__main__':
  # 'config' must be supplied on the command line for jaxline to run.
  flags.mark_flag_as_required('config')
  try:
    tf.config.set_visible_devices([], 'GPU')  # Prevent TF from using the GPU.
  except tf.errors.NotFoundError:
    # No GPU devices present; nothing to hide from TensorFlow.
    pass
  app.run(functools.partial(platform.main, experiment.Experiment))
| {
"content_hash": "307a35e33fb2ebb7240299242ad9ad31",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 28.5,
"alnum_prop": 0.746588693957115,
"repo_name": "deepmind/deepmind-research",
"id": "26e45d0c18c325b0de07b15ac7fa38076ef3d1b8",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adversarial_robustness/jax/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1002"
},
{
"name": "C++",
"bytes": "5765"
},
{
"name": "Jupyter Notebook",
"bytes": "12330730"
},
{
"name": "Lua",
"bytes": "76186"
},
{
"name": "OpenEdge ABL",
"bytes": "15630"
},
{
"name": "PureBasic",
"bytes": "8"
},
{
"name": "Python",
"bytes": "3419119"
},
{
"name": "Racket",
"bytes": "226692"
},
{
"name": "Shell",
"bytes": "84450"
},
{
"name": "Starlark",
"bytes": "3463"
}
],
"symlink_target": ""
} |
__version__ = 'bokpipe_v1.0'
from .bokoscan import BokOverscanSubtract
from .badpixels import build_mask_from_flat
import bokutil
import bokproc
import bokmkimage
import bokphot
import bokastrom
import bokgnostic
import bokpl
| {
"content_hash": "f44bc6cc067449a2b212dd21af545a57",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 43,
"avg_line_length": 19,
"alnum_prop": 0.8201754385964912,
"repo_name": "legacysurvey/rapala",
"id": "7b1502a7180eae41d753ff92126b3b91cda1b782",
"size": "251",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokpipe/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "61875"
},
{
"name": "Makefile",
"bytes": "7025"
},
{
"name": "Python",
"bytes": "448324"
},
{
"name": "Shell",
"bytes": "3337"
}
],
"symlink_target": ""
} |
import xmltool
from xmltool import *
import codetools
from codetools import *
import copy
'''
About this script:
This script was developed to be a magic bullet for taking LAPACK fortran
and LAPACKE C code and documentation and turning it into the
ChaLAPACK and LAPACK interface modules.
It is not intended to be 'general puropse' and may break with other (maybe newer)
versions of LAPACK.
The idea here was to adopt a static-pass pattern that would be applied to an XML tree.
This is the pattern to be adopted by all Pass classes
class GenericPass ( Pass ):
dependencies = [] # list of Pass inheriting classes that must
# be completed before this pass is run
complete = False # static variable signifying that the pass
# had been successfully completed
@staticmethod
def apply( xml_tree ):
selfname = GenericPass
Pass.resolve( selfname, xml_tree ) # Resolve all of this passes dependencies
# potentially resolving their dependencies
print "[",selfname,"]"
# Work to be done in this pass
selfname.complete = True # Signify that this pass was completed successfully.
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
Pass is the parent class of all pass classes, and contains the parsed
input.xml file (input_xml), where pass specific inputs are found
'''
# Relative paths to LAPACK and its subdirectories.
# NOTE: relative to the directory the script is launched from.
lapack_root = "../LAPACK"
lapack_src = lapack_root + "/SRC"
lapack_matgen_src = lapack_root + "/TESTING/MATGEN"
lapack_install_src = lapack_root + "/INSTALL"
lapacke_include = lapack_root + "/lapacke/include"
blas_src = lapack_root + "/BLAS/SRC"
# Parses the documentation out from the text, stripping the comment tokens
# Captures: [1,text] raw documentation text, stripped of comment tokens
f_comment_regex = re.compile( r"(?:^|\n)\*>?(?P<text>.*)" )
# Parses the source out from the text
# Captures: [1,text] raw source code text
f_source_regex = re.compile( r"(?:^|\n)(?!\*>?)(?P<text>.*)" )
# Parses function declaration from the c header file
# Captures: [1,returns] return type, [2,name] function name, [3,arguments] full unparsed param list
c_func_decl_regex = re.compile( r"(?P<returns>\w+)\s+(?P<name>\w+)\s*\(\s*(?P<arguments>(?:\w+\s+)?\w+(?:\s*\*\s*|\s+)\w+(?:\s*,\s*(?:\w+\s+)?\w+(?:\s*\*\s*|\s+)\w+)*)?\s*\)\s*;" )
# Parses arguments to a function
# Captures: [1,modifier]? const modifier, [2,type] type, [3,refdepth] string containing whitespace and/or asterisk(s), [4,name] param name
c_args_regex = re.compile( r"(?:(?P<modifier>const)\s+)?(?P<type>\w+)(?P<refdepth>(?:\s*\*+\s*)|\s+)(?P<name>\w+)" )
# Parses function declaration and argument documentation from the fortran code (Does not yet work with source. requires $ delimiting as well)
# Captures: [1,type]? return type, [2,name] function name, [3,arguments] full unparsed argument names,
doc_func_regex = re.compile( r"(?:(?P<type>(?:[\w\*]+)(?: +\w+)*)\s+)?(?:(?:SUBROUTINE)|(?:FUNCTION))\s+(?P<name>\w+)\(\s*(?P<arguments>(?:\w+\s*(?:,\s*\w+\s*)*)?)\s*\)" )
# Parses the scalar arguments from the documentation (TODO determine if source works too.)
# Captures: [1,body] full unparsed text of scalar arguments documentation
doc_scalarargs_regex = re.compile( r"Scalar Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_scalarargs_decls_regex = re.compile( r"(?P<type>(?:[\w\*]+)(?: +\w+)*) +(?P<names>\w+(?:\s*,(?:\s*\$\s*)?\s*\w+)*)" )
# Parses the array arguments from the documentation (TODO determine if source works too.)
# Captures: [1,body] full unparsed text of array arguments documentation
doc_arrayargs_regex = re.compile( r"Array Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_arrayargs_decls_regex = re.compile( r"(?P<type>(?:[\w\*]+)(?: +\w+)*) +(?P<names>\w+(?:\([\s\S]*?\))?(?:\s*,(?:\s*\$\s*)?\s*\w+(?:\([\s\S]*?\))?)*)" )
doc_arrayargs_decls_names_dims_regex = re.compile( r"(?P<name>\w+)(?:\((?P<dimensions>.*?)\))?," )
# Function arguments share the declaration grammar of scalar arguments.
doc_functionargs_regex = re.compile( r"Function Arguments\s+\.\.\s+(?P<body>(?:.|\n)*?)\s+\.\." )
doc_functionargs_decls_regex = doc_scalarargs_decls_regex
# Parses the argument information from the documentation
# Captures: [1,name] name, [2,intent] intent, [3,body] full unparsed body of argument document
doc_args_regex = re.compile( r"\s+\\param\[(?P<intent>\w+(?:,\w+)*)\]\s+(?P<name>\w+)\s+\\verbatim\s+(?P<body>(?:[\s\S])+?)\s*\\endverbatim" )
# Parses the typeinfo group of doc_args_regex
# Captures: [1,name] argument name, [2,type] type, [3,array]? captures array keyword if exists, [4,dimension] captures text describing dimensions
doc_args_typeinfo_regex = re.compile( r"(?P<name>\w+)\s+is\s+(?P<type>\w+)(?: (?P<array>array), dimension\s+(?P<dimensions>.*))?" )
# Parses the argument documentation and provides the matrix size of an array (if there is one)
# Captures: [1] 1st dimension, [2] 2nd dimension
doc_args_dimensions_regex = re.compile( r"(\w+)[- ]by[- ](\w+)(?: coefficient)? matrix" )
# Parses the human documentation of the fortran base of scalar ints to determine how (if at all) they relate to matrix arrays
# Captures: [1,what] the semantic information of relation (order, rows, columns, rank) [2,who] an unbroken sentence of names referring to matrices/arrays
scalar_matrix_relation_regex = re.compile( r"(?:number\s+of\s+)?(?P<what>\w+)\s+(?:(?:of)|(?:in))\s+(?:the\s+)?(?:input\s+)?(?:(?:matrix)|(?:matrices)|(?:submatrix))?(?:\s+)?(?P<who>(?:(?:(?:\w+\( \w+ \))|(?:\w+))\s*)+)" );
# Parses the function purpose documentation from the documentation
# Captures: [1,body] the human readable text documenting the purpose of the function
doc_purpose_regex = re.compile( r"\\par\s+Purpose:\s+=+\s+\\verbatim\s+(?P<body>(?:[\s\S]+?))\s+\\endverbatim" )
# Parses function names
# Captures: [1,type] literal type of matrix, [2,config] configuration type of matrix, [3,function] function group
#func_name_group_regex = re.compile( r"^(?P<type>[dszc]|(?:ds)|(?:zc))(?P<config>(?:bd)|(?:di)|(?:gb)|(?:ge)|(?:gg)|(?:gt)|(?:hb)|(?:he)|(?:hg)|(?:hp)|(?:hs)|(?:op)|(?:or)|(?:pb)|(?:po)|(?:pp)|(?:pt)|(?:sb)|(?:sp)|(?:st)|(?:sy)|(?:tb)|(?:tg)|(?:tp)|(?:tr)|(?:tz)|(?:un)|(?:up))(?P<function>.+)" )
func_name_group_regex = re.compile( r"^(?P<type>(?:(?:ds)|(?:zc)|[dszc]))(?P<config>\w\w)(?P<function>\w\w\w*)" )
'''
class ResolutionFailure ( Exception )
Purpose:
Exception for errors encountered during Pass.resolve( ) calls.
Member Functions:
__init__( self, value ):
constructor. Value is the Pass class who errored during resolution
__str__( self ):
returns string stating which Pass class had an error.
Member Variables:
value:
the Pass class that errored during resolution
'''
class ResolutionFailure ( Exception ):
    """Raised when applying a dependency Pass to the XML tree fails."""
    def __init__(self, value):
        # 'value' is the Pass class whose application did not complete.
        self.value = value
    def __str__(self):
        return "Error applying %r to tree" % (self.value,)
'''
class GeneralPassFailure ( Exception )
Purpose:
Generic exception class that is thrown when passes encounter critical errors
Member Functions:
__init__( self, message ):
constructor. Message is the message from the Pass to the user
__str__( self ):
returns the message to from the Pass to the user
Member Variables:
message:
the message to from the Pass to the user
'''
class GeneralPassFailure ( Exception ):
    """Raised when a pass encounters a critical, unrecoverable error."""
    def __init__(self, message):
        # Human-readable description of what went wrong.
        self.message = message
    def __str__(self):
        return repr(self.message)
'''
class Pass
Purpose:
Parent of all other pass classes.
Container of input xml file.
Dependency resolver.
Member Functions:
resolve( staticclass, xml_tree ):
recursively resolves dependencies of pass staticclass onto xml_tree
apply( xml_tree ):
abstract static method(?).
raises NotImplementedError exception
Member Variables:
compete:
boolean has-complete-pass-somewhere-once.
if false, pass has never been performed or failed to perform
if true, pass has been performed once.
dependencies:
list of dependencies that must be completed before pass can be applied
input_xml:
the user input xml that give input from the user to the passes
especially for passes that could not do automated finding in the source text
'''
class Pass:
    # True once this pass has been applied successfully at least once.
    complete = False
    # Pass subclasses that must be applied before this one.
    dependencies = []
    # User-supplied configuration shared by every pass.  Loaded eagerly at
    # class-creation time, so ./input.xml must exist when this module loads.
    input_xml = loadxml( "./input.xml" )
    @staticmethod
    def resolve( staticclass, xml_tree ):
        # Recursively apply every not-yet-complete dependency of
        # 'staticclass' to xml_tree, failing loudly if one does not finish.
        print "Resolving", staticclass, "Dependencies"
        for dep in staticclass.dependencies:
            if not dep.complete:
                dep.apply( xml_tree )
                if not dep.complete:
                    raise ResolutionFailure( dep )
        print "Resolved", staticclass, "Dependencies"
    @staticmethod
    def apply( xml_tree ):
        # Abstract hook: concrete passes override this with their work.
        raise NotImplementedError
'''
class CreateTreePas ( Pass )
Purpose:
takes in xmlelement with a root node, creates <LAPACK> and <LAPACKE> root nodes
'''
class CreateTreePass ( Pass ):
    dependencies = []
    complete = False
    @staticmethod
    def apply( xml_tree ):
        # Seed the output tree with the two top-level containers that all
        # later passes populate.
        selfname = CreateTreePass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        SubElement( xml_tree, "LAPACK" )
        SubElement( xml_tree, "LAPACKE" )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DocumentSplitPass ( Pass )
Purpose:
Strip documentation (fortran comments) from code into seperate nodes
under common file node under LAPACK node.
'''
class DocumentSplitPass ( Pass ):
    dependencies = [CreateTreePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = DocumentSplitPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        text_node = SubElement( xml_tree.find( "./LAPACK" ), "text" )
        src_files = []
        # Gather every *.f Fortran file from the LAPACK/BLAS source trees.
        source_dirs = [lapack_src, lapack_matgen_src, blas_src]
        for dir in source_dirs:
            for file in os.listdir( dir ):
                if fnmatch.fnmatch( file, '*.f' ):
                    src_files.append( dir + "/" + file )
        file_count = 1
        for file in src_files:
            # One-line progress indicator (overwritten in place with \r).
            sys.stdout.write("%s ( %d : %d ) \r" % (file, file_count, len(src_files) ) )
            sys.stdout.flush()
            file_node = SubElement( text_node, "file" )
            file_node.set( "name", file )
            src_node = SubElement( file_node, "source" )
            doc_node = SubElement( file_node, "documentation" )
            src_node.text = str()
            doc_node.text = str()
            file_read = open( file ).read()
            # Fortran comment lines ('*'/'*>') become the documentation text.
            for doc_match in f_comment_regex.finditer( file_read ):
                doc_node.text += doc_match.group( "text" ) + "\n"
            # Non-comment lines become the source text (once disabled as
            # "unnecessary load on tree", later re-enabled).
            for src_match in f_source_regex.finditer( file_read ):
                src_node.text += src_match.group( "text" ) + "\n"
            file_count += 1
        sys.stdout.write("                                                       \r")
        sys.stdout.flush()
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class LAPACKFunctionDefinePass ( Pass )
Purpose:
find fortran functions in the documentation and put them in the
<procedures> node under <LAPACK>
'''
class LAPACKFunctionDefinePass ( Pass ):
    dependencies = [DocumentSplitPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = LAPACKFunctionDefinePass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_node = xml_tree.find( "./LAPACK" )
        text_node = lapack_node.find( "./text" )
        procs_node = SubElement( lapack_node, "procedures" )
        file_nodes = text_node.findall( "./file" )
        file_count = 1
        for file_node in file_nodes:
            # One-line progress indicator (overwritten in place with \r).
            sys.stdout.write("%s ( %d : %d ) \r" % (file_node.get("name"), file_count, len(file_nodes) ) )
            sys.stdout.flush()
            file_doc = file_node.find( "./documentation" )
            # Each SUBROUTINE/FUNCTION declaration found in the docs becomes
            # a <procedure> entry with its argument list.
            for proc_decl in doc_func_regex.finditer( file_doc.text ):
                proc_node = SubElement( procs_node, "procedure" )
                proc_node.set( "name", proc_decl.group( "name" ) )
                proc_node.set( "file-name", file_node.get( "name" ) )
                if proc_decl.group( "type" ) != None:
                    proc_node.set( "return-type", proc_decl.group( "type" ) )
                #print "\t", proc_decl.group("name")
                arguments = proc_decl.group( "arguments" ).split( "," );
                if len( arguments ) >= 1 and arguments[0] != "":
                    args_node = SubElement( proc_node, "arguments-list" )
                    arg_counter = 0
                    for arg in arguments:
                        #print "\t\t",arg.strip()
                        arg_node = SubElement( args_node, "argument" )
                        arg_node.set( "name", arg.strip() )
                        arg_node.set( "position", str(arg_counter) )
                        arg_counter += 1
                #SubElement( proc_node, "documentation" )
            file_count += 1
        sys.stdout.write("                                                       \r")
        sys.stdout.flush()
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncPurposeDocPass ( Pass )
Purpose:
collect function purpose documentation from fortran text
'''
class FuncPurposeDocPass ( Pass ):
dependencies = [LAPACKFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FuncPurposeDocPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_file_name = proc_node.get( "file-name" )
doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
purpose_match = doc_purpose_regex.search( doc_node.text )
purpose = purpose_match.group( "body" ) if purpose_match != None else "Unspecified"
SubElement( proc_node, "purpose" ).text = purpose
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncArgsDocPass ( Pass ):
Purpose:
collect argument documentation from fortran text
'''
class FuncArgsDocPass ( Pass ):
    # For every known LAPACK procedure, scan the defining file's documentation
    # for per-argument sections; record the intent, an optional matrix-size,
    # and the raw documentation body on each <argument> node.
    dependencies = [LAPACKFunctionDefinePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = FuncArgsDocPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_node = xml_tree.find( "./LAPACK" )
        text_node = lapack_node.find( "./text" )
        procs_node = lapack_node.find( "./procedures" )
        for proc_node in procs_node.findall( "./procedure" ):
            proc_file_name = proc_node.get( "file-name" )
            doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
            for arg_match in doc_args_regex.finditer( doc_node.text ):
                #print "\"",proc_file_name,"\"", arg_match.group()
                arg_name = arg_match.group( "name" ).strip()
                # NOTE(review): assumes every documented argument exists in the
                # arguments-list; a missing one would make arg_node None here.
                arg_node = proc_node.find( "./arguments-list/argument/[@name='" + arg_name + "']" )
                arg_node.set( "intent", arg_match.group( "intent" ) )
                # Two captured dimension expressions become "rows,cols".
                dim_match = doc_args_dimensions_regex.search( arg_match.group( "body" ) )
                if dim_match != None:
                    arg_node.set( "matrix-size", dim_match.group(1) +"," + dim_match.group(2) )
                SubElement( arg_node, "documentation" ).text = arg_match.group( "body" )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FuncArgsTypePass ( Pass )
NON FUNCTIONAL
Purpose:
collect argument names and types under the Scalar Arguments
and Array Arguments header and include in tree for semantic understanding
'''
class FuncArgsTypePass ( Pass ):
    # Attributes each argument of each LAPACK procedure with a fortran type
    # and a semantic tag ("scalar" / "array" / "function") by parsing the
    # "Scalar Arguments", "Array Arguments" and function-argument declaration
    # sections of the fortran documentation.
    dependencies = [LAPACKFunctionDefinePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = FuncArgsTypePass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_node = xml_tree.find( "./LAPACK" )
        text_node = lapack_node.find( "./text" )
        procs_node = lapack_node.find( "./procedures" )
        for proc_node in procs_node.findall( "./procedure" ):
            proc_file_name = proc_node.get( "file-name" )
            doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
            # attribute scalar arguments
            scalars = doc_scalarargs_regex.search( doc_node.text )
            if scalars != None:
                for line in doc_scalarargs_decls_regex.finditer( scalars.group( "body" ) ):
                    # Strip whitespace and fortran continuation '$' before splitting.
                    names_list = re.sub( r"[\s$]", "", line.group("names") ).split( "," )
                    #print line.group( "type" ), ":", names_list
                    type = line.group( "type" )
                    #skip any "IMPLICIT" 'typed' arguments
                    if type.lower() == "implicit":
                        continue
                    for name in names_list:
                        arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
                        # Declared locals that are not arguments are skipped.
                        if arg_node == None:
                            #print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
                            #prettyprintxml( proc_node.find("./arguments-list") )
                            continue
                        arg_node.set( "type", type )
                        arg_node.set( "semantic", "scalar" )
            # attribute array arguments
            arrays = doc_arrayargs_regex.search( doc_node.text )
            if arrays != None:
                for line in doc_arrayargs_decls_regex.finditer( arrays.group( "body" ) ):
                    # Trailing "," lets the name+dims regex terminate the last entry.
                    name_list = re.sub( r"[\s$]", "", line.group("names") ) + ","
                    type = line.group( "type" )
                    for name_match in doc_arrayargs_decls_names_dims_regex.finditer( name_list ):
                        name = name_match.group( "name" )
                        arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
                        if arg_node == None:
                            #print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
                            continue
                        dimensions = name_match.group( "dimensions") if name_match.group( "dimensions") != None else ""
                        arg_node.set( "type", type )
                        arg_node.set( "semantic", "array" )
                        arg_node.set( "dimensions", dimensions )
            # attribute function arguments
            functions = doc_functionargs_regex.search( doc_node.text )
            if functions != None:
                for line in doc_functionargs_decls_regex.finditer( functions.group( "body" ) ):
                    names_list = re.sub( r"[\s$]", "", line.group("names") ).split( "," )
                    #print line.group( "type" ), ":", names_list
                    type = line.group( "type" )
                    # Skip bare EXTERNAL declarations (no type information).
                    if type.lower() == "external":
                        continue
                    for name in names_list:
                        arg_node = proc_node.find( "./arguments-list/argument/[@name='" + name + "']" )
                        if arg_node == None:
                            #print "Non-match: argument", name, "of", proc_node.get( "name" ), "in", proc_file_name
                            #prettyprintxml( proc_node.find("./arguments-list") )
                            continue
                        arg_node.set( "type", type )
                        arg_node.set( "semantic", "function" )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class AssociateArgsToArrayPass ( Pass ):
Purpose:
Threshes out scalar-argument pairing for array concepts.
'''
class AssociateArgsToArrayPass ( Pass ):
    # Heuristically links integer scalar arguments to the array arguments they
    # size: mines each scalar's documentation text for phrases like
    # "the rows of A", keeps only candidates that name real arguments, and
    # records the result as comma-joined "associate-array"/"associate-field"
    # attributes on the scalar argument.
    dependencies = [FuncArgsTypePass, FuncArgsDocPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = AssociateArgsToArrayPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_node = xml_tree.find( "./LAPACK" )
        text_node = lapack_node.find( "./text" )
        procs_node = lapack_node.find( "./procedures" )
        #proc_info = {} # {func_name} => { arg name } => [ what, what, who ]
        for proc_node in procs_node.findall( "./procedure" ):
            proc_name = proc_node.get( "name" )
            '''
            if not proc_name in proc_info:
                proc_info[ proc_name ] = {}
            '''
            base_name = proc_name.lower()
            # Decompose e.g. "dgesv" into type/config/function parts.
            match = func_name_group_regex.search( base_name );
            if match == None:
                #print proc_name, "(", base_name, ") does not match regex"
                continue
            func = match.group( "function" )
            config = match.group( "config" )
            type = match.group( "type" )
            if not config.startswith( "ge" ):
                pass
            arg_names = [ arg.get("name") for arg in proc_node.findall( "./arguments-list/argument" ) ]
            for arg_node in proc_node.findall( "./arguments-list/argument" ):
                doc_node = arg_node.find( "documentation" )
                # Only documented integer scalars can describe array extents.
                if doc_node == None or arg_node.get("semantic") != "scalar" or arg_node.get("type").lower() != "integer":
                    continue
                what = []
                who = []
                string = []
                for m in scalar_matrix_relation_regex.finditer( doc_node.text ):
                    # Only size-describing relations are of interest.
                    if not m.group( "what" ) in ["rows", "columns", "order", "rank"] :
                        continue
                    names = m.group( "who" ).strip()
                    names_list = []
                    if " and " in names:
                        names_list = [ name.strip() for name in names.split( "and" ) ]
                    else:
                        names_list = [ names ]
                    # Names containing spaces are prose, not argument names.
                    nameHasSpace = False
                    for name in names_list:
                        if " " in name:
                            nameHasSpace = True
                            break
                    if nameHasSpace:
                        #print names, " contains non names. Skipping."
                        continue
                    # Drop candidates that are not arguments of this procedure.
                    removes = []
                    for name in names_list:
                        if not name in arg_names:
                            removes.append( name )
                    for rm in removes:
                        names_list.remove( rm )
                    if len( names_list ) == 0:
                        #print "Names list had no argument names. Skipping"
                        continue
                    what.append( m.group( "what" ) )
                    who.append( names_list )
                    string.append( re.sub( "\s+", " ", m.group(0) ) )
                if len( what ) == 0 and len( who ) == 0:
                    continue
                #proc_info[ proc_name ][ arg_node.get( "name" ) ] = [ what, who, string]
                # Flatten parallel what/who lists into comma-joined attributes.
                associate_array = str()
                associate_field = str()
                first = True
                for i in range( len( who ) ):
                    for array in who[i]:
                        associate_array += ( "," if not first else "" ) + array
                        associate_field += ( "," if not first else "" ) + what[i]
                        first = False
                arg_node.set( "associate-array", associate_array )
                arg_node.set( "associate-field", associate_field )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseLAPACKPass ( Pass )
Purpose:
Tie together all passes over the LAPACK fortran source code and
resulting semantic analysis
'''
class BaseLAPACKPass ( Pass ):
dependencies = [FuncArgsTypePass, FuncArgsDocPass, AssociateArgsToArrayPass, FuncPurposeDocPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseLAPACKPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class LAPACKEFunctionDefinePass ( Pass ):
Purpose:
from lapacke.h text define all C function decls
in under the <LAPACKE> tree.
'''
class LAPACKEFunctionDefinePass ( Pass ):
    # Parses lapacke.h and records every C function declaration under
    # <LAPACKE>/<procedures>: name, return type, and an ordered
    # arguments-list with type, pointer depth ("refdepth") and modifiers.
    dependencies = [CreateTreePass] # TODO include BaseLAPACKPass when the two need to meet
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = LAPACKEFunctionDefinePass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapacke_root = xml_tree.find( "./LAPACKE" )
        procs_node = SubElement( lapacke_root, "procedures" )
        lapacke_header = open( lapacke_include + "/lapacke.h" ).read()
        for func_decl in c_func_decl_regex.finditer( lapacke_header ):
            #if func_decl.group( "name" ).lower().startswith( "lapacke_" ): continue
            # First declaration wins; later redefinitions are skipped.
            if procs_node.find( "./procedure/[@name='" + func_decl.group( "name" ) + "']" ) != None:
                #print "proc", func_decl.group( "name" ), "redefined. Skipping"
                continue
            proc_node = SubElement( procs_node, "procedure" )
            proc_node.set( "name", func_decl.group( "name" ) )
            proc_node.set( "return-type", func_decl.group( "returns" ) )
            args_node = SubElement( proc_node, "arguments-list" )
            arg_count = 0
            for arg in c_args_regex.finditer( func_decl.group( "arguments" ) ):
                arg_node = SubElement( args_node, "argument" )
                arg_node.set( "name", arg.group( "name" ) )
                arg_node.set( "type", arg.group( "type" ) )
                # refdepth = number of '*' in the declarator (pointer depth).
                arg_node.set( "refdepth", str( arg.group( "refdepth" ).count("*") ) )
                if arg.group( "modifier" ) != None:
                    arg_node.set( "modifier", arg.group( "modifier" ) )
                arg_node.set( "position", str(arg_count) )
                arg_count += 1
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseLAPACKEPass ( Pass )
Purpose:
Ties together all passes over the lapacke.h text
and any basic analysis
'''
class BaseLAPACKEPass ( Pass ):
dependencies = [LAPACKEFunctionDefinePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CharacterArraySemanticsCorrectionPass ( Pass )
Purpose:
lapack fortran documentation defines character*1 (single characters) under
array semantics. This corrects that to be a scalar.
'''
class CharacterArraySemanticsCorrectionPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CharacterArraySemanticsCorrectionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == None:
continue
#if f_arg.get( "name" ) == "JOBA":
# print f_proc.get( "name" ), f_arg.get( "name" ), f_arg.get( "type" ).lower()
if f_arg.get( "type" ).lower() == "character*1":
# print f_proc.get( "name" ), f_arg.get( "name" ), f_arg.get( "type" ), f_arg.get( "semantic" ), f_arg.get( "intent" ), f_arg.get( "dimensions" )
if f_arg.get( "semantic" ) == "array":
f_arg.set( "semantic", "scalar" )
if f_arg.get( "dimensions" ) != None:
f_arg.unset( "dimensions" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ArgumentSemanticsBucketingPass ( Pass )
Purpose:
Heuristically assign argument semantics by bucketing all arguments.
Any argument who only has a None bucket and one other bucket can
'safely' have those in the None assigned from as semantics of the other.
'''
class ArgumentSemanticsBucketingPass ( Pass ):
dependencies = [CharacterArraySemanticsCorrectionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketArgumentsSemanticsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
variables = {}
for proc in lapack_f_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else "none"
if not arg_name in variables:
variables[ arg_name ] = {}
if not semantic in variables[ arg_name ]:
variables[ arg_name ][ semantic ] = []
variables[ arg_name ][ semantic ].append( proc_name )
for arg in variables:
if len( variables[ arg ] ) > 2:
print arg
for semantic in variables[ arg ]:
print " \"" + semantic + "\"", ":",variables[ arg ][ semantic ]
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class AssociateLAPACKtoLAPACKEPASS ( Pass )
Purpose:
link functions and args from both the C and Fortran world
together with paths from root.
'''
class AssociateFunctionsLAPACKtoLAPACKEPass ( Pass ):
    # Links each C procedure to its fortran analogue (and vice versa) by
    # stripping the lapack_/lapacke_ prefix and _work suffix from the C name
    # and looking the remainder up in the fortran tree; records each link as
    # an <analogue> path string under an <analogues> node on both sides.
    dependencies = [BaseLAPACKEPass, BaseLAPACKPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = AssociateFunctionsLAPACKtoLAPACKEPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_c_root = xml_tree.find( "./LAPACKE" )
        lapack_c_procs = lapack_c_root.find( "./procedures" )
        lapack_f_root = xml_tree.find( "./LAPACK" )
        lapack_f_procs = lapack_f_root.find( "./procedures" )
        for lapack_c_proc in lapack_c_procs.findall( "./procedure" ):
            proc_name = lapack_c_proc.get( "name" ).lower()
            # Derive the fortran base name from the C naming convention.
            base_name = str()
            if proc_name.startswith( "lapack_" ):
                base_name = proc_name.replace( "lapack_", "" )
            elif proc_name.startswith( "lapacke_" ):
                base_name = proc_name.replace( "lapacke_", "" )
            else:
                print "Unknown root of name:", lapack_c_proc.get( "name" )
                continue
            base_name = base_name.replace( "_work", "" )
            base_name = base_name.upper()
            #print lapack_c_proc.get("name"), proc_name, base_name
            lapack_f_proc = lapack_f_procs.find( "./procedure/[@name='" + base_name + "']" )
            if lapack_f_proc == None:
                #print "Could not find the fortran analogue of C function", lapack_c_proc.get( "name" ), "from base-name", base_name
                continue
            # Record the cross-link on both trees as a findable path string.
            SubElement( SubElementUnique( lapack_c_proc, "analogues" ), "analogue" ).text = "./LAPACK/procedures/procedure/[@name='" + lapack_f_proc.get( "name" ) + "']"
            SubElement( SubElementUnique( lapack_f_proc, "analogues" ), "analogue" ).text = "./LAPACKE/procedures/procedure/[@name='" + lapack_c_proc.get( "name" ) + "']"
            '''
            misses = []
            for f_arg in lapack_f_proc.findall( "./arguments-list/argument" ):
                f_arg_name = f_arg.get( "name" );
                c_arg = lapack_c_proc.find( "./arguments-list/argument/[@name='" + f_arg_name.lower() + "']" )
                # skip non-analogous args.
                # TODO solve/mention matching failure somewhere? Maybe...
                if c_arg == None:
                    #misses.append( f_arg_name )
                    continue
                # ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
                # Note that it is totally possible to create the path string from the two atributes of the tag.
                # easier to create once here, instead of manytimes everywhere else.
                c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
                c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + lapack_f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
                c_ana_node.set( "function", lapack_f_proc.get( "name" ) )
                c_ana_node.set( "name", f_arg.get( "name" ) )
                f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
                f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + lapack_c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
                f_ana_node.set( "function", lapack_c_proc.get( "name" ) )
                f_ana_node.set( "name", c_arg.get( "name" ) )
            '''
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DestroyUnassociatedCFunctionsTreePass ( Pass )
Purpose:
Remove procedures from LAPACKE subtree that do not have Fortran analogues
UNUSED
'''
class DestroyUnassociatedCFunctionsTreePass ( Pass ):
dependencies = [AssociateFunctionsLAPACKtoLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DestroyUnassociatedCFunctionsTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_root = xml_tree.find( "./LAPACKE" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.find( "./analogues" ) == None:
lapack_f_procs.remove( f_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DestroyUnassociatedFortranFunctionsTreePass ( Pass )
Purpose:
Remove procedures from LAPACK subtree that do not have C analogues
'''
class DestroyUnassociatedFortranFunctionsTreePass ( Pass ):
dependencies = [AssociateFunctionsLAPACKtoLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DestroyUnassociatedFortranFunctionsTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_root = xml_tree.find( "./LAPACK" )
lapack_f_procs = lapack_f_root.find( "./procedures" )
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.find( "./analogues" ) == None:
lapack_f_procs.remove( f_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class EasyAssociateArgsPass ( Pass )
Purpose:
Create association between C and Fortran analogue function arguments
when that association is easy (ie they have the same name)
'''
class EasyAssociateArgsPass ( Pass ):
    # Associates C and fortran analogue-function arguments in the easy case:
    # the C argument whose name is the lowercased fortran argument name.
    # Records the link as an <analogue> path node on both sides.
    dependencies = [DestroyUnassociatedFortranFunctionsTreePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = EasyAssociateArgsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        c_procs = xml_tree.find( "./LAPACKE/procedures" )
        for c_proc in c_procs.findall( "./procedure" ):
            proc_name = c_proc.get( "name" ).lower()
            supposed_f_ana_node = c_proc.find( "./analogues/analogue" )
            if supposed_f_ana_node == None:
                #print "Proc", c_proc.get( "name" ), "has no Fortran analogues. Skipping"
                continue
            # The analogue node's text is a findable path into the fortran tree.
            f_proc = xml_tree.find( supposed_f_ana_node.text )
            if f_proc == None:
                # Tree invariant violated: the recorded analogue path resolves
                # to nothing. Abort the pass (complete stays False).
                print "BAD! No analogue where analogue should exist"
                return
                #continue
            for f_arg in f_proc.findall( "./arguments-list/argument" ):
                f_arg_name = f_arg.get( "name" );
                c_arg = c_proc.find( "./arguments-list/argument/[@name='" + f_arg_name.lower() + "']" )
                # skip non-analogous args.
                if c_arg == None:
                    continue
                # ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
                # Note that it is totally possible to create the path string from the two atributes of the tag.
                # easier to create once here, instead of manytimes everywhere else.
                c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
                c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
                c_ana_node.set( "function", f_proc.get( "name" ) )
                c_ana_node.set( "name", f_arg.get( "name" ) )
                f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
                f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
                f_ana_node.set( "function", c_proc.get( "name" ) )
                f_ana_node.set( "name", c_arg.get( "name" ) )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportArgumentAnaloguesPass ( Pass )
Purpose:
Create argument associations of name per the input.xml
using the function association created automatically during runtime
'''
class ImportArgumentAssociationsPass ( Pass ):
dependencies = [EasyAssociateArgsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportArgumentAssociationsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
c_procs = xml_tree.find( "./LAPACKE/procedures" )
pass_input = Pass.input_xml.find( "./pass/[@name='ImportArgumentAnaloguesPass']" )
for in_proc in pass_input.findall( "./procedure" ):
c_proc = c_procs.find( "./procedure/[@name='" + in_proc.get( "name" ) + "']" )
f_proc = xml_tree.find( c_proc.find( "./analogues/analogue" ).text )
for in_arg in in_proc.findall( "./argument" ):
c_arg = c_proc.find( "./arguments-list/argument/[@name='" + in_arg.get( "name" ) + "']" )
f_arg = f_proc.find( "./arguments-list/argument/[@name='" + in_arg.get( "substitution" ) + "']" )
#prettyprintxml( c_arg )
if c_arg == None or f_arg == None:
raise GeneralPassFailure( "Argument speficied in input not found in tree." + c_proc.get("name") +":"+ c_arg.get("name") )
c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
c_ana_node.set( "function", f_proc.get( "name" ) )
c_ana_node.set( "name", f_arg.get( "name" ) )
f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
f_ana_node.set( "function", c_proc.get( "name" ) )
f_ana_node.set( "name", c_arg.get( "name" ) )
#prettyprintxml( c_proc )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseAssociatePass ( Pass )
Purpose:
Ties together all association of analogues pass
'''
class BaseAssociatePass ( Pass ):
dependencies = [EasyAssociateArgsPass, ImportArgumentAssociationsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseAssociatePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FoldLAPACKtoLAPACKEPass ( Pass ):
Purpose:
take the semantics derived from FuncArgsTypePass and
FuncArgumentDocToSemanticsPass over the LAPACK information
and apply them to functions found in the lapacke.h code.
Especially important for the LAPACK_* C functions.
Also important for any LAPACKE_* C functions that take pointers
to scalars.
'''
class FoldLAPACKSemanticsIntentsToLAPACKEPass ( Pass ):
    # Copies semantics, intent, dimensions, matrix-size and associate-*
    # attributes from each fortran argument onto its uniquely-associated C
    # argument. Procedures/arguments with zero or multiple analogues are
    # skipped (ambiguous or unlinked).
    dependencies = [CharacterArraySemanticsCorrectionPass, BaseAssociatePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = FoldLAPACKSemanticsIntentsToLAPACKEPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_c_root = xml_tree.find( "./LAPACKE" )
        lapack_c_procs = lapack_c_root.find( "./procedures" )
        for c_proc in lapack_c_procs.findall( "./procedure" ):
            analogues = c_proc.findall( "./analogues/analogue" )
            # Only fold through a unique procedure analogue.
            if len( analogues ) > 1:
                #print "proc", c_proc.get( "name" ), "has", len( analogues ), "analogues. skipping"
                continue
            elif len( analogues ) == 0:
                #print "skipping", c_proc.get( "name" )
                continue
            f_proc = xml_tree.find( analogues[0].text )
            for c_arg in c_proc.findall( "./arguments-list/argument" ):
                # Same uniqueness requirement at the argument level.
                analogues = c_arg.findall( "./analogues/analogue" )
                if len( analogues ) > 1:
                    #print "arg", c_arg.get( "name" ), "has", len( analogues ), "analogues. skipping"
                    #prettyprintxml( c_proc )
                    continue
                elif len( analogues ) == 0:
                    continue
                f_arg = xml_tree.find( analogues[0].text )
                semantic = f_arg.get( "semantic" )
                if semantic != None:
                    c_arg.set( "semantic", semantic )
                    if semantic == "array":
                        c_arg.set( "dimensions", f_arg.get( "dimensions" ) )
                intent = f_arg.get( "intent" )
                if intent != None:
                    c_arg.set( "intent", intent )
                dimensions = f_arg.get( "dimensions" )
                if dimensions != None:
                    c_arg.set( "dimensions", dimensions )
                matrix_size = f_arg.get( "matrix-size" )
                if matrix_size != None:
                    c_arg.set( "matrix-size", matrix_size )
                associate_array = f_arg.get( "associate-array" )
                if associate_array != None:
                    c_arg.set( "associate-array", associate_array )
                associate_field = f_arg.get( "associate-field" )
                if associate_field != None:
                    c_arg.set( "associate-field", associate_field )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportLAPACKESemanticsIntentsPass ( Pass )
Purpose:
Import semantics and intents for LAPACKE arguments
that may be unspecified after folding through
associations.
Will over-write semantics and intents issued by
FoldLAPACKSemanticsIntentsToLAPACKEPass
'''
class ImportLAPACKESemanticsIntentsPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportLAPACKESemanticsIntentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportLAPACKESemanticsIntentsPass']" )
for assign in pass_input.findall( "./assign" ):
for arg in xml_tree.findall( assign.get( "path" ) ):
semantic = assign.get( "semantic" )
intent = assign.get( "intent" )
if semantic == None and intent == None:
raise GeneralPassFailure( "assignment contains no semantic or intent attributes" + assign.get( "path" ) )
if semantic != None:
arg.set( "semantic", semantic )
if intent != None:
arg.set( "intent", intent )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class TypeSubstitutionPass ( Pass )
Purpose:
Token replacement pass of type tokens.
find-replace pairs are directly taken from input_xml file,
not inferred, detected, found, what-have-you.
No defining of types, purely text replacement.
applied to argument types and return-types of functions.
if replacement occurs, creates original-type and original-return-type
attributes that take the original value of the type and return-type attributes
Developer Note:
May move placement of TypeSubstitutionPass
since it is more related to Chapelizing than semantic
transformations like folding.
'''
class TypeSubstitutionPass ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TypeSubstitutionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='TypeSubstitutionPass']" )
procs = xml_tree.find( "./LAPACKE/procedures" )
subs = {}
for sub in pass_input.findall( "./substitution" ):
subs[ sub.get( "find" ) ] = sub.get( "replace" )
for proc in procs.findall( "./procedure" ):
proc_type = proc.get( "return-type" )
if proc_type in subs:
proc.set( "original-return-type", proc_type )
proc.set( "return-type", subs[ proc_type ] )
for arg in proc.findall( "./arguments-list/argument" ):
arg_type = arg.get( "type" )
if arg_type in subs:
arg.set( "original-type", arg_type )
arg.set( "type", subs[ arg_type ] )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportTypeArgumentPass ( Pass )
Purpose:
take arguments from the input file and retype
all arguments of the same name within the LAPACKE
tree to be of the type specified.
'''
class ImportArgumentTypePass ( Pass ):
dependencies = [TypeSubstitutionPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportArgumentTypePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportArgumentTypePass']" )
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
find = pass_input.find( "./argument/[@name='" + arg.get("name") + "']" )
if find == None:
continue
arg.set("type", find.get("type" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseTransformLAPACKEPass ( Pass )
Purpose:
Ties together any transformation passes on the LAPACKE tree
that are unrelated to Chapelizing
Developer Note:
May move placement of TypeSubstitutionPass
since it is more related to Chapelizing than semantic
transformations like folding.
'''
class BaseTransformLAPACKEPass( Pass ):
dependencies = [BaseLAPACKEPass, TypeSubstitutionPass, FoldLAPACKSemanticsIntentsToLAPACKEPass, ImportLAPACKESemanticsIntentsPass, TypeSubstitutionPass, ImportArgumentTypePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseTransformLAPACKEPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CreateChapelModuleTreePass ( Pass )
Purpose:
Create chapel-module root, procedures, type-defines, const-defines subtrees
general setup for Chapelization and code generation
'''
class CreateChapelModuleTreePass ( Pass ):
dependencies = [ CreateTreePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateChapelModuleTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
chpl_module = SubElement( xml_tree, "chapel-module" )
procedures = SubElement( chpl_module, "procedures" )
types = SubElement( chpl_module, "type-defines" )
defines = SubElement( chpl_module, "const-defines" )
enums = SubElement( chpl_module, "enum-defines")
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelizeLAPACKE_FunctionsPass ( Pass )
Purpose:
take all LAPACKE_* functions defined in <LAPACKE> tree,
bust them apart to provide most information for later passes on
Chapelizing the LAPACKE_functions
'''
class ChapelizeLAPACKEFunctionsPass ( Pass ):
    # Copies every LAPACKE procedure into the <chapel-module> tree, carrying
    # over argument metadata and deriving Chapel-facing semantic/intent from
    # the C pointer depth (refdepth) plus the folded fortran semantics.
    dependencies = [BaseTransformLAPACKEPass, CreateChapelModuleTreePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = ChapelizeLAPACKEFunctionsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapacke_root = xml_tree.find( "./LAPACKE" )
        lapacke_procs = lapacke_root.find( "./procedures" )
        module_root = xml_tree.find( "./chapel-module" )
        module_procs = module_root.find( "./procedures" )
        proc_count = 0
        for proc in lapacke_procs.findall( "./procedure" ):
            proc_name = proc.get( "name" )
            module_proc = SubElement( module_procs, "procedure" )
            module_proc.set("name", proc_name)
            module_proc.set("return-type", proc.get( "return-type" ) )
            module_proc_args = SubElement( module_proc, "arguments-list" )
            for arg in proc.findall( "./arguments-list/argument" ):
                #prettyprintxml( arg )
                module_arg = SubElement( module_proc_args, "argument" )
                module_arg.set( "name", arg.get("name") )
                module_arg.set( "position", arg.get( "position" ) )
                module_arg.set( "type", arg.get( "type" ) )
                arg_semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else ""
                arg_intent = arg.get( "intent" ) if arg.get( "intent" ) != None else ""
                arg_refs = int( arg.get( "refdepth" ) )
                # Optional attributes are carried over only when present.
                dimensions = arg.get( "dimensions" )
                if dimensions != None:
                    module_arg.set( "dimensions", dimensions )
                matrix_size = arg.get( "matrix-size" )
                if matrix_size != None:
                    module_arg.set( "matrix-size", matrix_size )
                associate_array = arg.get( "associate-array" )
                if associate_array != None:
                    module_arg.set( "associate-array", associate_array )
                associate_field = arg.get( "associate-field" )
                if associate_field != None:
                    module_arg.set( "associate-field", associate_field )
                # Derive Chapel intent/semantic from pointer depth:
                # refdepth 0 => by-value scalar; refdepth 1 => array, or a
                # scalar passed by ref.
                intent = None #"BADSTATE " + arg_semantic + " " + arg_intent + " " + arg_refs
                semantic = None #"BADSTATE " + arg_semantic + " " + arg_intent + " " + arg_refs
                if arg_refs == 0:
                    if arg_semantic == "array":
                        raise GeneralPassFailure( "Attempted array semantic with 0 refdepth " + proc_name + " " +arg.get("name") )
                    semantic = "scalar"
                    intent = "none"
                if arg_refs == 1:
                    if arg_semantic == "array":
                        semantic = "array"
                        intent = "none"
                    else:
                        semantic = "scalar"
                        intent = "ref"
                # NOTE(review): refdepth >= 2 leaves intent/semantic as None,
                # which is then passed to Element.set — confirm no LAPACKE
                # argument has pointer depth > 1.
                module_arg.set( "intent", intent )
                module_arg.set( "semantic", semantic )
            #module_proc.set( "category", "direct" )
            proc_count += 1
        print "Chapelized", proc_count, "LAPACKE functions"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class TranslateChapelKeywordsPass ( Pass ):
Purpose:
taking from the input xml file a list of chapel keywords
changes the text of argument names
'''
class TranslateChapelKeywordsPass ( Pass ):
    """Rename argument names that collide with Chapel keywords.

    The input xml supplies a list of <keyword> symbols with either an
    explicit replacement or a global prefix/suffix to apply.
    """
    dependencies = [ChapelizeLAPACKEFunctionsPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = TranslateChapelKeywordsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        pass_input = Pass.input_xml.find( "./pass/[@name='TranslateChapelKeywordsPass']" )
        global_info = pass_input.find( "./global" )
        chpl_module_procs = xml_tree.find( "./chapel-module/procedures" )
        # Optional global prefix/suffix applied when a keyword has no
        # explicit replacement; default to "" when absent.
        global_pre = "" if global_info == None \
                        or global_info.get( "prefix" ) == None \
                     else global_info.get( "prefix" )
        global_suf = "" if global_info == None \
                        or global_info.get( "suffix" ) == None \
                     else global_info.get( "suffix" )
        # keyword symbol -> explicit replacement ("" = use prefix/suffix)
        keywords = {}
        for keyword in pass_input.findall( "./keyword" ):
            symbol = keyword.get( "symbol" )
            replacement = "" if keyword.get( "replacement" ) == None \
                             else keyword.get( "replacement" )
            if replacement == "" and global_pre == "" and global_suf == "":
                raise GeneralPassFailure( "If no global prefix or suffix is defined, a replacement for a symbol must be defined. (" + symbol + ")" )
            keywords[ symbol ] = replacement
        for proc in chpl_module_procs.findall( "./procedure" ):
            proc_name = proc.get( "name" )
            #print proc_name
            # Note: This will break includes if we go
            # about replacing their names.
            # arguments are fine because nobody cares about
            # their names at the late stage of linking
            '''
            if proc_name in keywords:
                if keywords[ proc_name ] == "":
                    proc_name = global_pre + proc_name + global_suf
                else:
                    proc_name = keywords[ proc_name ]
                proc.set( "name", proc_name )
            '''
            # Only argument names are rewritten; procedure names are left
            # alone (see note above).
            for arg in proc.findall( "./arguments-list/argument" ):
                arg_name = arg.get( "name" )
                #print "\t",arg_name
                if arg_name in keywords:
                    if keywords[ arg_name ] == "":
                        arg_name = global_pre + arg_name + global_suf
                    else:
                        arg_name = keywords[ arg_name ]
                    #print "\t\t=>",arg_name
                    arg.set( "name", arg_name )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelerrificLAPACKEFunctionsPass ( Pass ):
Purpose:
Create Chapel-errific, LAPACKE Functions that take chapel arrays and abstract the
dimensions of the arrays and matrices that are stored within.
'''
class ChapelerrificLAPACKEFunctionsPass ( Pass ):
    """Prepare Chapel-errific LAPACKE functions that take Chapel arrays
    and abstract away the dimension/size arguments of arrays and matrices.

    For each chapel-module procedure this computes a pass-through
    expression for every argument (recorded as a
    <pass-through-arguments-list> child) and marks arguments the wrapper
    derives itself with pass-up="false".
    """
    dependencies = [ChapelizeLAPACKEFunctionsPass, TranslateChapelKeywordsPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = ChapelerrificLAPACKEFunctionsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        chapel_module = xml_tree.find( "./chapel-module" );
        chapel_procedures = chapel_module.find( "./procedures" )
        for proc in chapel_procedures.findall( "./procedure" ):
            proc_name = proc.get( "name" )
            # Only high-level LAPACKE_* entry points are wrapped; raw
            # LAPACK_* bindings and *_work variants are skipped.
            if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
                continue
            base_name = proc_name.replace( "LAPACKE_", "" )
            # Split the base name into (type, config, function) groups,
            # e.g. "dgesv" -> "d", "ge", "sv".
            match = func_name_group_regex.search( base_name );
            if match == None:
                #print proc_name, "(", base_name, ") does not match regex"
                continue
            func = match.group( "function" )
            config = match.group( "config" )
            type = match.group( "type" )
            if not config.startswith( "ge" ):
                pass
                #proc = copy.deepcopy( chpl_proc )
            args_node = proc.find( "./arguments-list" )
            args_list = [ ]
            args_names = []
            remove_list = set()   # names of args the wrapper computes itself
            pass_through = {}     # arg name -> expression passed to extern proc
            for arg in args_node.findall( "./argument" ):
                args_list.append( arg )
                args_names.append( arg.get("name") )
                # Default: pass the argument through unchanged.
                pass_through[ arg.get("name") ] = arg.get( "name" )
            # Array arguments: derive dimension and matrix-size scalar
            # arguments from the Chapel array's domain.
            for arg in args_list:
                if arg.get( "semantic" ) != "array" :
                    continue
                if arg.get( "dimensions" ) != None:
                    dimensions = arg.get( "dimensions" ).lower().split(",")
                    for i in range( len(dimensions) ):
                        dimension = dimensions[i]
                        if dimension == "*":
                            # Unknown extent; nothing to derive.
                            continue
                        # Find the scalar argument named by this dimension.
                        removeVar = None
                        for find in args_list:
                            if find.get( "name" ) == dimension:
                                removeVar = find
                                break
                        if removeVar != None:
                            remove_list.add( removeVar.get("name") )
                            # Chapel domain dims are 1-based; dim(2-i)
                            # presumably maps dimension order onto the
                            # array's axes — TODO confirm for >2D arrays.
                            pass_through[ dimension ] = "(" + arg.get("name") + ".domain.dim("+str(2-i)+").size) : c_int"
                        '''
                        else:
                            print ( dimension + " is not described in the arguments of "+proc.get( "name" ) + " for argument " + arg.get("name") )
                        '''
                if arg.get( "matrix-size" ) != None:
                    # matrix-size is "rows,cols" naming two scalar args.
                    matrix_size = arg.get( "matrix-size" ).lower()
                    rows = matrix_size.split(",")[0].strip()
                    cols = matrix_size.split(",")[1].strip()
                    removeRows = None
                    removeCols = None
                    for find in args_list:
                        if find.get( "name" ) == rows:
                            removeRows = find
                        if find.get( "name" ) == cols:
                            removeCols = find
                    if removeRows != None and removeCols != None:
                        # Row/column extents swap with the memory order.
                        pass_through[ rows ] = "(if matrix_order == lapack_memory_order.row_major then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size) : c_int"
                        pass_through[ cols ] = "(if matrix_order == lapack_memory_order.row_major then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size) : c_int"
                        remove_list.add( removeRows.get("name") )
                        remove_list.add( removeCols.get("name") )
                    '''
                    else:
                        print ( rows + " and " + cols + " are not described in the arguments of "+proc.get( "name" ) )
                    '''
            # Scalar arguments: char conversion and array-association
            # derived values (rows/columns/order of an associated array).
            for arg in args_list:
                if arg.get( "semantic" ) != "scalar" :
                    continue
                if arg.get( "type" ) == "c_char":
                    # Chapel string -> single C char at the call site.
                    pass_through[ arg.get("name") ] = "ascii(" + arg.get( "name" ) + ") : c_char"
                associate_array_str = arg.get( "associate-array" )
                associate_field_str = arg.get( "associate-field" )
                if associate_array_str != None:
                    array_field_map = {}
                    arrays = associate_array_str.split(",")
                    fields = associate_field_str.split(",")
                    array = ""
                    field = ""
                    for i in range( len( arrays ) ) :
                        arrays[i] = arrays[i].lower()
                        fields[i] = fields[i].lower()
                        array_field_map[ arrays[i] ] = fields[i]
                    # Use the first associated array actually present in
                    # this procedure's argument list.
                    for associate_array in arrays:
                        if associate_array in args_names:
                            array = associate_array
                            field = fields[ arrays.index( array ) ]
                            break;
                    if field == "rows":
                        pass_through[ arg.get("name") ] = "(if matrix_order == lapack_memory_order.row_major then " + array + ".domain.dim(1).size else " + array + ".domain.dim(2).size) : c_int"
                    elif field == "columns":
                        pass_through[ arg.get("name") ] = "(if matrix_order == lapack_memory_order.row_major then " + array + ".domain.dim(2).size else " + array + ".domain.dim(1).size) : c_int"
                    elif field == "order" or field == "rank":
                        pass_through[ arg.get("name") ] = "(" + array + ".domain.dim(1).size) : c_int"
                    else:
                        raise GeneralPassFailure( field + " is not a recognized array association field" )
                    remove_list.add( arg.get("name") )
            # Record the pass-through expression for every argument, and
            # mark derived arguments so they are not exposed upward.
            pass_through_node = SubElement( proc, "pass-through-arguments-list" )
            for arg in args_node.findall( "./argument" ):
                passing = copy.deepcopy( arg )
                passing.text = pass_through[ arg.get( "name" ) ]
                pass_through_node.append( passing )
            for arg in args_node:
                if arg.get("name") in remove_list:
                    arg.set( "pass-up", "false" )
                else:
                    arg.set( "pass-up", "true" )
            '''
            for arg in args_node:
                if arg.get( "name" ) == "matrix_order":
                    arg.text = "LAPACK_ROW_MAJOR"
            '''
            #proc.set( "category", "chapelerrific" )
            #proc.set( "call", proc.get("name") )
            #proc.set( "name", proc.get("name").replace( "LAPACKE_", "" ) )
            #chapel_procedures.append( proc )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseChapelizePass ( Pass )
Purpose:
Tie together all chapelization passes.
After this point, no more transformations on the
code should occur.
'''
class BaseChapelizePass ( Pass ):
dependencies = [ CreateChapelModuleTreePass, ChapelizeLAPACKEFunctionsPass, TranslateChapelKeywordsPass, ChapelerrificLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseChapelizePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportTypeDefinesPass ( Pass )
Purpose:
from input xml sets up tags that will be used
to generate typedefs in the module
'''
class ImportTypeDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportTypeDefinesPass']" )
module_types = xml_tree.find( "./chapel-module/type-defines" )
for define in pass_input.findall( "./define" ):
module_types.append( define )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportExternConstDefinesPass ( Pass )
Purpose:
from input xml set up tags that will be used
to generate extern const definitions
'''
class ImportExternConstDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportExternConstDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportExternConstDefinesPass']" )
module_defs = xml_tree.find( "./chapel-module/const-defines" )
for define in pass_input.findall( "./define" ):
module_defs.append( define )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ImportEnumeratedTypeDefinesPass ( Pass )
Purpose:
from input xml set up tags that will be used
to generate local enum definitions
'''
class ImportEnumeratedTypeDefinesPass ( Pass ):
dependencies = [CreateChapelModuleTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ImportEnumeratedTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
pass_input = Pass.input_xml.find( "./pass/[@name='ImportEnumeratedTypeDefinesPass']" )
module_defs = xml_tree.find( "./chapel-module/enum-defines" )
for enumeration in pass_input.findall( "./enumeration" ):
module_defs.append( enumeration )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseImportPass ( Pass )
Purpose:
Ties together all passes that import from input xml
'''
class BaseImportPass ( Pass ):
dependencies = [ImportTypeDefinesPass, ImportExternConstDefinesPass, ImportEnumeratedTypeDefinesPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseImportPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseCodegenReadyPass ( Pass )
Purpose:
Ties together all passes that must be completed before all codegen could be done
'''
class BaseCodegenReadyPass ( Pass ):
dependencies = [BaseImportPass, BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseCodegenReadyPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternProcPass ( Pass )
Purpose:
generate chapel code at each procedure in the <chapel-module>
from the details of each procedure.
these are raw, basic extern procs of these functions.
'''
class ChapelModuleExternProcPass ( Pass ):
    """Generate raw `extern proc` Chapel declarations for every procedure
    in the <chapel-module> tree.

    The generated text is attached to each <procedure> as a <code>
    child with category="extern proc".
    """
    dependencies = [BaseChapelizePass, BaseImportPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = ChapelModuleExternProcPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapacke_root = xml_tree.find( "./LAPACKE" )
        lapacke_procs = lapacke_root.find( "./procedures" )
        module_root = xml_tree.find( "./chapel-module" )
        module_procs = module_root.find( "./procedures" )
        proc_count = 0;
        for proc in module_procs.findall( "./procedure" ):
            proc_name = proc.get( "name" )
            # Fortran-side name used to look up the LAPACK documentation.
            basename = proc_name.replace( "LAPACK_", "" ).replace( "LAPACKE_", "" ).upper()
            lapack_node = xml_tree.find( "./LAPACK/procedures/procedure/[@name='" + basename + "']" )
            purpose = "" #"For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
            ''' #TODO get legal approval for Documentation inclusion.
            if lapack_node == None or lapack_node.find( "./purpose" ) == None or lapack_node.find( "./purpose" ).text == None:
                purpose = ""
            else:
                purpose = re.sub( r"[ \t]+", " ", lapack_node.find( "./purpose" ).text )
            '''
            # Arguments are emitted in their original C position order.
            proc_args = proc.findall( "./arguments-list/argument" )
            ordered_args = [None] * len( proc_args )
            for arg in proc_args:
                ordered_args[ int( arg.get( "position" ) ) ] = arg;
            def_code = SegmentProducer( "extern proc " + proc_name )
            args_code = ListProducer( ", ", "(", ")" )
            for arg in ordered_args:
                # "<intent> <name> : [] <type>" — intent and the array
                # brackets are omitted when not applicable.
                args_code.append( SegmentProducer(
                        ("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
                        arg.get("name") + " : " + \
                        ("[] " if arg.get("semantic") == "array" else "") + \
                        arg.get("type")
                    )
                )
            return_code = LineProducer( " : " + proc.get( "return-type" ) + ";" )
            #doc_comment = CommentProducer( "\nExternal Procedure to " + proc_name + "\n" + ("\nOriginal Fortran LAPACK documentation for " + basename + "::\n\n " + purpose + "\n\n" if purpose != "" else "") )
            #doc_comment = CommentProducer( "\nExternal Procedure to " + proc_name + "\n" + purpose + "\n" )
            code = SequenceOfProducers()
            #code.append( doc_comment )
            code.append( def_code )
            code.append( args_code )
            code.append( return_code )
            code_node = SubElement( proc, "code" )
            code_node.set( "category", "extern proc" )
            code_node.text = code.generate()
            proc_count += 1
        print "Generated code for", proc_count, "functions"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleStringToCharWraperProcPass ( Pass )
Purpose:
    Create string wrappers for all of the generated external procs from
ChapelModuleExternProcPass
'''
class ChapelModuleStringToCharWraperProcPass ( Pass ):
    """Create string-taking wrappers for the generated extern procs.

    For each LAPACKE_* procedure that has at least one c_char argument,
    generate an inline proc with the same signature but `string` in place
    of `c_char`, converting via ascii() at the call site. Attached as a
    <code> child with category="string wrapped".
    """
    dependencies = [ChapelModuleExternProcPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = ChapelModuleStringToCharWraperProcPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        module_root = xml_tree.find( "./chapel-module" )
        module_procs = module_root.find( "./procedures" )
        proc_count = 0
        for proc in module_procs.findall( "./procedure" ):
            proc_name = proc.get( "name" )
            # Raw LAPACK_* bindings are not wrapped.
            if proc_name.startswith( "LAPACK_" ):
                continue
            proc_args = proc.findall( "./arguments-list/argument" )
            ordered_args = [None] * len( proc_args )
            char_flag = False
            for arg in proc_args:
                ordered_args[ int( arg.get( "position" ) ) ] = arg;
                char_flag = arg.get( "type" ) == "c_char" or char_flag
            # skip procedures that dont have char arguments
            if not char_flag:
                continue
            code = SequenceOfProducers()
            code.append( SegmentProducer( "inline proc " + proc_name ) )
            args_code = ListProducer( ", ", "(", ")" )
            for arg in ordered_args:
                # Same signature as the extern proc, but c_char -> string.
                args_code.append( SegmentProducer(
                        ("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
                        arg.get("name") + " : " + \
                        ("[] " if arg.get("semantic") == "array" else "") + \
                        ( arg.get("type") if arg.get("type") != "c_char" else "string" )
                    )
                )
            code.append( args_code )
            code.append( SegmentProducer( " : " + proc.get( "return-type" ) ) )
            # Body: forward every argument, converting strings to c_char.
            func_body = ScopeProducer()
            call_args_producer = ListProducer( ", ", "(", ")" )
            for pass_arg in ordered_args:
                call_args_producer.append( SegmentProducer( ( pass_arg.get("name" ) if pass_arg.get("type") != "c_char" else "ascii(" + pass_arg.get( "name" ) + ") : c_char" ) ) )
            func_body.append( SegmentProducer( ( "return " if proc.get("return-type") != "void" else "" ) + proc.get("name") ) + call_args_producer + LineProducer( ";" ) )
            code.append( func_body )
            #code.prepend( CommentProducer( "\nString wrapped procedure of " + proc_name + "\n" ) )
            code_node = SubElement( proc, "code" )
            code_node.set( "category", "string wrapped" )
            code_node.text = code.generate()
            proc_count += 1
        print "Generated code for", proc_count, "functions"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleChapelerrificProcPass ( Pass )
Purpose:
Generate code for Chapel-errific upward facing procedures
'''
class ChapelModuleChapelerrificProcPass ( Pass ):
    """Generate code for the Chapel-errific, upward-facing procedures.

    For each procedure that ChapelerrificLAPACKEFunctionsPass prepared
    (i.e. has a <pass-through-arguments-list>), emit an inline proc named
    after the LAPACK config+function groups (e.g. "gesv") that forwards
    to the helper module's extern proc using the precomputed pass-through
    expressions. Attached as a <code> child with
    category="untyped chapelerrific".
    """
    dependencies = [ChapelModuleExternProcPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = ChapelModuleChapelerrificProcPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        module_root = xml_tree.find( "./chapel-module" )
        module_procs = module_root.find( "./procedures" )
        pass_info = Pass.input_xml.find( "./pass/[@name='ChapelModuleChapelerrificProcPass']" )
        # Name of the helper module the wrappers call into.
        helper_use = pass_info.find("./use").text
        proc_count = 0
        no_repeat = set()
        # Functions (config+func) that genuinely exist in their
        # mixed-precision "ds"/"zc" form (iterative refinement).
        iterative_functions = set()
        for case in pass_info.findall("./cases/case" ):
            iterative_functions.add( case.get("name") )
        for proc in module_procs.findall( "./procedure" ):
            # Only procedures prepared by the chapelerrific pass.
            if proc.find( "./pass-through-arguments-list" ) == None:
                continue
            base_name = proc.get("name").replace( "LAPACKE_", "" )
            match = func_name_group_regex.search( base_name );
            if match == None:
                #print proc.get("name"), "(", base_name, ") does not match regex"
                continue
            func = match.group( "function" )
            config = match.group( "config" )
            type = match.group( "type" )
            typeToTypeString = { "s" : "real(32)",
                                 "d" : "real(64)",
                                 "c" : "complex(64)",
                                 "z" : "complex(128)",
                                 "ds" : "real(64)",
                                 "zc" : "complex(128)"
                               }
            # C scalar types exposed as Chapel types in the signature.
            typeMap = {
                "c_float" : "real(32)",
                "c_double" : "real(64)",
                "c_char" : "string"
            }
            # Two-letter type prefixes that are NOT iterative-refinement
            # routines were mis-split by the regex; reshuffle the group
            # boundaries by one character.
            if (type == "ds" or type == "zc") and not config+func in iterative_functions:
                temp_type = type[0]
                temp_config = type[1]+config[0]
                temp_func = config[1] + func
                type = temp_type
                config = temp_config
                func = temp_func
            for name_category in [ (config+func, "untyped chapelerrific") ]: # (type+config+func, "chapelerrific")
                [proc_name, category_name] = name_category
                code = SequenceOfProducers()
                purpose = ""
                lapack_node = xml_tree.find( "./LAPACK/procedures/procedure/[@name='" + base_name.upper() + "']" )
                purpose = "" #"For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
                ''' #TODO get legal approval for Documentation inclusion.
                if proc_name in no_repeat:
                    purpose = "For more information, see the documentation for :proc:`" + proc_name + "`, or consult the Netlibs or Intel documentation.\n"
                elif lapack_node == None or lapack_node.find( "./purpose" ) == None or lapack_node.find( "./purpose" ).text == None:
                    prupose = ""
                else:
                    purpose = ("Original Fortran LAPACK purpose documentation for " + base_name.upper() + "::\n\n " + re.sub( r"[ \t]+", " ", lapack_node.find( "./purpose" ).text ) + "\n\n" )
                '''
                # Signature: only pass-up="true" arguments are exposed;
                # derived ones are computed from the pass-through list.
                proc_args = proc.findall( "./arguments-list/argument" )
                ordered_args = [None] * len( proc_args )
                for arg in proc_args:
                    ordered_args[ int( arg.get( "position" ) ) ] = arg;
                code.append( SegmentProducer( "inline proc " + proc_name ) )
                args_doc = str()
                args_producer = ListProducer(", ", "(", ")")
                for arg in ordered_args:
                    if arg.get("pass-up") == "true":
                        # arg.text, when present, is a default value.
                        args_producer.append( SegmentProducer(
                                ("" if arg.get("intent") == "none" else arg.get("intent") + " ") + \
                                arg.get("name") + " : " + \
                                ("[] " if arg.get("semantic") == "array" else "") + \
                                ( arg.get("type") if not arg.get("type") in typeMap else typeMap[ arg.get("type") ] ) + \
                                ( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
                            )
                        )
                    if lapack_node == None or arg.get("name") == "matrix_order":
                        continue
                    #print "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']"
                    lapack_arg_node = lapack_node.find( "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']" )
                    if lapack_arg_node == None:
                        continue
                    #prettyprintxml( lapack_arg_node )
                    ''' #TODO get legal approval for Documentation inclusion.
                    if (not proc_name in no_repeat) and lapack_arg_node.find( "./documentation" ) != None:
                        #arg_doc = " " + arg.get(arg.get("name").upper() + " : " + arg.get("type") + ( "" if arg.get("intent") == "none" else arg.get("intent").strip() ) + "\n"
                        text = re.sub( r"\n", "\n ", re.sub( r"[ \t]+", " ", lapack_node.find( "./arguments-list/argument/[@name='" + arg.get("name").upper() + "']/documentation" ).text ) )
                        arg_doc = " " + text + "\n\n"
                        if args_doc == "":
                            args_doc = "Original Fortran LAPACK argument documentation for " + base_name.upper() + "::\n\n"
                        args_doc += arg_doc
                    '''
                #args_doc += "\n\n"
                #code.prepend( CommentProducer( "\n" + ("Polymorphic " if category_name == "untyped chapelerrific" else "" ) + "Chapel idiomatic procedure of " + proc.get("name") + " for the type " + typeToTypeString[type] + ".\n\n" + purpose + args_doc ) )
                code.prepend( CommentProducer( "\n" + "Wrapped procedure of " + proc.get("name") + " for the type " + typeToTypeString[type] + ".\n") )
                code.append( args_producer )
                code.append( SegmentProducer( ": " + proc.get( "return-type" ) ) )
                # Body: forward to the helper module's extern proc using
                # the precomputed pass-through expressions.
                func_body = ScopeProducer()
                call_args_producer = ListProducer( ", ", "(", ")" )
                for pass_arg in proc.findall( "./pass-through-arguments-list/argument" ):
                    call_args_producer.append( SegmentProducer( pass_arg.text ) )
                func_body.append( SegmentProducer( ( "return " if proc.get("return-type") != "void" else "" ) + helper_use + "." + proc.get("name") ) + call_args_producer + LineProducer( ";" ) )
                code.append( func_body )
                code_node = SubElement( proc, "code" )
                code_node.set( "category", category_name )
                code_node.text = code.generate()
                no_repeat.add( proc_name )
                proc_count += 1
        print "Generated code for", proc_count, "functions"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternTypeDefinesPass ( Pass )
Purpose:
from the imported external type defines generate
external type code at each define tag
'''
class ChapelModuleExternTypeDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternTypeDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/type-defines" )
for define in module_defs.findall( "./define" ):
def_str = ("/*"+ define.find("./description").text + "*/\n" if define.find("./description") != None else "")
if define.get( "external" ) != None and define.get("external").lower() == "yes":
def_str += "extern "
def_str += "type " + define.get( "alias" ) + " "
if define.get( "base-type" ) != None:
def_str += "= " + define.get( "base-type" )
def_str += ";"
SubElement( define, "code" ).text = def_str
#prettyprintxml( module_defs )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleExternConstDefinesPass ( Pass ):
Purpose:
from the imported external const defines generate
    external const code at each define tag
'''
class ChapelModuleExternConstDefinesPass ( Pass ):
dependencies = [BaseChapelizePass, BaseImportPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = ChapelModuleExternConstDefinesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
module_defs = xml_tree.find( "./chapel-module/const-defines" )
for define in module_defs.findall( "./define" ):
def_str = ("/*"+ define.find("./description").text + "*/\n" if define.find("./description") != None else "")
if define.get( "external" ) != None and define.get( "external" ).lower() == "yes":
def_str += "extern "
def_str += "const " + define.get( "symbol" ) + " : " + define.get( "type" ) + " "
if define.get( "value" ) != None:
def_str += " = " + define.get( "value" )
def_str += ";"
SubElement( define, "code" ).text = def_str
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class ChapelModuleEnumDefinesPass ( Pass ):
Purpose:
from the imported enumeration defines generate
enum code at each enumeration tag
'''
class ChapelModuleEnumDefinesPass ( Pass ):
    """Generate Chapel `enum` definition code on each <enumeration> tag
    in the module's enum-defines subtree.
    """
    dependencies = [BaseChapelizePass, BaseImportPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = ChapelModuleEnumDefinesPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        module_defs = xml_tree.find( "./chapel-module/enum-defines" )
        for define in module_defs.findall( "./enumeration" ):
            # Each <value> contributes "name" or "name = text".
            values = ListProducer(", ", "{", "}")
            for value in define.findall( "./value" ):
                values.append( SegmentProducer( value.get("name") + ( " = " + value.text if value.text != None and value.text.strip() != "" else "" ) ) )
            # Optional <description> becomes a doc comment ahead of the enum.
            description_node = define.find("./description")
            if description_node != None:
                SubElement( define, "code" ).text = CommentProducer( description_node.text ).generate() + ( SegmentProducer( "enum " + define.get("name") ) + values + LineProducer(";") ).generate()
            else:
                SubElement( define, "code" ).text = ( SegmentProducer( "enum " + define.get("name") ) + values + LineProducer(";") ).generate()
        #prettyprintxml( module_defs )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseCodeGenerationPass ( Pass )
Purpose:
Ties together all code generation passes before
any text dumping into a file.
'''
class BaseCodeGenerationPass ( Pass ):
dependencies = [ChapelModuleExternProcPass, ChapelModuleStringToCharWraperProcPass, ChapelModuleChapelerrificProcPass, ChapelModuleExternTypeDefinesPass, ChapelModuleExternConstDefinesPass, ChapelModuleEnumDefinesPass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BaseCodeGenerationPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DumpCodePass ( Pass )
Purpose:
traverses <chapel-module> tree, collecting generated code text
and gently places it into the file defined in the input xml
pass information
'''
class DumpCodePass ( Pass ):
    """Traverse the <chapel-module> tree, collecting generated code text,
    and write it into the file named by the input xml pass information.

    Output layout: copyright header, main module open, use statements,
    type/const/enum defines, nested helper module (extern + string
    wrapped procs), then the chapelerrific procs, then module close.
    """
    dependencies = [BaseCodeGenerationPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = DumpCodePass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        pass_input = Pass.input_xml.find( "./pass/[@name='DumpCodePass']" )
        module_root = xml_tree.find( "./chapel-module" )
        ChaLAPACK_info = pass_input.find( "./main-module" )
        helper_info = pass_input.find( "./helper-module" )
        module_name = ChaLAPACK_info.get( "name" )
        # NOTE(review): the file handle is only closed on the success
        # path; an exception below leaks it — consider try/finally.
        module_file = open( ChaLAPACK_info.get( "file-name" ), "w" )
        module_file.write( pass_input.find("copyright").text )
        module_file.write( "/*\n" + ChaLAPACK_info.find("./description").text + "\n*/\n" )
        module_file.write( "module " + module_name + " {\n" )
        for use in ChaLAPACK_info.findall( "./use" ):
            module_file.write( "use " + use.text + ";\n" )
        module_file.write( "\n" )
        # inject types, consts, enums
        for defn in module_root.findall( "./type-defines/define" ):
            module_file.write( defn.find("./code").text + "\n" )
        module_file.write( "\n\n" )
        for defn in module_root.findall( "./const-defines/define" ):
            module_file.write( defn.find("./code").text + "\n" )
        module_file.write( "\n\n" )
        for defn in module_root.findall( "./enum-defines/enumeration" ):
            module_file.write( defn.find("./code").text + "\n" )
        module_file.write( "\n" )
        # inject helper module
        if helper_info.get("no-doc") == "all":
            module_file.write( "pragma \"no doc\"\n" )
        module_file.write( "/*\n" + helper_info.find( "./description" ).text + "\n*/\n" )
        module_file.write( "module " + helper_info.get("name") + " {\n" )
        for use in helper_info.findall( "./use" ):
            module_file.write( "use " + use.text + ";\n" )
        module_file.write( "\n" )
        # Whether individual helper procs get a no-doc pragma.
        nodoc_helper_procs = helper_info.get("no-doc") == "internals" or helper_info.get("no-doc") == "procedures" or helper_info.get("no-doc") == "all"
        for proc in module_root.findall( "./procedures/procedure" ):
            code = proc.find( "./code/[@category='extern proc']")
            if code != None:
                if nodoc_helper_procs:
                    module_file.write( "pragma \"no doc\"\n" )
                module_file.write( code.text + "\n" )
            code = proc.find( "./code/[@category='string wrapped']")
            if code != None:
                if nodoc_helper_procs:
                    module_file.write( "pragma \"no doc\"\n" )
                module_file.write( code.text + "\n" )
        module_file.write( "} // " + helper_info.get("name") + "\n" )
        # The user-facing chapelerrific procs live in the main module.
        for proc in module_root.findall( "./procedures/procedure" ):
            code = proc.find( "./code/[@category='untyped chapelerrific']" )
            if code != None:
                module_file.write( code.text + "\n" )
        module_file.write("} // " + module_name + "\n")
        module_file.close()
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
Below are passes that were used to explore the LAPACK source, or are Passes that were removed from the main set.
EXTREME caution is advised if using them. They may not (and probably do not) work with the current set of main passes
'''
class CountFunctions( Pass ):
dependencies = [BaseLAPACKPass, BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CountFunctions
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack = 0
for proc in xml_tree.findall( "./LAPACK/procedures/procedure" ):
lapack += 1
lapacke = 0
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
lapacke += 1
print "LAPACK", lapack, "LAPACKE", lapacke
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CreateAbstractLAPACKTreePass ( Pass )
Purpose:
Create Abstract-LAPACK tree
'''
class CreateAbstractLAPACKTreePass ( Pass ):
dependencies = [BaseChapelizePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CreateAbstractLAPACKTreePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = SubElement( xml_tree, "Abstract-LAPACK" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketLAPACKFunctionGroups ( Pass )
Purpose:
bucket LAPACK functions by their base function, type, and matrix type
'''
class BucketLAPACKFunctionGroupsPass ( Pass ):
    """Bucket LAPACK functions by base function, type, and matrix
    configuration into the <Abstract-LAPACK> tree.

    Only "ge" matrix-configuration LAPACKE_* procedures are bucketed;
    each type entry records an XPath "analogue" back to the
    chapel-module procedure it came from.
    """
    dependencies = [CreateAbstractLAPACKTreePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = BucketLAPACKFunctionGroupsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
        module_root = xml_tree.find( "./chapel-module" )
        module_procs = module_root.find( "./procedures" )
        # Guards against the same procedure appearing twice.
        funcs = set()
        for proc in module_procs.findall( "./procedure" ):
            proc_name = proc.get( "name" )
            #print proc_name
            if proc_name in funcs:
                raise GeneralPassFailure( "DOUBLE HIT " + proc_name )
            else:
                funcs.add( proc_name )
            # we only care about LAPACKE_ functions
            if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
                continue
            base_name = proc_name.replace( "LAPACKE_", "" ) #.replace( "_work", "" )
            # Split into (type, config, function), e.g. "dgesv".
            match = func_name_group_regex.search( base_name );
            if match == None:
                print proc_name, "(", base_name, ") does not match regex"
                continue
            func = match.group( "function" )
            config = match.group( "config" )
            type = match.group( "type" )
            if config != "ge":
                continue
            # Find-or-create the <group> and <matrix-configuration> nodes.
            group_node = abstract_lapack.find( "./group/[@name='" + func + "']" )
            if group_node == None:
                group_node = SubElement( abstract_lapack, "group" )
                group_node.set("name", func )
            config_node = group_node.find( "./matrix-configuration/[@name='" + config + "']" )
            if config_node == None:
                config_node = SubElement( group_node, "matrix-configuration" )
                config_node.set( "name", config )
            if config_node.find( "./types/type/[@name='" + type + "']" ) != None:
                print "Double declaration of abstract LAPACK function", type, config, func, base_name, proc_name
                continue
            #prettyprintxml( config_node.find( "./type/[@name='" + type + "']" ) )
            types_node = SubElementUnique( config_node, "types" )
            type_node = SubElement( types_node, "type" )
            type_node.set( "name", type )
            # XPath pointing back at the originating module procedure.
            type_node.set( "analogue", "./chapel-module/procedures/procedure/[@name='" + proc_name + "']" )
        #prettyprintxml( abstract_lapack )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class ImportAbstractLAPACKFunctionsPass ( Pass ):
    """Generate upward-facing Chapel wrapper procedures from the groups listed
    in the input XML under ./pass/[@name='ImportAbstractLAPACKFunctionsPass'].

    For each <matrix-configuration> a generic Chapel proc is emitted whose
    ``where`` clause dispatches on the element type T; the body forwards to
    the per-type chapel-module analogue. The generated source is stored as a
    new <procedure><code category="upward facing"> node in the chapel-module.
    """
    dependencies = [BaseCodegenReadyPass]
    complete = False

    @staticmethod
    def apply( xml_tree ):
        selfname = ImportAbstractLAPACKFunctionsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        module_defs = xml_tree.find( "./chapel-module/procedures" )
        # Groups come from the tool's input XML, not from xml_tree itself.
        group_input = Pass.input_xml.find( "./pass/[@name='ImportAbstractLAPACKFunctionsPass']" )
        proc_count = 0
        for group in group_input.findall( "./group" ):
            for config in group.findall( "./matrix-configuration" ):
                code = SequenceOfProducers()
                # Generated proc is named <config><group>, e.g. "gesv".
                proc_name = config.get( "name" ) + group.get( "name" )
                code.append( SegmentProducer( "proc " + proc_name ) )
                # Formal argument list: "intent name : [optional []] type [= default]".
                args_producer = ListProducer(", ", "(", ")")
                for arg in config.findall( "./method-arguments/argument" ):
                    args_producer.append( SegmentProducer(
                            arg.get("intent") + " " + \
                            arg.get("name") + " : " + \
                            ("[] " if arg.get("semantic") == "array" else "") + \
                            arg.get("type") + \
                            ( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
                        )
                    )
                code.append( args_producer )
                code.append( LineProducer( ": lapack_int" ) )
                # where-clause restricts T to the concrete types this group supports.
                code.append( SegmentProducer( "where " ) )
                where_producer = ListProducer( " || ", "", "" )
                for type in config.findall("./types/type"):
                    # NOTE(review): reads the "type" attribute of the input-XML
                    # <type> node (distinct from the "name" attribute used in
                    # the Abstract-LAPACK tree) — confirm against the input file.
                    where_producer.append( SegmentProducer( "T == " + type.get( "type" ) ) )
                code.append( where_producer )
                info_var = config.get( "name" ) + group.get( "name" ) + "_return_info"
                func_body = ScopeProducer()
                func_body.append( LineProducer( "var " + info_var + " : lapack_int;" ) )
                #if_bodies = SequenceOfProducers()
                # Map each analogue argument name to its pass-through expression.
                arg_relates = {}
                ana_args = []
                for arg in config.findall( "./analogue-arguments-list/argument" ):
                    arg_name = arg.get("name")
                    arg_relates[ arg_name ] = config.find( "./arguments-relationships/argument/[@name='" + arg_name + "']" )
                    ana_args.append( arg );
                # One "if (T == ...)" branch per supported concrete type.
                for type in config.findall("./types/type"):
                    chpl_ana = xml_tree.find( type.get( "analogue" ) )
                    if_condition = LineProducer( "if ( T == " + type.get("type") + " )" )
                    func_body.append( if_condition )
                    if_body = ScopeProducer()
                    call_equals = SegmentProducer( info_var + " = " + chpl_ana.get( "name" ) )
                    call_seq = ListProducer( ", ", "(", ")" )
                    for ana_arg in ana_args:
                        # Scalars get an explicit cast to the analogue's type;
                        # arrays are passed through as-is.
                        call_seq.append( SegmentProducer(
                                "(" + arg_relates[ana_arg.get("name")].text.strip() + ")" + \
                                (" : " + ana_arg.get("type") if ana_arg.get("semantic") != "array" else "")
                            )
                        )
                    if_body.append( call_equals + call_seq + LineProducer( ";" ) )
                    func_body.append( if_body )
                func_body.append( LineProducer( "return " + info_var + ";" ) )
                code.append( func_body )
                # Install the generated code into the chapel-module tree.
                module_proc = SubElement( module_defs, "procedure" )
                module_proc.set( "name", proc_name )
                code_node = SubElement( module_proc, "code" )
                code_node.set( "category", "upward facing" )
                code_node.text = code.generate()
                proc_count += 1
        print "Generated", proc_count, "procedures"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class CommonArgumentCollectionPass ( Pass )
Purpose:
	collect common arguments into the function groups
'''
class CommonArgumentCollectionPass ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = CommonArgumentCollectionPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
args_tree = ET.Element( "analogue-arguments-list" )
''''
prefix_type_map = {
"s" : "float",
"d" : "double",
"c" : "complex_float",
"z" : "complex_double",
"float" : "s",
"double" : "d",
"complex_float" : "c",
"complex_double" : "z"
}
'''
all_args = {} # dictionary to set [arg_name]=>set( function_names )
all_funcs = set() # set of all functions encountered
func_args_type = {} # {func_name} => {arg_name} => type_name
for type_func in config_node.findall( "./types/type" ):
type_name = type_func.get( "name" );
all_funcs.add( type_name );
func_args_type[ type_name ] = {};
chapel_func = xml_tree.find( type_func.get( "analogue" ) )
if chapel_func == None:
raise GeneralPassFailure( type_name + config_node.get( "name" ) + group_node.get( "name" ) + " does not have chapel analogue" )
for arg in chapel_func.findall( "./arguments-list/argument" ):
func_args_type[ type_name ][ arg.get("name") ] = arg.get("type")
args_type
find = args_tree.find( "./argument/[@name='" + arg.get( "name" ) + "']" )
if find == None:
args_tree.append( arg )
elif arg.get( "type" ) != find.get( "type" ):
find.set( "type", "?T" )
abstract_arg = ET.Element( "argument" )
arg_name = arg.get( "name" )
if not arg_name in all_args:
all_args[arg_name] = set()
all_args[arg_name].add(type_name)
for arg_name in all_args:
if all_args[ arg_name ] != all_funcs:
arg = args_tree.find( "./argument/[@name='" + arg_name + "']" )
args_tree.remove( arg )
for type_func_name in all_args[ arg_name ]:
#print "find", type_func_name
#prettyprintxml( config_node )
type_func = config_node.find( "./types/type/[@name='" + type_func_name + "']" )
args_list = SubElementUnique( type_func, "arguments-list" )
args_list.append( arg )
config_node.append( args_tree )
#prettyprintxml( abstract_lapack.find( "./group/[@name='sv']" ) )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BaseAbstractLAPACKPass ( Pass )
Purpose:
	Ties together all passes that populate the Abstract-LAPACK classes
	for upward facing LAPACK chapel procedures
'''
class BaseAbstractLAPACKPass ( Pass ):
    """Umbrella pass: exists only so its dependency list pulls in every pass
    that populates the Abstract-LAPACK tree. apply() itself does no work.
    """
    dependencies = [BucketLAPACKFunctionGroupsPass, CommonArgumentCollectionPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = BaseAbstractLAPACKPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class DropAttemptedAssociations ( Pass ):
    """For each upward-facing "ge" chapel-module procedure, compute which
    arguments can be dropped from the user-visible signature because their
    values are derivable from array arguments (dimensions, matrix sizes,
    array-association fields), and record the pass-through expression for
    every original argument. The result is written to
    DropAttemptedAssociations_output.xml rather than back into xml_tree.
    """
    dependencies = [BaseChapelizePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = DropAttemptedAssociations
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        output_xml = ET.Element( "pass" );
        output_xml.set( "name", "DropAttemptedAssociations" )
        output_procs = SubElement( output_xml, "procedures" );
        for chpl_proc in xml_tree.findall( "./chapel-module/procedures/procedure" ):
            proc_name = chpl_proc.get( "name" )
            # Skip raw LAPACK_ bindings and *_work variants; only LAPACKE_
            # convenience procedures are considered.
            if proc_name.startswith( "LAPACK_" ) or proc_name.endswith( "_work" ):
                continue
            base_name = proc_name.replace( "LAPACKE_", "" )
            match = func_name_group_regex.search( base_name );
            if match == None:
                print proc_name, "(", base_name, ") does not match regex"
                continue
            func = match.group( "function" )
            config = match.group( "config" )
            type = match.group( "type" )
            # Only general ("ge"-prefixed) matrix configurations are handled.
            if not config.startswith( "ge" ):
                continue
            # Work on a deep copy; the original chapel-module tree is untouched.
            proc = copy.deepcopy( chpl_proc )
            print proc.get( "name" )
            #prettyprintxml( proc )
            args_node = proc.find( "./arguments-list" )
            args_list = [ ]            # all argument elements, in order
            args_names = []            # their names, parallel to args_list
            remove_list = set()        # argument elements to drop from the signature
            pass_through = {}          # arg name => expression used when calling the analogue
            for arg in args_node.findall( "./argument" ):
                args_list.append( arg )
                args_names.append( arg.get("name") )
                # Default: pass the argument straight through under its own name.
                pass_through[ arg.get("name") ] = arg.get( "name" )
            # --- Array arguments: derive dimension/size scalars from the array itself.
            for arg in args_list:
                if arg.get( "semantic" ) != "array" :
                    continue
                if arg.get( "dimensions" ) != None:
                    dimensions = arg.get( "dimensions" ).lower().split(",")
                    for i in range( len(dimensions) ):
                        dimension = dimensions[i]
                        # "*" means an unconstrained dimension; nothing to derive.
                        if dimension == "*":
                            continue
                        removeVar = None
                        for find in args_list:
                            if find.get( "name" ) == dimension:
                                removeVar = find
                                break
                        if removeVar != None:
                            remove_list.add( removeVar )
                            # Chapel domains are 1-based, hence dim(i+1).
                            pass_through[ dimension ] = arg.get("name") + ".domain.dim("+str(i+1)+").size"
                        else:
                            print ( dimension + " is not described in the arguments of "+proc.get( "name" ) )
                if arg.get( "matrix-size" ) != None:
                    # "matrix-size" names the rows,cols scalar arguments of this array.
                    matrix_size = arg.get( "matrix-size" ).lower()
                    rows = matrix_size.split(",")[0].strip()
                    cols = matrix_size.split(",")[1].strip()
                    removeRows = None
                    removeCols = None
                    for find in args_list:
                        if find.get( "name" ) == rows:
                            removeRows = find
                        if find.get( "name" ) == cols:
                            removeCols = find
                    if removeRows != None and removeCols != None:
                        # Row/column meaning swaps with the storage order.
                        pass_through[ rows ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size "
                        pass_through[ cols ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size "
                        remove_list.add( removeRows )
                        remove_list.add( removeCols )
                    else:
                        print ( rows + " and " + cols + " are not described in the arguments of "+proc.get( "name" ) )
            # --- Scalar arguments: char widening and array-association fields.
            for arg in args_list:
                if arg.get( "semantic" ) != "scalar" :
                    continue
                if arg.get( "type" ) == "c_char":
                    # Expose c_char scalars as Chapel strings; convert on call.
                    arg.set( "type", "string" )
                    pass_through[ arg.get("name") ] = "ascii(" + arg.get( "name" ) + ") : c_char"
                associate_array_str = arg.get( "associate-array" )
                associate_field_str = arg.get( "associate-field" )
                if associate_array_str != None:
                    # The scalar is a property (rows/columns/order/rank) of one
                    # of several candidate arrays; use the first candidate that
                    # is actually an argument of this procedure.
                    array_field_map = {}
                    arrays = associate_array_str.split(",")
                    fields = associate_field_str.split(",")
                    array = ""
                    field = ""
                    for i in range( len( arrays ) ) :
                        arrays[i] = arrays[i].lower()
                        fields[i] = fields[i].lower()
                        array_field_map[ arrays[i] ] = fields[i]
                    for associate_array in arrays:
                        if associate_array in args_names:
                            array = associate_array
                            field = fields[ arrays.index( array ) ]
                            break;
                    if field == "rows":
                        pass_through[ arg.get("name") ] = "if matrix_order == LAPACK_ROW_MAJOR then " + array + ".domain.dim(1).size else " + array + ".domain.dim(2).size "
                    elif field == "columns":
                        pass_through[ arg.get("name") ] = "if matrix_order == LAPACK_ROW_MAJOR then " + array + ".domain.dim(2).size else " + array + ".domain.dim(1).size "
                    elif field == "order" or field == "rank":
                        pass_through[ arg.get("name") ] = array + ".domain.dim(1).size"
                    else:
                        raise GeneralPassFailure( field + " is not a recognized array association field" )
                    remove_list.add( arg )
            # Record the pass-through expression for EVERY original argument
            # (including the ones about to be removed from the signature).
            pass_through_node = SubElement( proc, "pass-through" )
            for arg in args_node.findall( "./argument" ):
                passing = copy.deepcopy( arg )
                passing.text = pass_through[ arg.get( "name" ) ]
                pass_through_node.append( passing )
            # Drop the derivable arguments from the visible signature.
            for rm_arg in remove_list:
                args_node.remove( args_node.find( "./argument/[@name='" + rm_arg.get( "name" ) + "']" ) )
            # Renumber surviving arguments; default matrix_order to row-major.
            count = 0
            for arg in args_node:
                arg.set("position", str( count ) )
                count += 1
                if arg.get( "name" ) == "matrix_order":
                    arg.text = "LAPACK_ROW_MAJOR"
            #print pass_through
            #prettyprintxml( proc )
            #print pass_through, "\n", "==="*20, "\n"
            output_procs.append( proc )
        prettywritexml( output_xml, "DropAttemptedAssociations_output.xml" )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class AbstractDropAttemptedAssociations ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropAttemptedAssociations
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
if config_node.findall( "./types/type/arguments-list" ) != [] :
print config_node.get("name") + group_node.get("name"), " has typed functions with non common arguments. Skipping."
continue
full_func_name = config_node.get("name") + group_node.get("name")
all_args = []
array_args = set()
method_args = []
pass_through = {} # str => str
removed = {} # str => bool
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
all_args.append( arg.get("name") )
if arg.get( "semantic" ) == "array" :
array_args.add( arg.get("name" ) )
removed[ arg.get("name") ] = False
method_args.append( arg )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
if removed[ arg.get("name") ] or arg.get( "semantic" ) != "array":
continue
pass_through[ arg.get("name") ] = arg.get( "name" )
if arg.get( "dimensions" ) != None:
dimensions = arg.get( "dimensions" ).split(",")
for i in range( len(dimensions) ):
dimension = dimensions[i].lower()
if dimension == "*":
continue
pass_through[ dimension ] = arg.get("name") + ".domain.dim("+str(i+1)+").size"
removed[ dimension ] = True
matrix_size = arg.get( "matrix-size" )
if matrix_size != None:
rows = matrix_size.split(",")[0].strip().lower()
cols = matrix_size.split(",")[1].strip().lower()
pass_through[ rows ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(1).size else " + arg.get("name") + ".domain.dim(2).size "
pass_through[ cols ] = "if matrix_order == LAPACK_ROW_MAJOR then " + arg.get("name") + ".domain.dim(2).size else " + arg.get("name") + ".domain.dim(1).size "
removed[ rows ] = True
removed[ cols ] = True
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
if removed[ arg.get("name") ] or arg.get( "semantic" ) != "scalar":
continue
pass_through[ arg.get("name") ] = arg.get("name")
for rm in removed:
if not removed[rm]:
continue
for i in range( len( method_args ) ):
if method_args[i].get("name") == rm:
method_args.remove( method_args[i] )
break;
interface_node = SubElement( config_node, "method-arguments" )
for arg in method_args :
argument = SubElement( interface_node, "argument" )
argument.set( "name", arg.get("name") )
argument.set( "intent" , arg.get("intent") )
argument.set( "semantic", arg.get("semantic") )
argument.set( "type", arg.get("type") )
argument.text = " " if arg.get("name") != "matrix_order" else "LAPACK_ROW_MAJOR"
pass_through_node = SubElement( config_node, "arguments-relationships" )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
arg_name = arg.get( "name" )
arg_relate = SubElement( pass_through_node, "argument" )
arg_relate.set( "name", arg_name )
arg_relate.text = pass_through[arg_name]
prettywritexml( abstract_lapack, "AbstractDropAttemptedAssociations_output.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNeedsArgsDocPatchPass ( Pass )
Purpose:
Was used to find fortran files with incorrect argument documentation
( \param[intent] blah blah )
'''
class FindNeedsArgsDocPatchPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNeedsArgsDocPatchPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_root_procs = xml_tree.find( "./LAPACK/procedures" )
patch = []
for proc in lapack_root_procs.findall( "./procedure" ):
printed = False
#prettyprintxml( proc )
for arg in proc.findall( "./arguments-list/argument" ):
if arg.find( "./documentation" ) == None:
if not printed:
print proc.get( "name" ), proc.get( "file-name" )
printed = True
print arg.get( "name" ), "MISSING"
patch.append( (proc.get("name"), proc.get("file-name"), arg.get("name") ) )
print patch
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNeedsFuncArgsTypePatchPass ( Pass )
Purpose:
Was used to find fortran files with incorrect argument type documentation
( ie ..Scalar Arguments.. blah blah )
'''
class FindNeedsFuncArgsTypePatchPass ( Pass ):
    """Report fortran source files whose documentation block is missing the
    '..Scalar Arguments..' or '..Array Arguments..' sections (matched by the
    module-level doc_scalarargs_regex / doc_arrayargs_regex patterns), and
    print the offending file names as comma-separated lists.
    """
    dependencies = [LAPACKFunctionDefinePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = FindNeedsFuncArgsTypePatchPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_node = xml_tree.find( "./LAPACK" )
        text_node = lapack_node.find( "./text" )
        procs_node = lapack_node.find( "./procedures" )
        none_scalar = []  # (proc name, file name) with no scalar-args section
        none_array = []   # (proc name, file name) with no array-args section
        for proc_node in procs_node.findall( "./procedure" ):
            proc_file_name = proc_node.get( "file-name" )
            # The raw documentation text is stored per source file, not per procedure.
            doc_node = text_node.find( "./file/[@name='" + proc_file_name + "']/documentation" )
            scalars = doc_scalarargs_regex.search( doc_node.text )
            arrays = doc_arrayargs_regex.search( doc_node.text )
            if scalars == None:
                none_scalar.append( (proc_node.get( "name"), proc_file_name) )
            if arrays == None:
                none_array.append( (proc_node.get( "name"), proc_file_name) )
        print "none_scalars", none_scalar,"\n\nnone_arrays", none_array
        print "="*100
        # Emit just the file names, comma-separated, for easy copy-paste.
        for i in none_scalar:
            sys.stdout.write( i[1] + "," )
        print "\n"*2
        for i in none_array:
            sys.stdout.write( i[1] + "," )
        print "\n"*2
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindAllLAPACKETypesPass ( Pass )
Purpose:
was used to collect all the types named in LAPACKE.h
to put into input xml type defines etc.
'''
class FindAllLAPACKETypesPass ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindAllLAPACKETypesPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_procs_root = xml_tree.find( "./LAPACKE/procedures" )
types = set()
for proc in lapacke_procs_root.findall( "./procedure" ):
types.add( proc.get( "return-type" ) )
for arg in proc.findall( "./arguments-list/argument" ):
types.add( arg.get( "type" ) )
print types
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindScalarOutIntentsPass ( Pass )
Purpose:
Find scalars in the fortran code with 'out' intents, that
are also not INFOs
Explore if there are LAPACKE scalars that are out intents
'''
class FindScalarOutIntentsPass ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindScalarOutIntentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACK/procedures" )
print lapack_procs_root
outs = []
for proc in lapack_procs_root.findall( "./procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
if arg.get( "semantic" ) == "scalar" \
and "out" in arg.get( "intent" ) \
and arg.get("name").lower() != "info":
outs.append( (proc.get( "name" ), arg.get( "name" ), proc.get( "file-name" ) ) )
print outs
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindIntentSetPass ( Pass ):
Purpose:
find the set of all intents that exist in LAPACKE fold
'''
class FindIntentSetPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindIntentSetPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
intents = set()
for proc in lapack_procs_root.findall( "./procedure" ):
#print proc.get( "name" )
#prettyprintxml( proc )
for arg in proc.findall( "./arguments-list/argument" ):
#print arg.get( "name" )
if arg.get( "intent" ) != None:
intents.add( arg.get( "intent" ) )
print intents
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindIntentSemanticsRefsSetPass ( Pass ):
Purpose:
	find the set of all (intent, semantic, refdepth) combinations that exist in the LAPACKE fold
'''
class FindIntentSemanticsRefsSetPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindIntentSemanticsRefsSetPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
combos = {}
for proc in lapack_procs_root.findall( "./procedure" ):
for arg in proc.findall( "./arguments-list/argument" ):
intent = arg.get( "intent" )
semantic = arg.get( "semantic" )
refdepth = arg.get( "refdepth" )
combos[ (intent, semantic, refdepth ) ] = (proc, arg)
for key in combos:
print key, "(", combos[ key ][0].get( "name" ), combos[key][1].get( "name" ), ")"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindBadIntentSemanticsCodePass ( Pass ):
Purpose:
find fortran code where intent or semantic are None
'''
class FindBadIntentSemanticsCodePass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindBadIntentSemanticsCodePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
list = {}
for proc in lapack_procs_root.findall( "./procedure" ):
proc_name = proc.get("name")
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
intent = arg.get( "intent" )
semantic = arg.get( "semantic" )
if arg_name != "matrix_order" and ( (intent == None) ^ (semantic == None) ):
if not proc_name in list:
list[proc_name] = []
list[proc_name].append( (arg_name, intent, semantic, proc) )
files_str = str()
for key in list:
proc = list[key][1][3]
analogue_txt = proc.find( "./analogues/analogue" ).text
analogue = xml_tree.find( analogue_txt )
files_str += analogue.get( "file-name" ) + ","
print key, analogue_txt, analogue.get( "file-name" )
#prettyprintxml( proc )
#prettyprintxml( analogue )
for elem in list[key]:
print "\t",elem
print ""
print files_str
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindPassByRefNonAnalouges ( Pass ):
Purpose:
	find all C lapack procedures (and their pass-by-reference arguments) with no fortran analogues
'''
class FindPassByRefNonAnalouges ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindPassByRefNonAnalouges
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_procs_root = xml_tree.find( "./LAPACKE/procedures" )
list = []
for proc in lapack_procs_root.findall( "./procedure" ):
proc_name = proc.get( "name" )
if proc.find( "./analogues" ) == None:
#list.add( proc.get( "name" ) )
print "Function", proc_name, "has no fortran analogue"
continue
printed = False
for arg in proc.findall( "./arguments-list/argument" ):
if arg.find( "./analogues" ) == None and \
int(arg.get( "refdepth" )) > 0 :
if not printed:
printed = True
print "In function", proc_name, ":"
print "\tArgument", arg.get( "name" ), "of refdepth", int(arg.get( "refdepth" )), "has no fortran analogue"
if printed:
print ""
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindFortranNoTypes ( Pass )
Purpose:
find any fortran arguments with no associated type
'''
class FindFortranNoTypes ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindFortranNoTypes
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
print lapack_f_procs
for f_proc in lapack_f_procs.findall( "./procedure" ):
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == None:
print f_proc.get( "name" ), f_proc.get( "file-name" ), f_arg.get( "name" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketArgumentsSemanticsPass ( Pass )
Purpose:
	Find and bucket arguments by semantic
'''
class BucketArgumentsSemanticsPass ( Pass ):
dependencies = [FoldLAPACKSemanticsIntentsToLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketArgumentsSemanticsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACKE/procedures" )
variables = {}
for proc in lapack_f_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
for arg in proc.findall( "./arguments-list/argument" ):
arg_name = arg.get( "name" )
semantic = arg.get( "semantic" ) if arg.get( "semantic" ) != None else "none"
if not arg_name in variables:
variables[ arg_name ] = {}
if not semantic in variables[ arg_name ]:
variables[ arg_name ][ semantic ] = []
variables[ arg_name ][ semantic ].append( proc_name )
for arg in variables:
if len( variables[ arg ] ) > 2:
print arg
for semantic in variables[ arg ]:
print " \"" + semantic + "\"", ":", len( variables[ arg ][ semantic ] )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class BucketFortranTypes ( Pass )
Purpose:
find all fortran types
'''
class BucketFortranTypes ( Pass ):
dependencies = [DestroyUnassociatedFortranFunctionsTreePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = BucketFortranTypes
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
lapack_c_procs = xml_tree.find( "./LAPACKE/procedures" )
f_types = set()
c_types = set()
for f_proc in lapack_f_procs.findall( "./procedure" ):
#if f_proc.get( "return-type" ) != None:
# f_types.add( f_proc.get( "return-type" ) )
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) != None:
f_types.add( f_arg.get( "type" ) )
else:
f_types.add( "~BAD. None for type~" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
#if c_proc.get( "return-type" ) != None:
# c_types.add( c_proc.get( "return-type" ) )
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.get( "type" ) != None:
c_types.add( c_arg.get( "type" ) )
else:
c_types.add( "~BAD. None for type~" )
print "C types", c_types
print "Fortran types", f_types,"\n"
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindAFortranType ( Pass )
Purpose:
find a fortran type
'''
class FindAFortranType ( Pass ):
dependencies = [ BaseAssociatePass ]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindAFortranType
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_f_procs = xml_tree.find( "./LAPACK/procedures" )
#lapack_c_procs = xml_tree.find( "./LAPACKE/procedures" )
f_types = set()
#c_types = set()
find = "RECURSIVE"
for f_proc in lapack_f_procs.findall( "./procedure" ):
if f_proc.get( "return-type" ) == find:
print f_proc.get( "name" )
#return
for f_arg in f_proc.findall( "./arguments-list/argument" ):
if f_arg.get( "type" ) == find:
print f_proc.get( "name" ), f_arg.get( "name" )
#return
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindUnmatchedArgsPass ( Pass )
Purpose:
Find unresolved matches arising from misnames
'''
class FindUnmatchedArgsPass ( Pass ):
    """For every C procedure with a fortran analogue, print the C arguments
    that found no fortran counterpart alongside the fortran arguments that
    found no counterpart in that C procedure — likely mis-named pairs.
    """
    dependencies = [BaseAssociatePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = FindUnmatchedArgsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_c_root = xml_tree.find( "./LAPACKE" )
        lapack_c_procs = lapack_c_root.find( "./procedures" )
        lapack_f_root = xml_tree.find( "./LAPACK" )
        lapack_f_procs = lapack_f_root.find( "./procedures" )
        for c_proc in lapack_c_procs.findall( "./procedure" ):
            # Find the fortran analogue.
            # Note: there should only be one path from C -> Fortran.
            # though there may be many paths from Fortran -> C
            f_proc_ana = c_proc.find( "./analogues/analogue" )
            if f_proc_ana == None:
                continue
            # The analogue's text is an XPath into the fortran subtree.
            f_proc = xml_tree.find( f_proc_ana.text )
            c_no_match = []
            f_no_match = []
            # C side: arguments with no analogue (matrix_order is C-only by design).
            for c_arg in c_proc.findall( "./arguments-list/argument" ):
                #print c_arg.get( "name" )
                if c_arg.find( "./analogues/" ) == None \
                and c_arg.get( "name" ) != "matrix_order" :
                    #print "has none"
                    c_no_match.append( c_arg )
                    #prettyprintxml( c_arg )
            # Fortran side: arguments with no analogue INTO THIS C procedure
            # (INFO becomes the C return value, so it is expected to miss).
            for f_arg in f_proc.findall( "./arguments-list/argument" ):
                f_ana_node = f_arg.find( "./analogues" )
                # if zero analogues add no_match
                if f_ana_node == None:
                    f_no_match.append( f_arg )
                    continue
                # if no analogues to this function add no_match
                if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
                and f_arg.get( "name" ) != "INFO":
                    f_no_match.append( f_arg )
            # Only report procedures where the C side actually misses something.
            if c_no_match == []: continue
            print c_proc.get( "name" ), ":", f_proc.get( "name" )
            print "+",c_proc.get( "name" )
            for m in c_no_match:
                #prettyprintxml( m )
                print "\t-", m.get( "name" )
            print "+",f_proc.get( "name" )
            for m in f_no_match:
                #prettyprintxml( m )
                print "\t-", m.get( "name" )
            print "\n"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class FindNoneLAPACKESementicsPass ( Pass )
Purpose:
Find LAPACKE arguments of procedures with no semantics
this arises when the function or the arguments do not
have analogues or they have not been imported
'''
class FindNoneLAPACKESementicsPass ( Pass ):
dependencies = [BaseTransformLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindNoneLAPACKESementicsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
for c_proc in lapack_c_procs.findall( "./procedure" ):
#if c_proc.find( "./analogues/analogue" ) == None: continue
printed = False
for c_arg in c_proc.findall( "./arguments-list/argument" ):
if c_arg.get( "semantic" ) == None:
if not printed:
print c_proc.get( "name" )
printed = True
print "Missing sementic on", c_arg.get( "name" )
if printed:
print ""
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindHasNoFortranAnaloguePass ( Pass ):
dependencies = [BaseAssociatePass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindHasNoFortranAnaloguePass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_c_root = xml_tree.find( "./LAPACKE" )
lapack_c_procs = lapack_c_root.find( "./procedures" )
list = set()
for c_proc in lapack_c_procs.findall( "./procedure" ):
if c_proc.find( "./analogues/analogue" ) == None:
#list.add( c_proc.get("name").replace( "LAPACKE_", "" ).replace( "LAPACK_", "" ).replace( "_work", "" ) )
print c_proc.get( "name" )
for i in list:
print i
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DropFileOfCTreeUnmatchedArgsPass ( Pass )
Purpose:
export an xml file that has most skeleton of xml tree that could
be installed into the input xml for the ImportArgumentAnaloguesPass
'''
class DropFileOfCTreeUnmatchedArgsPass ( Pass ):
    """Write DropFileOfCTreeUnmatchedArgsPass_output.xml: a skeleton listing
    each C argument without a fortran analogue, together with the fortran
    arguments that could be its substitution. The file is meant to be edited
    by hand and fed back in via ImportArgumentAnaloguesPass.
    """
    dependencies = [BaseAssociatePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = DropFileOfCTreeUnmatchedArgsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_c_root = xml_tree.find( "./LAPACKE" )
        lapack_c_procs = lapack_c_root.find( "./procedures" )
        output_tree = ET.Element( "pass-output" )
        pass_output = SubElement( output_tree, "pass" )
        pass_output.set( "name", "DropFileOfCTreeUnmatchedArgsPass" )
        for c_proc in lapack_c_procs.findall( "./procedure" ):
            # Find the fortran analogue.
            # Note: there should only be one path from C -> Fortran.
            # though there may be many paths from Fortran -> C
            f_proc_ana = c_proc.find( "./analogues/analogue" )
            if f_proc_ana == None:
                continue
            # The analogue's text is an XPath into the fortran subtree.
            f_proc = xml_tree.find( f_proc_ana.text )
            c_no_match = []
            f_no_match = []
            # C arguments with no analogue (matrix_order is C-only by design).
            for c_arg in c_proc.findall( "./arguments-list/argument" ):
                if c_arg.find( "./analogues/" ) == None \
                and c_arg.get( "name" ) != "matrix_order" :
                    c_no_match.append( c_arg )
            # Fortran arguments with no analogue into this C procedure
            # (INFO becomes the C return value, so it is expected to miss).
            for f_arg in f_proc.findall( "./arguments-list/argument" ):
                f_ana_node = f_arg.find( "./analogues" )
                # if zero analogues add no_match
                if f_ana_node == None:
                    f_no_match.append( f_arg )
                    continue
                # if no analogues to this function add no_match
                if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
                and f_arg.get( "name" ) != "INFO":
                    f_no_match.append( f_arg )
            if c_no_match == []: #and f_no_match == []:
                continue
            # Emit one <procedure> with the unmatched C arguments and, under
            # each, the unmatched fortran arguments as candidate substitutions.
            proc_node = SubElement( pass_output, "procedure" )
            proc_node.set("name", c_proc.get( "name" ) )
            proc_node.set( "analogue-path", f_proc_ana.text )
            for c_arg in c_no_match:
                arg_node = SubElement( proc_node, "argument" )
                arg_node.set( "name", c_arg.get("name") )
                possible = SubElement( arg_node, "possible_substitutions" )
                for f_arg in f_no_match :
                    f_arg_node = SubElement( possible, "option" )
                    f_arg_node.set( "name", f_arg.get( "name" ) )
                    f_arg_node.set( "semantic", f_arg.get( "semantic" ) )
                    f_arg_node.set( "intent", f_arg.get( "intent" ) )
                    f_arg_node.set( "type", f_arg.get( "type" ) )
                    # Pre-fill the substitution with the fortran name; a human
                    # edits this value before re-import.
                    f_arg_node.set( "substitution", f_arg.get( "name" ) )
        prettywritexml( output_tree, "DropFileOfCTreeUnmatchedArgsPass_output.xml" )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
'''
class DropFileOfCTreeUnmatchedArgsWithSuggestionsPass ( Pass )
Purpose:
    export an xml file that contains most of the skeleton of the xml tree
    that could be installed into the input xml for the ImportArgumentAnaloguesPass,
    and also includes suggestions based on the name-score and type-union heuristics
    that were used as an attempt to solve the issue automatically, but were
    found to match too aggressively
'''
class DropFileOfCTreeUnmatchedArgsWithSuggestionsPass ( Pass ):
    # Dumps an XML skeleton of every C-tree argument with no Fortran analogue,
    # plus substitution suggestions produced by two heuristics:
    # (1) unique type-union pairing, (2) name-similarity scoring.
    dependencies = [BaseAssociatePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = DropFileOfCTreeUnmatchedArgsWithSuggestionsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        output_tree = ET.Element( "pass-output" )
        pass_output = SubElement( output_tree, "pass" )
        # NOTE(review): the name is the older pass's name, and pass_output is
        # never referenced again -- procedures below are attached directly to
        # output_tree; confirm this is intentional for the drop file format.
        pass_output.set( "name", "DropFileOfCTreeUnmatchedArgsPass" )
        lapack_c_root = xml_tree.find( "./LAPACKE" )
        lapack_c_procs = lapack_c_root.find( "./procedures" )
        lapack_f_root = xml_tree.find( "./LAPACK" )
        lapack_f_procs = lapack_f_root.find( "./procedures" )
        for c_proc in lapack_c_procs.findall( "./procedure" ):
            # Find the fortran analogue.
            # Note: there should only be one path from C -> Fortran.
            # though there may be many paths from Fortran -> C
            f_proc_ana = c_proc.find( "./analogues/analogue" )
            if f_proc_ana == None:
                continue
            f_proc = xml_tree.find( f_proc_ana.text )
            c_no_match = []
            f_no_match = []
            # Collect C arguments lacking any analogue (matrix_order is a
            # LAPACKE-only argument, so it is expected to be unmatched).
            for c_arg in c_proc.findall( "./arguments-list/argument" ):
                if c_arg.find( "./analogues/" ) == None \
                   and c_arg.get( "name" ) != "matrix_order" :
                    c_no_match.append( c_arg )
            # Collect Fortran arguments lacking an analogue in this C proc.
            for f_arg in f_proc.findall( "./arguments-list/argument" ):
                f_ana_node = f_arg.find( "./analogues" )
                # if zero analogues add no_match
                if f_ana_node == None:
                    f_no_match.append( f_arg )
                    continue
                # if no analogues to this function add no_match
                # (INFO is returned, not passed, on the C side -- expected gap)
                if f_ana_node.find( "./analogue/[@function='" + c_proc.get( "name" ) + "']" ) == None \
                   and f_arg.get( "name" ) != "INFO" :
                    f_no_match.append( f_arg )
            if c_no_match == [] :
                continue
            # Emit the skeleton entry for this procedure.
            proc_node = SubElement( output_tree, "procedure" )
            proc_node.set( "name", c_proc.get( "name" ) )
            proc_node.set( "path", "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']")
            for c_arg in c_no_match:
                arg_node = SubElement( proc_node, "argument" )
                arg_info = SubElement( arg_node, "argument-info" )
                arg_node.set( "name", c_arg.get( "name" ) )
                # "????" placeholders are overwritten below when a heuristic
                # finds a substitution, or hand-edited in the dropped file.
                arg_node.set( "substitution", "????" )
                arg_node.set( "substitution-path", "????")
                arg_info.set( "path", proc_node.get( "path" ) + "/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']" )
                arg_info.set( "type", c_arg.get( "type" ) )
                arg_info.set( "refdepth", c_arg.get("refdepth") )
                # NOTE(review): f_no_match is always a list, never None, so
                # this condition is always true; `f_no_match != []` was
                # probably intended.
                if f_no_match != None:
                    possibles = SubElement( arg_node, "possible-analogues" )
                    for f_arg in f_no_match:
                        possible = SubElement( possibles, "possible" )
                        possible.set( "name", f_arg.get( "name" ) )
                        possible.set( "path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
                        possible.set( "semantic", f_arg.get( "semantic" ) )
                        possible.set( "intent", f_arg.get( "intent" ) )
                        possible.set( "type", f_arg.get( "type" ) )
            print c_proc.get( "name" ), f_proc.get( "name" )
            # Map concrete C/Fortran type spellings onto a shared abstract
            # type vocabulary so arguments can be unioned across languages.
            # The values are only used as internal grouping keys.
            type_map = {
                "LOGICAL" : "booelan",   # NOTE(review): typo for "boolean"; harmless as it is only a key
                "lapack_int" : "int32",
                "INTEGER" : "int32",
                "lapack_complex_double" : "complex128",
                "lapack_complex_float" : "complex64",
                "COMPLEX*16" : "complex128",
                "COMPLEX" : "complex64",
                "DOUBLE" : "real32",     # NOTE(review): DOUBLE -> real32 looks suspicious (cf. "double" -> real64); confirm
                "DOUBLE PRECISION" : "real64",
                "REAL" : "real32",
                "SINGLE PRECISION" : "real32",
                "double" : "real64",
                "float" : "real32",
                "char" : "char",
                "CHARACTER" : "char",
                "CHARACTER*1" : "char"
            }
            # Bucket all still-unmatched arguments (both trees) by abstract type.
            t_sets = {}
            for arg in c_no_match:
                type = type_map[ arg.get( "type" ) ]
                if not type in t_sets:
                    t_sets[ type ] = set()
                t_sets[ type ].add( arg )
            for arg in f_no_match:
                #print f_proc.get("name"), arg.get("name")
                type = type_map[ arg.get( "type" ) ]
                if not type in t_sets:
                    t_sets[ type ] = set()
                t_sets[ type ].add( arg )
            for type in t_sets:
                # when there only exists a pair of arguments in a type,
                # and those arguments are each in opposite code trees (fortran/C)
                # it can heuristically be assumed that those arguments can be associated
                if len( t_sets[ type ] ) == 2:
                    arg_1 = t_sets[ type ].pop()
                    arg_2 = t_sets[ type ].pop()
                    # XOR: exactly one assignment of (C-side, Fortran-side).
                    if (arg_1 in c_no_match and arg_2 in f_no_match ) ^ \
                       (arg_2 in c_no_match and arg_1 in f_no_match ):
                        c_arg = arg_1 if arg_1 in c_no_match else arg_2
                        f_arg = arg_2 if arg_2 in f_no_match else arg_1
                        print "match", c_arg.get("name"), "to", f_arg.get("name"),"unique type union"
                        # ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
                        # Note that it is totally possible to create the path string from the two atributes of the tag.
                        # easier to create once here, instead of manytimes everywhere else.
                        arg_node = proc_node.find( "./argument/[@name='" + c_arg.get( "name" ) + "']" )
                        arg_node.set( "substitution", f_arg.get( "name" ) )
                        arg_node.set( "substitution-path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
                        # Disabled: direct write-back of the analogue records
                        # into the main tree (kept for reference).
                        '''
                        c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
                        c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
                        c_ana_node.set( "function", f_proc.get( "name" ) )
                        c_ana_node.set( "name", f_arg.get( "name" ) )
                        f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
                        f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
                        f_ana_node.set( "function", c_proc.get( "name" ) )
                        f_ana_node.set( "name", c_arg.get( "name" ) )
                        '''
                        c_no_match.remove( c_arg )
                        f_no_match.remove( f_arg )
                # if there are more than two arguments in a type
                # we can try to match the strings from the
                elif len( t_sets[ type ] ) > 2 :
                    change = True # True to emulate do-while
                    iter = 1
                    while change:
                        print "Iteration:", iter
                        change = False
                        c_removes = []
                        f_removes = []
                        for c_arg in c_no_match:
                            # Find the Fortran argument(s) with the lowest
                            # name-similarity score to this C argument.
                            min_list = []
                            min_score = 10**1000   # effectively +infinity sentinel
                            for f_arg in f_no_match:
                                score = score_string( c_arg.get("name").lower(), f_arg.get("name" ).lower() )
                                if score < min_score:
                                    min_score = score
                                    min_list = [ f_arg ]
                                elif score == min_score:
                                    min_list.append( f_arg )
                            # Ambiguous best score: refuse to guess.
                            if len( min_list ) >1 :
                                print "BOTCHED matching for", c_arg.get("name"),": args",
                                for arg in min_list:
                                    print arg.get("name"),",",
                                print "have same score", min_score
                                continue
                            # NOTE(review): if f_no_match is empty, min_list is
                            # [] and this raises IndexError -- confirm callers
                            # can never reach that state.
                            min = min_list[0]
                            if min_score > 2:
                                print "FAILED to match", c_arg.get("name"), "to", min.get("name"), "score", min_score, "was too bad"
                                continue
                            change = True
                            print "match", c_arg.get("name"), "to", min.get("name"), "score", min_score
                            f_arg = min
                            # ?_ana_node is the analogue record under ? language (ie c_ana_node notes the argument in the fortran tree, but lives in the C tree)
                            # Note that it is totally possible to create the path string from the two atributes of the tag.
                            # easier to create once here, instead of manytimes everywhere else.
                            arg_node = proc_node.find( "./argument/[@name='" + c_arg.get( "name" ) + "']" )
                            arg_node.set( "substitution", f_arg.get( "name" ) )
                            arg_node.set( "substitution-path", "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']" )
                            # Disabled: direct write-back (kept for reference).
                            '''
                            c_ana_node = SubElement( SubElementUnique( c_arg, "analogues" ), "analogue" )
                            c_ana_node.text = "./LAPACK/procedures/procedure/[@name='" + f_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + f_arg.get( "name" ) + "']"
                            c_ana_node.set( "function", f_proc.get( "name" ) )
                            c_ana_node.set( "name", f_arg.get( "name" ) )
                            f_ana_node = SubElement( SubElementUnique( f_arg, "analogues" ), "analogue" )
                            f_ana_node.text = "./LAPACKE/procedures/procedure/[@name='" + c_proc.get( "name" ) + "']/arguments-list/argument/[@name='" + c_arg.get( "name" ) + "']"
                            f_ana_node.set( "function", c_proc.get( "name" ) )
                            f_ana_node.set( "name", c_arg.get( "name" ) )
                            '''
                            # Defer removal until the scan completes -- we are
                            # iterating c_no_match/f_no_match right now.
                            c_removes.append( c_arg )
                            f_removes.append( f_arg )
                        for r in c_removes:
                            c_no_match.remove( r )
                        for r in f_removes:
                            f_no_match.remove( r )
                        iter += 1
                    print "No changes"
                    for c_arg in c_no_match:
                        print "Could not match", c_arg.get( "name" )
                    for f_arg in f_no_match:
                        print "Could not match", f_arg.get( "name" )
            print ""
        prettywritexml( output_tree, "DropFileOfCTreeUnmatchedArgsPass_output.xml" )
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindLAPACKFunctionGroups ( Pass ):
dependencies = [ChapelizeLAPACKEFunctionsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindLAPACKFunctionGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapacke_root = xml_tree.find( "./LAPACKE" )
lapacke_procs = lapacke_root.find( "./procedures" )
module_root = xml_tree.find( "./chapel-module" )
module_procs = module_root.find( "./procedures" )
names = set()
groups = {}
for proc in lapacke_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
base_name = proc_name.replace( "LAPACK_", "" ).replace( "LAPACKE_", "" ).replace( "_work", "" )
match = func_name_group_regex.search( base_name );
if match == None:
print proc_name, "ie", base_name, "does not match regex"
continue
#names.add( base_name );
func = match.group( "function" )
config = match.group( "mtype" )
if not func in groups:
groups[ func ] = {}
if not config in groups[func] :
groups[func][config] = []
groups[func][config].append( proc_name )
group_counts = 0
config_count = 0
type_counts = 0
for func in groups:
print func
group_counts += 1
for config in groups[func]:
print "\t", config
config_counts += 1
for name in groups[func][config]:
print "\t\t", name
type_counts += 1
print group_counts, config_counts, type_counts
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDuplicateLAPACKEFunctions ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDuplicateLAPACKEFunctions
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
root_procs = xml_tree.find( "./LAPACKE/procedures" )
proc_names = set()
for proc in root_procs.findall( "./procedure" ):
proc_name = proc.get( "name" )
#print proc_name
if proc_name in proc_names:
raise GeneralPassFailure( proc_name )
else:
proc_names.add( proc_name )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class PrintArgDoc ( Pass ):
    # Exploration pass: collects and de-duplicates the documentation text of
    # one argument name ("A") across all LAPACK procedures, then prints the
    # distinct normalized variants and a docs-coverage summary.
    dependencies = [FuncArgsDocPass, FuncArgsTypePass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = PrintArgDoc
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        lapack_node = xml_tree.find( "./LAPACK" )
        text_node = lapack_node.find( "./text" )
        procs_node = lapack_node.find( "./procedures" )
        doc_set = set();  # distinct normalized documentation strings
        total = 0         # occurrences of the argument across procedures
        docked = 0        # of those, how many carried documentation
        arg="A"           # the argument name under inspection
        for proc_node in procs_node.findall( "./procedure" ):
            for arg_node in proc_node.findall( "./arguments-list/argument/[@name='"+ arg +"']" ):
                doc_node = arg_node.find( "documentation" )
                total += 1
                if doc_node == None:
                    print proc_node.get( "name" ), "/", arg, "has no documentation"
                    continue
                # Normalize aggressively (case, dashes, "the", boilerplate,
                # all whitespace) so trivially-different wordings collapse.
                doc = doc_node.text.lower()
                doc = doc.replace( "-", "" )
                doc = doc.replace( "the", "" )
                doc = re.sub( "lda\s+is\s+integer", "", doc )
                doc = re.sub( "\s+", "", doc )
                doc_set.add( doc )
                docked += 1
        doc_list = sorted( list( doc_set ), key=len )
        for i in doc_list:
            print i
        # distinct variants / documented occurrences / total occurrences
        print len( doc_list ), "/", docked, "/", total
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDifferenArgumentsPass ( Pass ):
    # Exploratory pass: for the 'sv' group only, checks whether all typed
    # variants of each matrix-configuration take the same number of arguments
    # and whether those arguments agree in name/semantic/intent.
    dependencies = [BucketLAPACKFunctionGroupsPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = FindDifferenArgumentsPass
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
        for group_node in abstract_lapack.findall( "./group/[@name='sv']" ):
            for config_node in group_node.findall( "./matrix-configuration" ):
                name_to_args = {} # function_name => { order => args_node }
                name_to_counts = {} # function_name => number
                for type_node in config_node.findall( "./type" ):
                    chap_func = xml_tree.find( type_node.get( "analogue" ) )
                    name_to_args[ type_node.get( "name" ) ] = {}
                    for arg in chap_func.findall( "./arguments-list/argument" ):
                        name_to_args[ type_node.get( "name" ) ][ arg.get("position") ] = arg
                    name_to_counts[ type_node.get( "name" ) ] = len( name_to_args[ type_node.get( "name" ) ] )
                # Do all typed variants have the same argument count?
                all_same = True
                all_count = 0
                names = name_to_counts.keys()
                for i in range( len( names ) - 1 ):
                    all_same = all_same and ( name_to_counts[ names[i] ] == name_to_counts[ names[i+1] ] )
                print all_same
                # NOTE(review): names[1] raises IndexError when only one typed
                # variant exists; names[0] was probably intended given the
                # "grab arbitrary count" comment -- confirm before fixing.
                all_count = name_to_counts[ names[1] ] # grab arbitrary count if all the same
                for pos in range( all_count ):
                    is_same = True
                    for i in range( len(names)-1):
                        # NOTE(review): name_to_args[names[i]] is the
                        # position->arg dict, so .get("name") here always
                        # returns None and each comparison is vacuously True;
                        # indexing by `pos` first was probably intended.
                        is_same = is_same and ( name_to_args[names[i]].get("name") == name_to_args[names[i+1]].get("name") ) \
                        and ( name_to_args[names[i]].get("semantic") == name_to_args[names[i+1]].get("semantic") ) \
                        and ( name_to_args[names[i]].get("intent") == name_to_args[names[i+1]].get("intent") )
                    print pos, is_same
                    if not is_same:
                        return
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class CountGroups ( Pass ):
dependencies = [BucketLAPACKFunctionGroupsPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDifferenArgumentsPass
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
groups = 0
configs = 0
types = 0
for group_node in abstract_lapack.findall( "./group" ):
groups += 1
for config_node in group_node.findall( "./matrix-configuration" ):
configs += 1
for type_node in config_node.findall( "./type" ):
types += 1
print groups, configs, types
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class DropFileOfGroups ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = DropFileOfGroups
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group/[@name='sv']" ):
for config_node in group_node.findall( "./matrix-configuration" ):
interface_tree = SubElement( config_node, "method-arguments" )
argument = SubElement( interface_tree, "argument" )
argument.set( "name", "?" )
argument.set( "intent" , "?" )
argument.set( "semantic", "?" )
argument.set( "type", "?" )
relation_tree = SubElement( config_node, "arguments-relationships" )
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
arg_relate = SubElement( relation_tree, "argument" )
arg_relate.set( "name", arg.get("name") )
arg_relate.text = "RELATIONSHIP"
prettyprintxml( abstract_lapack)
prettywritexml( abstract_lapack, "DropFilesOfGroups.xml" )
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class TryMatrixArgsUnion ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = TryMatrixArgsUnion
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
for arg in config_node.findall( "./analogue-arguments-list/argument" ):
pass
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class SolveArgsUnionFor ( Pass ):
    # Exploratory pass: across all matrix-configurations, computes the
    # intersection (`unique`) and union (`co`) of array-argument names,
    # printing intermediate state for configurations that have an "m" arg.
    dependencies = [BaseAbstractLAPACKPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = SolveArgsUnionFor
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
        unique = set()  # intersection of array-arg names over all configs
        co = set()      # union of array-arg names over all configs
        non = set()     # unused here
        unset = True;   # True until `unique` is seeded by the first config
        for group_node in abstract_lapack.findall( "./group" ):
            for config_node in group_node.findall( "./matrix-configuration" ):
                print config_node.get( "name" ) + group_node.get( "name" )
                config_args = set()
                array_args = set()
                array_dims = {}
                for arg in config_node.findall( "./analogue-arguments-list/argument" ):
                    config_args.add( arg.get( "name" ).lower() )
                    if arg.get( "semantic" ) == "array":
                        array_args.add( arg.get("name") )
                        #prettyprintxml( arg )
                if "m" in config_args:
                    print array_args
                    for elem in array_args:
                        print elem
                co |= array_args
                # Seed the intersection from the first configuration seen,
                # then narrow it with each subsequent one.
                if unset:
                    unique |= array_args
                    unset = False;
                else:
                    unique &= array_args
                print unique, "\n"
        print "="*10
        print unique, "\n"
        print co, "\n"
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindDifferentLengthCalls ( Pass ):
dependencies = [BaseLAPACKEPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = FindDifferentLengthCalls
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
procs_dict = {}
fams = set()
for proc in xml_tree.findall( "./LAPACKE/procedures/procedure" ):
if proc.get("name").startswith("LAPACK_"):
continue
base_name = proc.get("name").replace( "LAPACKE_", "" )
match = func_name_group_regex.search( base_name );
if match == None:
#print proc.get("name"), "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
name = config + func
if not name in procs_dict:
procs_dict[ name ] = set()
procs_dict[name].add( len( proc.findall( "./arguments-list/argument" ) ) )
if len( procs_dict[name] ) > 1 :
fams.add( name )
#return
#print procs_dict
for fam in fams:
print fam
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class IsNOrMEverTheSame ( Pass ):
dependencies = [BaseLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = IsNorMEverTheSame
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
lapack_node = xml_tree.find( "./LAPACK" )
text_node = lapack_node.find( "./text" )
procs_node = lapack_node.find( "./procedures" )
for proc_node in procs_node.findall( "./procedure" ):
proc_name = proc_node.get( "name" )
'''
base_name = proc_name.lower()
match = func_name_group_regex.search( base_name );
if match == None:
#print proc_name, "(", base_name, ") does not match regex"
continue
func = match.group( "function" )
config = match.group( "config" )
type = match.group( "type" )
if not config.startswith( "ge" ):
continue
'''
arg_names = [ arg.get("name") for arg in proc_node.findall( "./arguments-list/argument" ) ]
for arg_node in proc_node.findall( "./arguments-list/argument" ):
doc_node = arg_node.find( "documentation" )
if doc_node == None or arg_node.get("semantic") != "scalar" or arg_node.get("type").lower() != "integer":
continue
what = []
who = []
string = []
for m in scalar_matrix_relation_regex.finditer( doc_node.text ):
if not m.group( "what" ) in ["rows", "columns", "order", "rank"] :
continue
names = m.group( "who" ).strip()
names_list = []
if " and " in names:
names_list = [ name.strip() for name in names.split( "and" ) ]
else:
names_list = [ names ]
nameHasSpace = False
for name in names_list:
if " " in name:
nameHasSpace = True
break
if nameHasSpace:
print names, " contains non names. Skipping."
continue
removes = []
for name in names_list:
if not name in arg_names:
removes.append( name )
for rm in removes:
names_list.remove( rm )
if len( names_list ) == 0:
print "Names list had no argument names. Skipping"
continue
what.append( m.group( "what" ) )
who.append( names_list )
string.append( re.sub( "\s+", " ", m.group(0) ) )
if len( what ) == 0 and len( who ) == 0:
continue
#proc_info[ proc_name ][ arg_node.get( "name" ) ] = [ what, who, string]
associate_array = str()
associate_field = str()
first = True
for i in range( len( who ) ):
for array in who[i]:
associate_array += ( "," if not first else "" ) + array
associate_field += ( "," if not first else "" ) + what[i]
first = False
arg_node.set( "associate-array", associate_array )
arg_node.set( "associate-field", associate_field )
prettyprintxml( proc_node )
'''
for func in proc_info:
if proc_info[func] == {}:
continue
print func
for arg in proc_info[func]:
print "\t", arg
for elem in proc_info[func][arg]:
print "\t\t", elem
'''
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class FindGroupsWithUncommon ( Pass ):
dependencies = [BaseAbstractLAPACKPass]
complete = False
@staticmethod
def apply( xml_tree ):
selfname = PretendCreate
Pass.resolve( selfname, xml_tree )
print "[",selfname,"]"
abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
for group_node in abstract_lapack.findall( "./group" ):
for config_node in group_node.findall( "./matrix-configuration" ):
printed = False
for type in config_node.findall( "./types/type" ):
if type.find( "./arguments-list" ) != None:
if not printed:
print config_node.get("name") + group_node.get("name")
printed = True
print "\t", type.get("name")
selfname.complete = True
print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
class TestInputGroupsGen ( Pass ):
    # Prototype code generator: for each group/configuration described in
    # DropAttemptedAssociations_input.xml, prints a generic Chapel proc that
    # dispatches on element type T to the type-specific chapel analogue.
    dependencies = [BaseCodegenReadyPass]
    complete = False
    @staticmethod
    def apply( xml_tree ):
        selfname = TestInputGroupsGen
        Pass.resolve( selfname, xml_tree )
        print "[",selfname,"]"
        #abstract_lapack = xml_tree.find( "./Abstract-LAPACK" )
        group_input = loadxml( "DropAttemptedAssociations_input.xml" )
        for group in group_input.findall( "./group" ):
            for config in group.findall( "./matrix-configuration" ):
                code = SequenceOfProducers()
                print config.get( "name" ) + group.get( "name" )
                # Header: proc <config><group>( <method args> ) : lapack_int
                code.append( SegmentProducer( "proc " + config.get( "name" ) + group.get( "name" ) ) )
                args_producer = ListProducer(", ", "(", ")")
                for arg in config.findall( "./method-arguments/argument" ):
                    # intent name : [array marker] type [= default]
                    args_producer.append( SegmentProducer(
                        arg.get("intent") + " " + \
                        arg.get("name") + " : " + \
                        ("[] " if arg.get("semantic") == "array" else "") + \
                        arg.get("type") + \
                        ( " = " + arg.text if arg.text != None and arg.text.strip() != "" else "" )
                        )
                    )
                code.append( args_producer )
                code.append( LineProducer( ": lapack_int" ) )
                # where clause: restrict T to the configuration's types.
                code.append( SegmentProducer( "where " ) )
                where_producer = ListProducer( " || ", "", "" )
                for type in config.findall("./types/type"):
                    where_producer.append( SegmentProducer( "T == " + type.get( "type" ) ) )
                code.append( where_producer )
                info_var = config.get( "name" ) + group.get( "name" ) + "_return_info"
                func_body = ScopeProducer()
                func_body.append( LineProducer( "var " + info_var + " : lapack_int;" ) )
                #if_bodies = SequenceOfProducers()
                # Pre-resolve each analogue argument's relationship element
                # so the per-type loop below can look expressions up by name.
                arg_relates = {}
                ana_args = []
                for arg in config.findall( "./analogue-arguments-list/argument" ):
                    arg_name = arg.get("name")
                    arg_relates[ arg_name ] = config.find( "./arguments-relationships/argument/[@name='" + arg_name + "']" )
                    ana_args.append( arg );
                # One `if ( T == <type> )` branch per concrete element type,
                # each assigning the analogue call's result to info_var.
                for type in config.findall("./types/type"):
                    chpl_ana = xml_tree.find( type.get( "analogue" ) )
                    if_condition = LineProducer( "if ( T == " + type.get("type") + " )" )
                    func_body.append( if_condition )
                    if_body = ScopeProducer()
                    call_equals = SegmentProducer( info_var + " = " + chpl_ana.get( "name" ) )
                    call_seq = ListProducer( ", ", "(", ")" )
                    for ana_arg in ana_args:
                        # Non-array actuals get an explicit cast to the
                        # analogue's declared type.
                        call_seq.append( SegmentProducer(
                            "(" + arg_relates[ana_arg.get("name")].text.strip() + ")" + \
                            (" : " + ana_arg.get("type") if ana_arg.get("semantic") != "array" else "")
                            )
                        )
                    if_body.append( call_equals + call_seq + LineProducer( ";" ) )
                    func_body.append( if_body )
                func_body.append( LineProducer( "return " + info_var + ";" ) )
                code.append( func_body )
                print code.generate()
        selfname.complete = True
        print "[",selfname,":", "Completed" if selfname.complete else "FAILED", "]\n"
| {
"content_hash": "db039c26e21f1dbc64c497ba680754da",
"timestamp": "",
"source": "github",
"line_count": 4379,
"max_line_length": 296,
"avg_line_length": 35.955469285224936,
"alnum_prop": 0.5759198216565364,
"repo_name": "chizarlicious/chapel",
"id": "8825060ab58017e21351f882b2d4fee572837d0c",
"size": "157449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "util/misc/gen-LAPACK/extern-tool/Passes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2072"
},
{
"name": "C",
"bytes": "3689430"
},
{
"name": "C++",
"bytes": "3493095"
},
{
"name": "CSS",
"bytes": "919"
},
{
"name": "Chapel",
"bytes": "11905780"
},
{
"name": "Cuda",
"bytes": "4304"
},
{
"name": "Emacs Lisp",
"bytes": "14304"
},
{
"name": "FORTRAN",
"bytes": "18153"
},
{
"name": "Gnuplot",
"bytes": "5536"
},
{
"name": "HTML",
"bytes": "2419"
},
{
"name": "JavaScript",
"bytes": "50663"
},
{
"name": "LLVM",
"bytes": "16367"
},
{
"name": "Lex",
"bytes": "37600"
},
{
"name": "Makefile",
"bytes": "108072"
},
{
"name": "Mathematica",
"bytes": "4971"
},
{
"name": "Perl",
"bytes": "240233"
},
{
"name": "Python",
"bytes": "646199"
},
{
"name": "Shell",
"bytes": "174157"
},
{
"name": "TeX",
"bytes": "869966"
},
{
"name": "VimL",
"bytes": "14876"
},
{
"name": "Yacc",
"bytes": "2337"
},
{
"name": "Zimpl",
"bytes": "1115"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
* power targets
* trying without first two sigmoid layers.
* updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0
which fixes LSTM bug.
https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0
* Subsampling *bidirectional* LSTM
* Output every sequence in the batch
* Change W_in_to_cell from Normal(1.0) to Uniform(5)
* put back the two sigmoid layers
* use Conv1D to create a hierarchical subsampling LSTM
* Using LSTM (not BLSTM) to speed up training while testing
Changes:
* simplify. Just use conv at input
"""
# Data source: real appliance power readings from the UKDALE dataset; the
# targets are three appliances' powers, with the target subsampled 5x.
source = RealApplianceSource(
    '/data/dk3810/ukdale.h5',
    ['fridge freezer', 'hair straighteners', 'television'],
    max_input_power=1000, max_appliance_powers=[300, 500, 200],
    window=("2013-06-01", "2014-07-01"),
    output_one_appliance=False,
    boolean_targets=False,
    min_on_duration=60,
    subsample_target=5
)

# Network: a Conv1D front end (stride 5 downsamples 1000 -> 200 timesteps),
# two LSTM layers, and a sigmoid dense output (one unit per appliance).
net = Net(
    experiment_name="e50a",
    source=source,
    learning_rate=1e-1,
    save_plot_interval=50,
    loss_function=crossentropy,
    layers_config=[
        {
            # assumes Conv1D wants (batch, channels, time) -- TODO confirm
            'type': ReshapeLayer,
            'shape': (5, 1, 1000)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 5,
            'stride': 5
        },
        { # TODO: I think this should perhaps be dimshuffle, not reshape???
            'type': ReshapeLayer,
            'shape': (5, 200, 20)
        },
        {
            'type': LSTMLayer,
            'num_units': 40,
            'W_in_to_cell': Uniform(5)
        },
        {
            'type': LSTMLayer,
            'num_units': 80,
            'W_in_to_cell': Uniform(5)
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': sigmoid
        }
    ]
)

net.print_net()
net.compile()
net.fit()
| {
"content_hash": "205a9c97b3d8a3c8111fd156863492ea",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 92,
"avg_line_length": 27.61627906976744,
"alnum_prop": 0.6298947368421053,
"repo_name": "JackKelly/neuralnilm_prototype",
"id": "e6bbeaaa2786710475d28c9ee83bd29804ea4c55",
"size": "2375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e50.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
} |
class TestTemplate:
    # Verifies that editing a template referenced by a task is detected via
    # the config hash, so the task picks up the change on its next run.
    config = """
        templates:
          test_series:
            series:
              test:
                - House
            settings:
              test:
                identified_by: ep
        tasks:
          test:
            template: test_series
            next_series_episodes:
              from_start: yes
            rerun: 0
    """
    def test_config_template_hash_check(self, manager, execute_task):
        # First run: a single series configured, so one entry is emitted.
        task = execute_task('test')
        assert len(task.entries) == 1, 'Should have emitted House S01E01'
        # Mutate the template in-place; the changed hash must make the next
        # run see the added series as well.
        manager.config['templates']['test_series']['series']['test'].append('Hawaii Five-O')
        task = execute_task('test')
        assert len(task.entries) == 2, 'Should have emitted House S01E02 and Hawaii Five-O S01E01'
| {
"content_hash": "2fa71756eb0b46ea9d87376b48fdd875",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 98,
"avg_line_length": 29.444444444444443,
"alnum_prop": 0.519496855345912,
"repo_name": "ianstalk/Flexget",
"id": "73f4cd1584ef4cf084b1624fb468fef9ba7e34ba",
"size": "795",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2063551"
}
],
"symlink_target": ""
} |
from euler import *
if __name__ == '__main__':
    # Project Euler #37: sum the eleven primes that remain prime when
    # digits are successively removed from either end.
    primes = PrimesBelow(8e5)
    # Remove terminal 1's and 9's
    # Remove beginning 1's and 9's
    # (a truncatable prime cannot end in 1/9 nor start with 1/9, since the
    # final single-digit remnant must itself be prime)
    maybeTruncatable = [prime for prime in primes if prime % 10 not in (1, 9)]
    maybeTruncatable = [prime for prime in maybeTruncatable if str(prime)[0] not in ('1', '9')]
    truncatables = []
    for prime in maybeTruncatable:
        if IsTruncatable(prime, primes):
            truncatables.append(prime)
        # Stop once all 11 are found; the "+ 3" presumably allows for
        # single-digit primes that IsTruncatable also accepts -- TODO
        # confirm against IsTruncatable's semantics.
        if len(truncatables) > 11 + 3:
            break
    # Single-digit primes are excluded from the answer, hence prime > 9.
    print(sum(prime for prime in truncatables if prime > 9))
| {
"content_hash": "4c99cb6b18e1ac9e8c2b7ac31626d121",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 95,
"avg_line_length": 29.1,
"alnum_prop": 0.6202749140893471,
"repo_name": "jeffseif/projectEuler",
"id": "0a0f5affacccdefa2af51d7c3841bc1ff0c8ad03",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/p037.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "59760"
},
{
"name": "Shell",
"bytes": "612"
}
],
"symlink_target": ""
} |
"""
Volume driver for IBM FlashSystem storage systems with iSCSI protocol.
Limitations:
1. Cinder driver only works when open_access_enabled=off.
"""
import random
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder import interface
from cinder import utils
from cinder.volume.drivers.ibm import flashsystem_common as fscommon
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
# Driver-specific configuration options (registered globally below so that
# oslo.config can parse them from cinder.conf).
flashsystem_iscsi_opts = [
    cfg.IntOpt('flashsystem_iscsi_portid',
               default=0,
               help='Default iSCSI Port ID of FlashSystem. '
                    '(Default port is 0.)')
]

CONF = cfg.CONF
CONF.register_opts(flashsystem_iscsi_opts)
@interface.volumedriver
class FlashSystemISCSIDriver(fscommon.FlashSystemDriver):
"""IBM FlashSystem iSCSI volume driver.
Version history:
.. code-block:: none
1.0.0 - Initial driver
1.0.1 - Code clean up
1.0.2 - Add lock into vdisk map/unmap, connection
initialize/terminate
1.0.3 - Initial driver for iSCSI
1.0.4 - Split Flashsystem driver into common and FC
1.0.5 - Report capability of volume multiattach
1.0.6 - Fix bug #1469581, add I/T mapping check in
terminate_connection
1.0.7 - Fix bug #1505477, add host name check in
_find_host_exhaustive for FC
1.0.8 - Fix bug #1572743, multi-attach attribute
should not be hardcoded, only in iSCSI
1.0.9 - Fix bug #1570574, Cleanup host resource
leaking, changes only in iSCSI
1.0.10 - Fix bug #1585085, add host name check in
_find_host_exhaustive for iSCSI
1.0.11 - Update driver to use ABC metaclasses
1.0.12 - Update driver to support Manage/Unmanage
existing volume
"""
VERSION = "1.0.12"
# ThirdPartySystems wiki page
CI_WIKI_NAME = "IBM_FlashSystem_CI"
    def __init__(self, *args, **kwargs):
        """Initialize the driver and register common, iSCSI, and SAN options."""
        super(FlashSystemISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(fscommon.flashsystem_opts)
        self.configuration.append_config_values(flashsystem_iscsi_opts)
        self.configuration.append_config_values(san.san_opts)
def _check_vdisk_params(self, params):
# Check that the requested protocol is enabled
if not params['protocol'] in self._protocol:
msg = (_("'%(prot)s' is invalid for "
"flashsystem_connection_protocol "
"in config file. valid value(s) are "
"%(enabled)s.")
% {'prot': params['protocol'],
'enabled': self._protocol})
raise exception.InvalidInput(reason=msg)
# Check if iscsi_ip is set when protocol is iSCSI
if params['protocol'] == 'iSCSI' and params['iscsi_ip'] == 'None':
msg = _("iscsi_ip_address must be set in config file when "
"using protocol 'iSCSI'.")
raise exception.InvalidInput(reason=msg)
def _create_host(self, connector):
"""Create a new host on the storage system.
We create a host and associate it with the given connection
information.
"""
LOG.debug('enter: _create_host: host %s.', connector['host'])
rand_id = six.text_type(random.randint(0, 99999999)).zfill(8)
host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector),
rand_id)
ports = []
if 'iSCSI' == self._protocol and 'initiator' in connector:
ports.append('-iscsiname %s' % connector['initiator'])
self._driver_assert(ports,
(_('_create_host: No connector ports.')))
port1 = ports.pop(0)
arg_name, arg_val = port1.split()
ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name',
'"%s"' % host_name]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return('successfully created' in out,
'_create_host', ssh_cmd, out, err)
for port in ports:
arg_name, arg_val = port.split()
ssh_cmd = ['svctask', 'addhostport', '-force',
arg_name, arg_val, host_name]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return(
(not out.strip()),
'_create_host', ssh_cmd, out, err)
LOG.debug(
'leave: _create_host: host %(host)s - %(host_name)s.',
{'host': connector['host'], 'host_name': host_name})
return host_name
def _find_host_exhaustive(self, connector, hosts):
LOG.debug('enter: _find_host_exhaustive hosts: %s.', hosts)
hname = connector['host']
hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts]
if hname in hnames:
host = hosts[hnames.index(hname)]
ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return(
out.strip(),
'_find_host_exhaustive', ssh_cmd, out, err)
for attr_line in out.split('\n'):
attr_name, foo, attr_val = attr_line.partition('!')
if (attr_name == 'iscsi_name' and
'initiator' in connector and
attr_val == connector['initiator']):
LOG.debug(
'leave: _find_host_exhaustive connector: %s.',
connector)
return host
else:
LOG.warning(_LW('Host %(host)s was not found on backend storage.'),
{'host': hname})
return None
def _get_vdisk_map_properties(
self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params):
"""Get the map properties of vdisk."""
LOG.debug(
'enter: _get_vdisk_map_properties: vdisk '
'%(vdisk_name)s.', {'vdisk_name': vdisk_name})
preferred_node = '0'
IO_group = '0'
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
for k, node in self._storage_nodes.items():
if vdisk_params['protocol'] != node['protocol']:
continue
if node['id'] == preferred_node:
preferred_node_entry = node
if node['IO_group'] == IO_group:
io_group_nodes.append(node)
if not io_group_nodes:
msg = (_('No node found in I/O group %(gid)s for volume %(vol)s.')
% {'gid': IO_group, 'vol': vdisk_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not preferred_node_entry:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
LOG.warning(_LW('_get_vdisk_map_properties: Did not find a '
'preferred node for vdisk %s.'), vdisk_name)
properties = {
'target_discovered': False,
'target_lun': lun_id,
'volume_id': vdisk_id,
}
type_str = 'iscsi'
if preferred_node_entry['ipv4']:
ipaddr = preferred_node_entry['ipv4'][0]
else:
ipaddr = preferred_node_entry['ipv6'][0]
iscsi_port = self.configuration.iscsi_port
properties['target_portal'] = '%s:%s' % (ipaddr, iscsi_port)
properties['target_iqn'] = preferred_node_entry['iscsi_name']
LOG.debug(
'leave: _get_vdisk_map_properties: vdisk '
'%(vdisk_name)s.', {'vdisk_name': vdisk_name})
return {'driver_volume_type': type_str, 'data': properties}
    @utils.synchronized('flashsystem-init-conn', external=True)
    def initialize_connection(self, volume, connector):
        """Perform work so that an iSCSI connection can be made.

        To be able to create an iSCSI connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)

        :param volume: volume to attach
        :param connector: connector information of the attaching host
        :returns: connection properties dict built by
                  _get_vdisk_map_properties
        """
        LOG.debug(
            'enter: initialize_connection: volume %(vol)s with '
            'connector %(conn)s.', {'vol': volume, 'conn': connector})
        vdisk_name = volume['name']
        vdisk_id = volume['id']
        vdisk_params = self._get_vdisk_params(volume['volume_type_id'])
        # Wait for any in-flight vdisk copy to finish before mapping.
        self._wait_vdisk_copy_completed(vdisk_name)
        self._driver_assert(
            self._is_vdisk_defined(vdisk_name),
            (_('vdisk %s is not defined.')
             % vdisk_name))
        lun_id = self._map_vdisk_to_host(vdisk_name, connector)
        properties = {}
        try:
            properties = self._get_vdisk_map_properties(
                connector, lun_id, vdisk_name, vdisk_id, vdisk_params)
        except exception.VolumeBackendAPIException:
            # Roll back the host mapping before re-raising, so the volume
            # is not left mapped to a host without usable properties.
            with excutils.save_and_reraise_exception():
                self.terminate_connection(volume, connector)
                LOG.error(_LE('Failed to collect return properties for '
                              'volume %(vol)s and connector %(conn)s.'),
                          {'vol': volume, 'conn': connector})
        LOG.debug(
            'leave: initialize_connection:\n volume: %(vol)s\n connector '
            '%(conn)s\n properties: %(prop)s.',
            {'vol': volume,
             'conn': connector,
             'prop': properties})
        return properties
@utils.synchronized('flashsystem-term-conn', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Cleanup after connection has been terminated.
When we clean up a terminated connection between a given connector
and volume, we:
1. Translate the given connector to a host name
2. Remove the volume-to-host mapping if it exists
3. Delete the host if it has no more mappings (hosts are created
automatically by this driver when mappings are created)
"""
LOG.debug(
'enter: terminate_connection: volume %(vol)s with '
'connector %(conn)s.',
{'vol': volume, 'conn': connector})
vdisk_name = volume['name']
self._wait_vdisk_copy_completed(vdisk_name)
host_name = self._unmap_vdisk_from_host(vdisk_name, connector)
# checking if host_name none, if not then, check if the host has
# any mappings, if not the host gets deleted.
if host_name:
if not self._get_hostvdisk_mappings(host_name):
self._delete_host(host_name)
LOG.debug(
'leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s.', {'vol': volume, 'conn': connector})
return {'driver_volume_type': 'iscsi'}
def _get_iscsi_ip_addrs(self):
"""get ip address of iSCSI interface."""
LOG.debug('enter: _get_iscsi_ip_addrs')
cmd = ['svcinfo', 'lsportip']
generator = self._port_conf_generator(cmd)
header = next(generator, None)
if not header:
return
for key in self._storage_nodes:
if self._storage_nodes[key]['config_node'] == 'yes':
node = self._storage_nodes[key]
break
if node is None:
msg = _('No config node found.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
for port_data in generator:
try:
port_ipv4 = port_data['IP_address']
port_ipv6 = port_data['IP_address_6']
state = port_data['state']
speed = port_data['speed']
except KeyError:
self._handle_keyerror('lsportip', header)
if port_ipv4 == self.configuration.iscsi_ip_address and (
port_data['id'] == (
six.text_type(
self.configuration.flashsystem_iscsi_portid))):
if state not in ('configured', 'online'):
msg = (_('State of node is wrong. Current state is %s.')
% state)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if state in ('configured', 'online') and speed != 'NONE':
if port_ipv4:
node['ipv4'].append(port_ipv4)
if port_ipv6:
node['ipv6'].append(port_ipv6)
break
if not (len(node['ipv4']) or len(node['ipv6'])):
msg = _('No ip address found.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('leave: _get_iscsi_ip_addrs')
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug('enter: do_setup')
self._context = ctxt
# Get data of configured node
self._get_node_data()
# Get the iSCSI IP addresses of the FlashSystem nodes
self._get_iscsi_ip_addrs()
for k, node in self._storage_nodes.items():
if self.configuration.flashsystem_connection_protocol == 'iSCSI':
if (len(node['ipv4']) or len(node['ipv6']) and
len(node['iscsi_name'])):
node['protocol'] = 'iSCSI'
self._protocol = 'iSCSI'
# Set for vdisk synchronization
self._vdisk_copy_in_progress = set()
self._vdisk_copy_lock = threading.Lock()
self._check_lock_interval = 5
LOG.debug('leave: do_setup')
    def _build_default_params(self):
        # Build the default vdisk parameter dict from the driver
        # configuration; normalize the protocol spelling to 'iSCSI'.
        protocol = self.configuration.flashsystem_connection_protocol
        if protocol.lower() == 'iscsi':
            protocol = 'iSCSI'
        return {
            'protocol': protocol,
            'iscsi_ip': self.configuration.iscsi_ip_address,
            'iscsi_port': self.configuration.iscsi_port,
            # NOTE(review): key is spelled 'iscsi_ported' (not 'portid');
            # looks like a typo, but consumers may depend on it --
            # confirm before renaming.
            'iscsi_ported': self.configuration.flashsystem_iscsi_portid,
        }
def validate_connector(self, connector):
"""Check connector for enabled protocol."""
valid = False
if 'iSCSI' == self._protocol and 'initiator' in connector:
valid = True
if not valid:
msg = _LE('The connector does not contain the '
'required information: initiator is missing')
LOG.error(msg)
raise exception.InvalidConnectorException(missing=(
'initiator'))
| {
"content_hash": "b9c1d9d5eca8a01a266b7b7eed7a12e4",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 79,
"avg_line_length": 37.969849246231156,
"alnum_prop": 0.5548570672313393,
"repo_name": "cloudbase/cinder",
"id": "dc4dd2b9757d2ff9247bf3d2494a1fcd59b1d2e0",
"size": "15740",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/ibm/flashsystem_iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17586629"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
import hr_expense_report | {
"content_hash": "64bffc6c3257f776918710254643c7a3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 24,
"alnum_prop": 0.875,
"repo_name": "vileopratama/vitech",
"id": "7f74fc905a4a69fbcd8e9219748ce3a3080dd092",
"size": "124",
"binary": false,
"copies": "29",
"ref": "refs/heads/master",
"path": "src/addons/hr_expense/report/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
class Disk(GCPResource):
    '''Object to represent a gcp disk'''

    resource_type = "compute.beta.disk"

    # pylint: disable=too-many-arguments
    def __init__(self,
                 rname,
                 project,
                 zone,
                 size,
                 disk_type='pd-standard',  # pd-ssd, local-ssd
                 persistent=True,
                 auto_delete=True,
                 boot=False,
                 device_name=None,
                 image=None,
                 labels=None,
                 label_finger_print=None,
                 index=None,
                 ):
        '''constructor for gcp resource'''
        super(Disk, self).__init__(rname,
                                   Disk.resource_type,
                                   project,
                                   zone)
        # GCP distinguishes persistent disks from ephemeral scratch disks.
        if persistent:
            self._persistent = 'PERSISTENT'
        else:
            self._persistent = 'SCRATCH'
        self._size = size
        self._boot = boot
        self._image = image
        self._device_name = device_name
        self._disk_type = disk_type
        self._disk_url = None  # built lazily by the disk_url property
        self._auto_delete = auto_delete
        self._labels = labels
        self._label_finger_print = label_finger_print
        self._index = index

    @property
    def persistent(self):
        '''property for resource if boot device is persistent'''
        return self._persistent

    @property
    def index(self):
        '''property for index of disk'''
        return self._index

    @property
    def device_name(self):
        '''property for resource device name'''
        return self._device_name

    @property
    def boot(self):
        '''property for resource is a boot device'''
        return self._boot

    @property
    def image(self):
        '''property for resource image'''
        return self._image

    @property
    def disk_type(self):
        '''property for resource disk type'''
        return self._disk_type

    @property
    def disk_url(self):
        '''property for resource disk url (computed lazily)'''
        # 'is None' instead of '== None': identity check per PEP 8.
        if self._disk_url is None:
            self._disk_url = Utils.zonal_compute_url(self.project, self.zone, 'diskTypes', self.disk_type)
        return self._disk_url

    @property
    def size(self):
        '''property for resource disk size'''
        return self._size

    @property
    def labels(self):
        '''property for labels on a disk; defaults to an empty dict'''
        if self._labels is None:
            self._labels = {}
        return self._labels

    @property
    def label_finger_print(self):
        '''property for label_finger_print on a disk'''
        # Bug fix: this previously tested self._labels, which clobbered a
        # caller-supplied fingerprint whenever labels was None, and
        # returned None when labels were given without a fingerprint.
        if self._label_finger_print is None:
            self._label_finger_print = '42WmSpB8rSM='
        return self._label_finger_print

    @property
    def auto_delete(self):
        '''property for resource disk auto delete'''
        return self._auto_delete

    def get_instance_disk(self):
        '''return in vminstance format (inline initializeParams)'''
        return {'deviceName': self.device_name,
                'type': self.persistent,
                'autoDelete': self.auto_delete,
                'boot': self.boot,
                'sizeGb': self.size,
                'initializeParams': {'diskName': self.name,
                                     'sourceImage': Utils.global_compute_url(self.project,
                                                                             'images',
                                                                             self.image)
                                     },
                'labels': self.labels,
                }

    def get_supplement_disk(self):
        '''return in vminstance format (attached by reference)'''
        disk = {'deviceName': self.device_name,
                'type': self.persistent,
                'source': '$(ref.%s.selfLink)' % self.name,
                'autoDelete': self.auto_delete,
                'labels': self.labels,
                }
        if self.label_finger_print:
            disk['labelFingerprint'] = self.label_finger_print
        # NOTE(review): a falsy index (0) is dropped here; confirm whether
        # index 0 should be emitted explicitly.
        if self.index:
            disk['index'] = self.index
        if self.boot:
            disk['boot'] = self.boot
        return disk

    def to_resource(self):
        """ return the resource representation"""
        disk = {'name': self.name,
                'type': Disk.resource_type,
                'properties': {'zone': self.zone,
                               'sizeGb': self.size,
                               'type': self.disk_url,
                               'autoDelete': self.auto_delete,
                               'labels': self.labels,
                               }
                }
        if self.label_finger_print:
            disk['properties']['labelFingerprint'] = self.label_finger_print
        if self.boot:
            disk['properties']['sourceImage'] = Utils.global_compute_url(self.project, 'images', self.image)
        return disk
| {
"content_hash": "864136c697de46481ffaaf6e44cde4ed",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 108,
"avg_line_length": 31.440251572327043,
"alnum_prop": 0.48469693938787756,
"repo_name": "themurph/openshift-tools",
"id": "b9fb967f896ebd5b9344f01978b9595fbb20e4ee",
"size": "5067",
"binary": false,
"copies": "13",
"ref": "refs/heads/prod",
"path": "ansible/roles/lib_gcloud/build/lib/disk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "108987"
},
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "43950"
},
{
"name": "JavaScript",
"bytes": "229"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "11349806"
},
{
"name": "Shell",
"bytes": "752773"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
import weakref
from .anyQt.QtGui import QCompleter
def get_typestrings0(type_):
    """Collect type-name strings from a (possibly nested) type spec.

    A plain string yields a one-element list.  A tuple yields three
    parallel lists -- bare names, double-quoted names and single-quoted
    names -- flattened over any nested tuples.  Anything else raises.
    """
    if isinstance(type_, str):
        return [type_]
    if isinstance(type_, tuple):
        bare, dquoted, squoted = [], [], []
        for item in type_:
            if isinstance(item, tuple):
                sub_bare, sub_dq, sub_sq = get_typestrings0(item)
                bare.extend(sub_bare)
                dquoted.extend(sub_dq)
                squoted.extend(sub_sq)
            elif isinstance(item, str):
                name = str(item)
                bare.append(name)
                dquoted.append('"' + name + '"')
                squoted.append("'" + name + "'")
            else:
                raise Exception(item)
        return bare, dquoted, squoted
    raise Exception(type_)
def get_typestrings(type_):
    """Expand a type spec into every textual form a user might type."""
    collected = get_typestrings0(type_)
    if isinstance(collected, list):
        # Single string spec: already a list of type names.
        return collected
    if isinstance(collected, tuple):
        assert len(collected) == 3
        # Join each quoting variant with both separator styles...
        joined = [sep.join(variant)
                  for sep in (",", ", ")
                  for variant in collected]
        # ...then emit each joined form with and without parentheses.
        out = []
        for parenth in (True, False):
            prefix, suffix = ("(", ")") if parenth else ("", "")
            for text in joined:
                assert isinstance(text, str), text
                out.append(prefix + text + suffix)
        return out
    raise Exception(collected)
class TypeCompleter(object):
    def __init__(self):
        # Widgets to receive the completer, keyed by id(widget); a
        # WeakValueDictionary so the cache does not keep widgets alive.
        self.widgets = weakref.WeakValueDictionary()  # use WeakSet when 2.6 compat is dropped
        # Shared QCompleter instance; built lazily by set_typelist().
        self.completer = None
def set_typelist(self, typelist):
self.typelist = typelist
tl = []
for t in self.typelist:
tt = get_typestrings(t)
tl += tt
self.typestrings = tl
self.completer = QCompleter(self.typestrings)
for widget in self.widgets.values():
widget.setCompleter(self.completer)
    def add_completers(self, v, form, path=None):
        """Recursively walk a form/value tree, registering the widget of
        every field whose type (or typeinfo) is 'type' and attaching the
        current completer to it.

        ``path`` tracks the position in the tree; any exception raised
        while descending is re-raised with the offending path appended
        to its args for easier debugging.
        """
        if path is None:
            ppath = ()
        else:
            ppath = tuple(path)
        # Array form: recurse into each indexed element.
        if form.arraycount > 0:
            for n in range(form.length):
                ppath2 = ppath + (n,)
                try:
                    f = form[n]
                    mv = v[n]
                except (KeyError, IndexError, AttributeError, TypeError) as exc:
                    raise type(exc)(*exc.args + (ppath2,))
                self.add_completers(mv, f, ppath2)
            return
        # Record form: inspect each member field.
        for pname, f in form._members.items():
            ppath2 = ppath + (pname,)
            if hasattr(f, "type") and f.type == "type" or \
               hasattr(f, "typeinfo") and f.typeinfo == "type":
                try:
                    widget = getattr(v, pname).widget
                except AttributeError as exc:
                    raise type(exc)(*exc.args + (ppath2,))
                # Keyed by id(); the WeakValueDictionary drops dead widgets.
                self.widgets[id(widget)] = widget
                if self.completer:
                    widget.setCompleter(self.completer)
            elif hasattr(f, "_members") and f._members is not None:
                try:
                    mv = getattr(v, pname)
                except AttributeError as exc:
                    raise type(exc)(*exc.args + (ppath2,))
                self.add_completers(mv, f, ppath2)
def widgetmodifier(self, mode, parwidget, controller):
if mode == "new":
v = controller._view()
form = controller._form
self.add_completers(v, form)
elif mode == "delete":
return
else:
raise Exception
| {
"content_hash": "613a2d08184e6ecd3df544ec3f259d8c",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 94,
"avg_line_length": 32.027522935779814,
"alnum_prop": 0.47608135204812374,
"repo_name": "agoose77/hivesystem",
"id": "54a4a1d84107b74781a3ac8a488d47492ea20725",
"size": "3491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hiveguilib/PQt/TypeCompleter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
import os
import sys
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from numpy.testing import assert_array_almost_equal, assert_array_equal
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import pytest
from sklearn.base import clone
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.metrics import get_scorer
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.utils import compute_class_weight, _IS_32BIT
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_warns_message
from sklearn.utils import shuffle
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import scale
from sklearn.utils._testing import skip_if_no_parallel
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._logistic import (
LogisticRegression,
_logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
_log_reg_scoring_path)
# Toy 2-feature, 3-sample dataset shared by many tests below.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)  # sparse copy of X
# Two labelings of the same samples: binary (Y1) and 3-class (Y2).
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
    """Check that the model is able to fit the classification data.

    Verifies the learned classes, the hard predictions, and that the
    predicted probabilities form valid distributions whose argmax agrees
    with the labels.
    """
    n_samples = len(y)
    expected_classes = np.unique(y)
    n_classes = expected_classes.shape[0]
    predicted = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, expected_classes)
    assert predicted.shape == (n_samples,)
    assert_array_equal(predicted, y)
    proba = clf.predict_proba(X)
    assert proba.shape == (n_samples, n_classes)
    # Each row of predict_proba must sum to one...
    assert_array_almost_equal(proba.sum(axis=1), np.ones(n_samples))
    # ...and its argmax must reproduce the training labels.
    assert_array_equal(proba.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset
    # Make sure it predicts the correct result on simple datasets.
    # Each configuration is checked on both the dense and sparse data,
    # with a fresh estimator per call (same order as before).
    for make_clf in (lambda: LogisticRegression(random_state=0),
                     lambda: LogisticRegression(C=100, random_state=0),
                     lambda: LogisticRegression(fit_intercept=False,
                                                random_state=0)):
        check_predictions(make_clf(), X, Y1)
        check_predictions(make_clf(), X_sp, Y1)
def test_error():
    # Test for appropriate exception on errors
    msg = "Penalty term must be positive"
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C=-1).fit, X, Y1)
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C="test").fit, X, Y1)
    msg = "is not a valid scoring value"
    assert_raise_message(ValueError, msg,
                         LogisticRegressionCV(scoring='bad-scorer', cv=2).fit,
                         X, Y1)
    # The same parameter-validation errors apply to both estimators.
    for LR in [LogisticRegression, LogisticRegressionCV]:
        msg = "Tolerance for stopping criteria must be positive"
        assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
        msg = "Maximum number of iteration must be positive"
        assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_logistic_cv_mock_scorer():
    """LogisticRegressionCV must select C from the scorer's values and
    reuse the custom scorer in ``score``."""
    class MockScorer:
        """Deterministic scorer that cycles through fixed scores and
        counts how often it is called."""
        def __init__(self):
            self.calls = 0
            self.scores = [0.1, 0.4, 0.8, 0.5]
        def __call__(self, model, X, y, sample_weight=None):
            score = self.scores[self.calls % len(self.scores)]
            self.calls += 1
            return score
    mock_scorer = MockScorer()
    Cs = [1, 2, 3, 4]
    cv = 2
    lr = LogisticRegressionCV(Cs=Cs, scoring=mock_scorer, cv=cv)
    lr.fit(X, Y1)
    # Cs[2] has the highest score (0.8) from MockScorer
    assert lr.C_[0] == Cs[2]
    # scorer called 8 times (cv*len(Cs))
    assert mock_scorer.calls == cv * len(Cs)
    # reset mock_scorer
    mock_scorer.calls = 0
    custom_score = lr.score(X, lr.predict(X))
    assert custom_score == mock_scorer.scores[0]
    assert mock_scorer.calls == 1
def test_logistic_cv_score_does_not_warn_by_default():
    # Scoring a fitted LogisticRegressionCV must emit no warning.
    clf = LogisticRegressionCV(cv=2).fit(X, Y1)
    with pytest.warns(None) as captured:
        clf.score(X, clf.predict(X))
    assert len(captured) == 0
@skip_if_no_parallel
def test_lr_liblinear_warning():
    # liblinear ignores n_jobs; requesting n_jobs > 1 must raise a
    # UserWarning with this exact message.
    # NOTE(review): n_samples/n_features are unused here.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    lr = LogisticRegression(solver='liblinear', n_jobs=2)
    assert_warns_message(UserWarning,
                         "'n_jobs' > 1 does not have any effect when"
                         " 'solver' is set to 'liblinear'. Got 'n_jobs'"
                         " = 2.",
                         lr.fit, iris.data, target)
def test_predict_3_classes():
    # Same sanity check as the 2-class case, on the 3-class labeling,
    # with both the dense and the sparse representation of the data.
    for data in (X, X_sp):
        check_predictions(LogisticRegression(C=10), data, Y2)
def test_predict_iris():
    # Test logistic regression with the iris dataset
    # NOTE(review): n_features is unused here.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data), solver='liblinear',
                                   multi_class='ovr'),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
                                   multi_class='ovr', random_state=42),
                LogisticRegression(C=len(iris.data), solver='saga', tol=1e-2,
                                   multi_class='ovr', random_state=42)
                ]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)
        pred = clf.predict(iris.data)
        assert np.mean(pred == target) > .95
        probabilities = clf.predict_proba(iris.data)
        # Probabilities must sum to one for every sample.
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))
        # The argmax of the probabilities must also give good accuracy.
        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert np.mean(pred == target) > .95
@pytest.mark.parametrize('solver', ['lbfgs', 'newton-cg', 'sag', 'saga'])
def test_multinomial_validation(solver):
    # A negative C must be rejected for every multinomial-capable solver.
    clf = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
    assert_raises(ValueError, clf.fit, [[0, 1], [1, 0]], [0, 1])
@pytest.mark.parametrize('LR', [LogisticRegression, LogisticRegressionCV])
def test_check_solver_option(LR):
    """Invalid solver / multi_class / penalty / dual combinations must
    raise ValueError with the exact documented messages."""
    X, y = iris.data, iris.target
    msg = ("Logistic Regression supports only solvers in ['liblinear', "
           "'newton-cg', 'lbfgs', 'sag', 'saga'], got wrong_name.")
    lr = LR(solver="wrong_name", multi_class="ovr")
    assert_raise_message(ValueError, msg, lr.fit, X, y)
    msg = ("multi_class should be 'multinomial', 'ovr' or 'auto'. "
           "Got wrong_name")
    lr = LR(solver='newton-cg', multi_class="wrong_name")
    assert_raise_message(ValueError, msg, lr.fit, X, y)
    # only 'liblinear' solver
    msg = "Solver liblinear does not support a multinomial backend."
    lr = LR(solver='liblinear', multi_class='multinomial')
    assert_raise_message(ValueError, msg, lr.fit, X, y)
    # all solvers except 'liblinear' and 'saga'
    for solver in ['newton-cg', 'lbfgs', 'sag']:
        msg = ("Solver %s supports only 'l2' or 'none' penalties," %
               solver)
        lr = LR(solver=solver, penalty='l1', multi_class='ovr')
        assert_raise_message(ValueError, msg, lr.fit, X, y)
    for solver in ['newton-cg', 'lbfgs', 'sag', 'saga']:
        msg = ("Solver %s supports only dual=False, got dual=True" %
               solver)
        lr = LR(solver=solver, dual=True, multi_class='ovr')
        assert_raise_message(ValueError, msg, lr.fit, X, y)
    # only saga supports elasticnet. We only test for liblinear because the
    # error is raised before for the other solvers (solver %s supports only l2
    # penalties)
    for solver in ['liblinear']:
        msg = ("Only 'saga' solver supports elasticnet penalty, got "
               "solver={}.".format(solver))
        lr = LR(solver=solver, penalty='elasticnet')
        assert_raise_message(ValueError, msg, lr.fit, X, y)
    # liblinear does not support penalty='none'
    msg = "penalty='none' is not supported for the liblinear solver"
    lr = LR(penalty='none', solver='liblinear')
    assert_raise_message(ValueError, msg, lr.fit, X, y)
@pytest.mark.parametrize('solver', ['lbfgs', 'newton-cg', 'sag', 'saga'])
def test_multinomial_binary(solver):
    # Test multinomial LR on a binary problem.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]
    clf = LogisticRegression(solver=solver, multi_class='multinomial',
                             random_state=42, max_iter=2000)
    clf.fit(iris.data, target)
    # Binary multinomial fit must collapse to a single coefficient row.
    assert clf.coef_.shape == (1, iris.data.shape[1])
    assert clf.intercept_.shape == (1,)
    assert_array_equal(clf.predict(iris.data), target)
    mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                             random_state=42, fit_intercept=False)
    mlr.fit(iris.data, target)
    # NOTE(review): 'mlr' is fitted but the predictions below come from
    # 'clf'; confirm whether 'mlr' was meant to be used here.
    pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
                                  axis=1)]
    assert np.mean(pred == target) > .9
def test_multinomial_binary_probabilities():
    # Test multinomial LR gives expected probabilities based on the
    # decision function, for a binary problem.
    X, y = make_classification()
    clf = LogisticRegression(multi_class='multinomial', solver='saga')
    clf.fit(X, y)
    decision = clf.decision_function(X)
    proba = clf.predict_proba(X)
    # Binary multinomial softmax: p(1) = e^d / (e^d + e^-d).
    exp_pos = np.exp(decision)
    exp_neg = np.exp(-decision)
    expected_pos = exp_pos / (exp_pos + exp_neg)
    expected_proba = np.c_[1 - expected_pos, expected_pos]
    assert_almost_equal(proba, expected_proba)
def test_sparsify():
    # Test sparsify and densify members.
    # Naming: pred_<coef>_<data>, where d=dense and s=sparse.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    pred_d_d = clf.decision_function(iris.data)
    clf.sparsify()
    assert sp.issparse(clf.coef_)
    pred_s_d = clf.decision_function(iris.data)
    sp_data = sp.coo_matrix(iris.data)
    pred_s_s = clf.decision_function(sp_data)
    clf.densify()
    pred_d_s = clf.decision_function(sp_data)
    # All four coef/data combinations must give identical decisions.
    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0
    clf = LogisticRegression(random_state=0)
    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    # NOTE(review): this uses the module-level X (3 samples), not X_;
    # shapes still mismatch (4 labels), so ValueError is raised either
    # way -- confirm X_ was not intended.
    assert_raises(ValueError, clf.fit, X, y_wrong)
    # Wrong dimensions for test data
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    # Test that we can write to coef_ and intercept_
    clf = LogisticRegression(random_state=0).fit(X, Y1)
    # Zeroing both must zero the decision function.
    clf.coef_[:] = 0
    clf.intercept_[:] = 0
    assert_array_almost_equal(clf.decision_function(X), 0)
def test_nan():
    # Test proper NaN handling.
    # Regression test for Issue #252: fit used to go into an infinite loop.
    X_nan = np.array(X, dtype=np.float64)
    X_nan[0, 1] = np.nan
    clf = LogisticRegression(random_state=0)
    assert_raises(ValueError, clf.fit, X_nan, Y1)
def test_consistency_path():
    # Test that the path algorithm is consistent
    # (i.e. _logistic_regression_path coefficients match a
    # LogisticRegression fit at each C along the path).
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)
    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for solver in ['sag', 'saga']:
        coefs, Cs, _ = f(_logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
            max_iter=1000, multi_class='ovr', random_state=0)
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
                                    solver=solver, multi_class='ovr',
                                    random_state=0, max_iter=1000)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
                                      err_msg="with solver = %s" % solver)
    # test for fit_intercept=True
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'):
        Cs = [1e3]
        coefs, Cs, _ = f(_logistic_regression_path)(
            X, y, Cs=Cs, tol=1e-6, solver=solver,
            intercept_scaling=10000., random_state=0, multi_class='ovr')
        lr = LogisticRegression(C=Cs[0], tol=1e-4,
                                intercept_scaling=10000., random_state=0,
                                multi_class='ovr', solver=solver)
        lr.fit(X, y)
        # Compare coefficients and intercept together.
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
                                  err_msg="with solver = %s" % solver)
def test_logistic_regression_path_convergence_fail():
    """With max_iter=1, the path must emit exactly one ConvergenceWarning
    containing both generic and solver-specific advice."""
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = [1e3]
    # Check that the convergence message points to both a model agnostic
    # advice (scaling the data) and to the logistic regression specific
    # documentation that includes hints on the solver configuration.
    with pytest.warns(ConvergenceWarning) as record:
        with warnings.catch_warnings():
            # scipy 1.3.0 uses tostring which is deprecated in numpy
            warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
            _logistic_regression_path(
                X, y, Cs=Cs, tol=0., max_iter=1, random_state=0, verbose=0)
    assert len(record) == 1
    warn_msg = record[0].message.args[0]
    assert "lbfgs failed to converge" in warn_msg
    assert "Increase the number of iterations" in warn_msg
    assert "scale the data" in warn_msg
    assert "linear_model.html#logistic-regression" in warn_msg
def test_liblinear_dual_random_state():
    # random_state is relevant for liblinear solver only if dual=True
    X, y = make_classification(n_samples=20, random_state=0)
    # Shared settings; max_iter=1/tiny tol keep the solver dependent on
    # its random initialization.
    params = dict(dual=True, max_iter=1, tol=1e-15,
                  solver='liblinear', multi_class='ovr')
    lr1 = LogisticRegression(random_state=0, **params).fit(X, y)
    lr2 = LogisticRegression(random_state=0, **params).fit(X, y)
    lr3 = LogisticRegression(random_state=8, **params).fit(X, y)
    # same result for same random state
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
    assert_raise_message(AssertionError, msg,
                         assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
    # Validate the analytic gradient of _logistic_loss_and_grad against a
    # finite-difference approximation, on dense and sparse inputs.
    X_ref, y = make_classification(n_samples=20, random_state=0)
    n_features = X_ref.shape[1]

    # Sparsified copy of the same data.
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        # (the extra trailing coefficient of w is the intercept term).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        # With a zero intercept the loss must match the no-intercept case.
        assert_array_almost_equal(loss, loss_interp)

        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
    # Validate _logistic_grad_hess against _logistic_loss_and_grad and
    # against a finite-difference estimate of one Hessian column.
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # Sparsified copy of the same data.
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.full(n_features, .1)

        # First check that _logistic_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)

        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)

        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])
        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)

    # Second check that our intercept implementation is good: the extra
    # trailing coefficient of w is the intercept term.
    w = np.zeros(n_features + 1)
    loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
    loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
    grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
    assert_array_almost_equal(loss_interp, loss_interp_2)
    assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    # LogisticRegressionCV with a single C must agree with a plain
    # LogisticRegression fitted at that C, and expose correctly shaped
    # attributes.
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()

    lr = LogisticRegression(C=1., fit_intercept=False,
                            solver='liblinear', multi_class='ovr')
    lr.fit(X_ref, y)

    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear', multi_class='ovr', cv=3)
    lr_cv.fit(X_ref, y)

    assert_array_almost_equal(lr.coef_, lr_cv.coef_)
    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert len(lr_cv.classes_) == 2

    # coefs_paths_: one entry per class, shape (n_folds, n_Cs, n_features).
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1,))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
@pytest.mark.parametrize('scoring, multiclass_agg_list',
                         [('accuracy', ['']),
                          ('precision', ['_macro', '_weighted']),
                          # no need to test for micro averaging because it
                          # is the same as accuracy for f1, precision,
                          # and recall (see https://github.com/
                          # scikit-learn/scikit-learn/pull/
                          # 11578#discussion_r203250062)
                          ('f1', ['_macro', '_weighted']),
                          ('neg_log_loss', ['']),
                          ('recall', ['_macro', '_weighted'])])
def test_logistic_cv_multinomial_score(scoring, multiclass_agg_list):
    # test that LogisticRegressionCV uses the right score to compute its
    # cross-validation scores when using a multinomial scoring
    # see https://github.com/scikit-learn/scikit-learn/issues/8720
    X, y = make_classification(n_samples=100, random_state=0, n_classes=3,
                               n_informative=6)
    train, test = np.arange(80), np.arange(80, 100)
    lr = LogisticRegression(C=1., multi_class='multinomial')
    # we use lbfgs to support multinomial
    params = lr.get_params()
    # we store the params to set them further in _log_reg_scoring_path
    # (these three are not accepted by the path function, so drop them)
    for key in ['C', 'n_jobs', 'warm_start']:
        del params[key]
    lr.fit(X[train], y[train])
    for averaging in multiclass_agg_list:
        scorer = get_scorer(scoring + averaging)
        # The fold score returned by the scoring path must equal the scorer
        # applied to the refit estimator on the held-out fold.
        assert_array_almost_equal(
            _log_reg_scoring_path(X, y, train, test, Cs=[1.],
                                  scoring=scorer, **params)[2][0],
            scorer(lr, X[test], y[test]))
def test_multinomial_logistic_regression_string_inputs():
    # Test with string labels for LogisticRegression(CV)
    n_samples, n_features, n_classes = 50, 5, 3
    X_ref, y = make_classification(n_samples=n_samples, n_features=n_features,
                                   n_classes=n_classes, n_informative=3,
                                   random_state=0)
    # Map the integer labels 0/1/2 onto the string labels bar/baz/foo.
    y_str = LabelEncoder().fit(['bar', 'baz', 'foo']).inverse_transform(y)
    # For numerical labels, let y values be taken from set (-1, 0, 1)
    y = np.array(y) - 1
    # Test for string labels
    lr = LogisticRegression(multi_class='multinomial')
    lr_cv = LogisticRegressionCV(multi_class='multinomial', Cs=3)
    lr_str = LogisticRegression(multi_class='multinomial')
    lr_cv_str = LogisticRegressionCV(multi_class='multinomial', Cs=3)

    lr.fit(X_ref, y)
    lr_cv.fit(X_ref, y)
    lr_str.fit(X_ref, y_str)
    lr_cv_str.fit(X_ref, y_str)

    # The learned coefficients must not depend on the label encoding.
    assert_array_almost_equal(lr.coef_, lr_str.coef_)
    assert sorted(lr_str.classes_) == ['bar', 'baz', 'foo']
    assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
    assert sorted(lr_str.classes_) == ['bar', 'baz', 'foo']
    assert sorted(lr_cv_str.classes_) == ['bar', 'baz', 'foo']

    # The predictions should be in original labels
    assert sorted(np.unique(lr_str.predict(X_ref))) == ['bar', 'baz', 'foo']
    assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ['bar', 'baz', 'foo']

    # Make sure class weights can be given with string labels
    # (class 'foo' gets weight 0, so it must never be predicted).
    lr_cv_str = LogisticRegression(
        class_weight={'bar': 1, 'baz': 2, 'foo': 0},
        multi_class='multinomial').fit(X_ref, y_str)
    assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ['bar', 'baz']
def test_logistic_cv_sparse():
    # LogisticRegressionCV must learn the same model on dense and sparse X.
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    # Sparsify: zero out most entries, then convert to CSR.
    X[X < 1.0] = 0.0
    X_csr = sp.csr_matrix(X)

    clf_dense = LogisticRegressionCV()
    clf_dense.fit(X, y)

    clf_sparse = LogisticRegressionCV()
    clf_sparse.fit(X_csr, y)

    assert_array_almost_equal(clf_sparse.coef_, clf_dense.coef_)
    assert_array_almost_equal(clf_sparse.intercept_, clf_dense.intercept_)
    assert clf_sparse.C_ == clf_dense.C_
def test_intercept_logistic_helper():
    # Check that fitting an intercept is equivalent to appending a column
    # of ones, up to the regularization of that extra coefficient.
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)

    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)

    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    # Use n_samples instead of the hard-coded literal 10 so the test stays
    # consistent if the sample count above ever changes.
    X_ = np.hstack((X, np.ones(n_samples)[:, np.newaxis]))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)

    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)

    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])

    # Check the Hessian-vector products on a random probe vector
    # (renamed from the original's confusing reuse of the name 'grad').
    rng = np.random.RandomState(0)
    vec = rng.rand(n_features + 1)
    hess_interp = hess_interp(vec)
    hess = hess(vec)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * vec[-1], hess[-1])
def test_ovr_multinomial_iris():
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape

    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e, before the classes 0 and 1 are
    # conflated) is used for both clf and clf1
    n_cv = 2
    cv = StratifiedKFold(n_cv)
    precomputed_folds = list(cv.split(train, target))

    # Train clf on the original dataset where classes 0 and 1 are separated
    clf = LogisticRegressionCV(cv=precomputed_folds, multi_class='ovr')
    clf.fit(train, target)

    # Conflate classes 0 and 1 and train clf1 on this modified dataset
    clf1 = LogisticRegressionCV(cv=precomputed_folds, multi_class='ovr')
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)

    # Ensure that what OvR learns for class2 is same regardless of whether
    # classes 0 and 1 are separated or not
    assert_allclose(clf.scores_[2], clf1.scores_[2])
    assert_allclose(clf.intercept_[2:], clf1.intercept_)
    assert_allclose(clf.coef_[2][np.newaxis, :], clf1.coef_)

    # Test the shape of various attributes.
    assert clf.coef_.shape == (3, n_features)
    assert_array_equal(clf.classes_, [0, 1, 2])
    # coefs_paths_: (n_classes, n_folds, n_Cs, n_features + intercept).
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
    assert clf.Cs_.shape == (10,)
    scores = np.asarray(list(clf.scores_.values()))
    assert scores.shape == (3, n_cv, 10)

    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        # The stochastic solvers need more iterations but can use a
        # looser tolerance here.
        max_iter = 500 if solver in ['sag', 'saga'] else 15
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=max_iter,
            random_state=42, tol=1e-3 if solver in ['sag', 'saga'] else 1e-2,
            cv=2)
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert multi_score > ovr_score

        # Test attributes of LogisticRegressionCV
        assert clf.coef_.shape == clf_multi.coef_.shape
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
        assert clf_multi.Cs_.shape == (10,)
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert scores.shape == (3, n_cv, 10)
def test_logistic_regression_solvers():
    # Every pair of solvers must converge to (almost) the same coefficients.
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    params = dict(fit_intercept=False, random_state=42, multi_class='ovr')

    coefs = {}
    for solver in ('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'):
        clf = LogisticRegression(solver=solver, **params)
        clf.fit(X, y)
        coefs[solver] = clf.coef_

    # Compare all 10 unordered solver pairs.
    names = list(coefs)
    for i, first in enumerate(names):
        for second in names[i + 1:]:
            assert_array_almost_equal(coefs[first], coefs[second], decimal=3)
def test_logistic_regression_solvers_multiclass():
    # Multiclass (OvR) variant: all solvers must agree pairwise.
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-7
    params = dict(fit_intercept=False, tol=tol, random_state=42,
                  multi_class='ovr')
    # The stochastic solvers need many more iterations to hit this tol.
    extra_iters = {'sag': 1000, 'saga': 10000}

    coefs = {}
    for solver in ('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'):
        if solver in extra_iters:
            clf = LogisticRegression(solver=solver,
                                     max_iter=extra_iters[solver], **params)
        else:
            clf = LogisticRegression(solver=solver, **params)
        clf.fit(X, y)
        coefs[solver] = clf.coef_

    # Compare all 10 unordered solver pairs.
    names = list(coefs)
    for i, first in enumerate(names):
        for second in names[i + 1:]:
            assert_array_almost_equal(coefs[first], coefs[second], decimal=4)
def test_logistic_regressioncv_class_weights():
    # All CV solvers must agree under explicit dict and 'balanced' weights,
    # for both a binary and a 3-class problem.
    for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
        n_classes = len(weight)
        for class_weight in (weight, 'balanced'):
            X, y = make_classification(n_samples=30, n_features=3,
                                       n_repeated=0,
                                       n_informative=3, n_redundant=0,
                                       n_classes=n_classes, random_state=0)

            common = dict(Cs=1, fit_intercept=False, multi_class='ovr',
                          class_weight=class_weight)
            # The stochastic solvers need a tight tolerance, more
            # iterations and a fixed seed to match the exact solvers.
            stochastic = dict(tol=1e-5, max_iter=10000, random_state=0)

            clf_lbf = LogisticRegressionCV(solver='lbfgs', **common)
            clf_ncg = LogisticRegressionCV(solver='newton-cg', **common)
            clf_lib = LogisticRegressionCV(solver='liblinear', **common)
            clf_sag = LogisticRegressionCV(solver='sag', **stochastic,
                                           **common)
            clf_saga = LogisticRegressionCV(solver='saga', **stochastic,
                                            **common)

            for clf in (clf_lbf, clf_ncg, clf_lib, clf_sag, clf_saga):
                clf.fit(X, y)

            # lbfgs is the reference; everyone else must match it.
            for clf in (clf_lib, clf_ncg, clf_sag, clf_saga):
                assert_array_almost_equal(clf.coef_, clf_lbf.coef_,
                                          decimal=4)
def test_logistic_regression_sample_weights():
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)
    # Weight class-1 samples twice as much as class-0 samples (y is 0/1).
    sample_weight = y + 1

    # Run the same checks for LogisticRegression and LogisticRegressionCV.
    for LR in [LogisticRegression, LogisticRegressionCV]:
        kw = {'random_state': 42, 'fit_intercept': False, 'multi_class': 'ovr'}
        if LR is LogisticRegressionCV:
            kw.update({'Cs': 3, 'cv': 3})

        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        for solver in ['lbfgs', 'liblinear']:
            clf_sw_none = LR(solver=solver, **kw)
            clf_sw_ones = LR(solver=solver, **kw)
            clf_sw_none.fit(X, y)
            clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
            assert_array_almost_equal(
                clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)

        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(**kw)
        clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
        clf_sw_n = LR(solver='newton-cg', **kw)
        clf_sw_n.fit(X, y, sample_weight=sample_weight)
        clf_sw_sag = LR(solver='sag', tol=1e-10, **kw)
        # ignore convergence warning due to small dataset
        with ignore_warnings():
            clf_sw_sag.fit(X, y, sample_weight=sample_weight)
        clf_sw_liblinear = LR(solver='liblinear', **kw)
        clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)

        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        for solver in ['lbfgs', 'liblinear']:
            clf_cw_12 = LR(solver=solver, class_weight={0: 1, 1: 2}, **kw)
            clf_cw_12.fit(X, y)
            clf_sw_12 = LR(solver=solver, **kw)
            clf_sw_12.fit(X, y, sample_weight=sample_weight)
            assert_array_almost_equal(
                clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)

    # Test the above for l1 penalty and l2 penalty with dual=True.
    # since the patched liblinear code is different.
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l1", tol=1e-5, random_state=42, multi_class='ovr')
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5,
        random_state=42, multi_class='ovr')
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)

    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l2", dual=True, random_state=42, multi_class='ovr')
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l2", dual=True,
        random_state=42, multi_class='ovr')
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
    # helper for returning a dictionary instead of an array
    classes = np.unique(y)
    weights = compute_class_weight("balanced", classes=classes, y=y)
    return {cls: w for cls, w in zip(classes, weights)}
def test_logistic_regression_class_weights():
    def check_balanced_equivalence(X, y, solvers, multi_class, decimal):
        # 'balanced' must produce the same fit as an explicitly computed
        # balanced class-weight dictionary, for every solver.
        class_weight_dict = _compute_class_weight_dictionary(y)
        for solver in solvers:
            clf1 = LogisticRegression(solver=solver, multi_class=multi_class,
                                      class_weight="balanced")
            clf2 = LogisticRegression(solver=solver, multi_class=multi_class,
                                      class_weight=class_weight_dict)
            clf1.fit(X, y)
            clf2.fit(X, y)
            assert_array_almost_equal(clf1.coef_, clf2.coef_,
                                      decimal=decimal)

    # Multinomial case: remove 90% of class 0
    check_balanced_equivalence(iris.data[45:, :], iris.target[45:],
                               ("lbfgs", "newton-cg"), "multinomial", 4)

    # Binary case: remove 90% of class 0 and 100% of class 2
    check_balanced_equivalence(iris.data[45:100, :], iris.target[45:100],
                               ("lbfgs", "newton-cg", "liblinear"), "ovr", 6)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression

    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)

    X = StandardScaler(with_mean=False).fit_transform(X)

    # 'lbfgs' is used as a referenced
    solver = 'lbfgs'
    ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
    ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
                               fit_intercept=False)
    ref_i.fit(X, y)
    ref_w.fit(X, y)
    assert ref_i.coef_.shape == (n_classes, n_features)
    assert ref_w.coef_.shape == (n_classes, n_features)
    for solver in ['sag', 'saga', 'newton-cg']:
        # clf_i fits an intercept, clf_w does not.
        clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=2000, tol=1e-7,
                                   )
        clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=2000, tol=1e-7,
                                   fit_intercept=False)
        clf_i.fit(X, y)
        clf_w.fit(X, y)
        assert clf_i.coef_.shape == (n_classes, n_features)
        assert clf_w.coef_.shape == (n_classes, n_features)

        # Compare solutions between lbfgs and the other solvers
        assert_allclose(ref_i.coef_, clf_i.coef_, rtol=1e-2)
        assert_allclose(ref_w.coef_, clf_w.coef_, rtol=1e-2)
        assert_allclose(ref_i.intercept_, clf_i.intercept_, rtol=1e-2)

    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_allclose(clf_path.coef_, ref_i.coef_, rtol=2e-2)
        assert_allclose(clf_path.intercept_, ref_i.intercept_, rtol=2e-2)
def test_multinomial_grad_hess():
    # Check the multinomial Hessian-vector product against a
    # finite-difference estimate of one Hessian column.
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # Build one-hot targets from the argmax of the linear model.
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)

    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    # Test negative prediction when decision_function values are zero.
    # Liblinear predicts the positive class when decision_function values
    # are zero. This is a test to verify that we do not do the same.
    # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
    # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
    X, y = make_classification(n_samples=5, n_features=5, random_state=0)
    clf = LogisticRegression(fit_intercept=False, solver='liblinear',
                             multi_class='ovr').fit(X, y)

    # All-zero samples make the intercept-free decision function zero;
    # the negative class must then be predicted.
    X = np.zeros((5, 5))
    assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
    # Test LogRegCV with solver='liblinear' works for sparse matrices
    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
    lrcv = LogisticRegressionCV(solver='liblinear', multi_class='ovr')
    # Fitting on a CSR matrix must not raise.
    lrcv.fit(sparse.csr_matrix(X), y)
def test_saga_sparse():
    # Test LogisticRegressionCV with solver='saga' works for sparse matrices.
    # (The previous comment mentioned liblinear by copy-paste mistake.)
    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
    clf = LogisticRegressionCV(solver='saga')
    clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for scaling in (-1, 0):
        clf = LogisticRegression(intercept_scaling=scaling,
                                 solver='liblinear', multi_class='ovr')
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    # Test that intercept_scaling is ignored when fit_intercept is False
    clf = LogisticRegression(fit_intercept=False).fit(X, Y1)
    assert clf.intercept_ == 0.
def test_logreg_l1():
    # Because liblinear penalizes the intercept and saga does not, we do not
    # fit the intercept to make it possible to compare the coefficients of
    # the two models at convergence.
    rng = np.random.RandomState(42)
    n_samples = 50
    X, y = make_classification(n_samples=n_samples, n_features=20,
                               random_state=0)
    # Append pure-noise and constant columns that l1 should drive to zero.
    X_noise = rng.normal(size=(n_samples, 3))
    X_constant = np.ones(shape=(n_samples, 2))
    X = np.concatenate((X, X_noise, X_constant), axis=1)
    lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
                                      fit_intercept=False, multi_class='ovr',
                                      tol=1e-10)
    lr_liblinear.fit(X, y)

    lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                 fit_intercept=False, multi_class='ovr',
                                 max_iter=1000, tol=1e-10)
    lr_saga.fit(X, y)
    assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)

    # Noise and constant features should be regularized to zero by the l1
    # penalty
    assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
    assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
def test_logreg_l1_sparse_data():
    # Because liblinear penalizes the intercept and saga does not, we do not
    # fit the intercept to make it possible to compare the coefficients of
    # the two models at convergence.
    rng = np.random.RandomState(42)
    n_samples = 50
    X, y = make_classification(n_samples=n_samples, n_features=20,
                               random_state=0)
    # Append pure-noise and constant columns that l1 should drive to zero.
    X_noise = rng.normal(scale=0.1, size=(n_samples, 3))
    X_constant = np.zeros(shape=(n_samples, 2))
    X = np.concatenate((X, X_noise, X_constant), axis=1)
    # Sparsify the design matrix.
    X[X < 1] = 0
    X = sparse.csr_matrix(X)

    lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
                                      fit_intercept=False, multi_class='ovr',
                                      tol=1e-10)
    lr_liblinear.fit(X, y)

    lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                 fit_intercept=False, multi_class='ovr',
                                 max_iter=1000, tol=1e-10)
    lr_saga.fit(X, y)
    assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
    # Noise and constant features should be regularized to zero by the l1
    # penalty
    assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
    assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))

    # Check that solving on the sparse and dense data yield the same results
    lr_saga_dense = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                       fit_intercept=False, multi_class='ovr',
                                       max_iter=1000, tol=1e-10)
    lr_saga_dense.fit(X.toarray(), y)
    assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_)
@pytest.mark.parametrize("random_seed", [42])
@pytest.mark.parametrize("penalty", ["l1", "l2"])
def test_logistic_regression_cv_refit(random_seed, penalty):
    # Test that when refit=True, logistic regression cv with the saga solver
    # converges to the same solution as logistic regression with a fixed
    # regularization parameter.
    # Internally the LogisticRegressionCV model uses a warm start to refit on
    # the full data model with the optimal C found by CV. As the penalized
    # logistic regression loss is convex, we should still recover exactly
    # the same solution as long as the stopping criterion is strict enough (and
    # that there are no exactly duplicated features when penalty='l1').
    X, y = make_classification(n_samples=100, n_features=20,
                               random_state=random_seed)
    common_params = dict(solver='saga', penalty=penalty,
                         random_state=random_seed, max_iter=1000, tol=1e-12)

    lr = LogisticRegression(C=1.0, **common_params)
    lr.fit(X, y)

    lr_cv = LogisticRegressionCV(Cs=[1.0], refit=True, **common_params)
    lr_cv.fit(X, y)

    assert_array_almost_equal(lr_cv.coef_, lr.coef_)
def test_logreg_predict_proba_multinomial():
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)

    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial",
                                   solver="lbfgs").fit(X, y)
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs").fit(X, y)

    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert clf_ovr_loss > clf_multi_loss

    # Predicted probabilities using the soft-max function should give a
    # smaller loss than those using the logistic function.
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert clf_wrong_loss > clf_multi_loss
def test_max_iter():
    # Test that the maximum number of iteration is reached
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0

    solvers = ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']

    for max_iter in range(1, 5):
        for solver in solvers:
            for multi_class in ['ovr', 'multinomial']:
                # liblinear does not support the multinomial loss.
                if solver == 'liblinear' and multi_class == 'multinomial':
                    continue

                lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                        multi_class=multi_class,
                                        random_state=0, solver=solver)
                # tol=1e-15 is practically unreachable, so every fit must
                # stop at max_iter and warn about non-convergence.
                # pytest.warns replaces the deprecated assert_warns helper
                # (this file already uses pytest.warns elsewhere).
                with pytest.warns(ConvergenceWarning):
                    lr.fit(X, y_bin)
                assert lr.n_iter_[0] == max_iter
@pytest.mark.parametrize('solver',
                         ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs'])
def test_n_iter(solver):
    # Test that self.n_iter_ has the correct format.
    X, y = iris.data, iris.target
    # Binary version of the targets (classes 0 and 2 conflated).
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0

    n_Cs = 4
    n_cv_fold = 2

    # OvR case
    # liblinear reports a single iteration count; the other solvers report
    # one count per OvR sub-problem (i.e. per class).
    n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
    clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                             solver=solver, C=1.,
                             random_state=42, max_iter=100)
    clf.fit(X, y)
    assert clf.n_iter_.shape == (n_classes,)

    n_classes = np.unique(y).shape[0]
    clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                               solver=solver, Cs=n_Cs, cv=n_cv_fold,
                               random_state=42, max_iter=100)
    clf.fit(X, y)
    assert clf.n_iter_.shape == (n_classes, n_cv_fold, n_Cs)
    # Binary problem: a single OvR sub-problem.
    clf.fit(X, y_bin)
    assert clf.n_iter_.shape == (1, n_cv_fold, n_Cs)

    # multinomial case
    # A single joint optimization problem, hence a single iteration count.
    n_classes = 1
    if solver in ('liblinear', 'sag', 'saga'):
        # These solvers are not exercised for the multinomial shape checks.
        return

    clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                             solver=solver, C=1.,
                             random_state=42, max_iter=100)
    clf.fit(X, y)
    assert clf.n_iter_.shape == (n_classes,)

    clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                               solver=solver, Cs=n_Cs, cv=n_cv_fold,
                               random_state=42, max_iter=100)
    clf.fit(X, y)
    assert clf.n_iter_.shape == (n_classes, n_cv_fold, n_Cs)
    clf.fit(X, y_bin)
    assert clf.n_iter_.shape == (1, n_cv_fold, n_Cs)
@pytest.mark.parametrize('solver', ('newton-cg', 'sag', 'saga', 'lbfgs'))
@pytest.mark.parametrize('warm_start', (True, False))
@pytest.mark.parametrize('fit_intercept', (True, False))
@pytest.mark.parametrize('multi_class', ['ovr', 'multinomial'])
def test_warm_start(solver, warm_start, fit_intercept, multi_class):
    # A 1-iteration second fit on same data should give almost same result
    # with warm starting, and quite different result without warm starting.
    # Warm starting does not work with liblinear solver.
    X, y = iris.data, iris.target

    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                             warm_start=warm_start,
                             solver=solver,
                             random_state=42, max_iter=100,
                             fit_intercept=fit_intercept)
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
        coef_1 = clf.coef_

        # Refit for a single iteration: with warm_start the coefficients
        # barely move; with a cold start they change substantially.
        clf.max_iter = 1
        clf.fit(X, y)
    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
    msg = ("Warm starting issue with %s solver in %s mode "
           "with fit_intercept=%s and warm_start=%s"
           % (solver, multi_class, str(fit_intercept),
              str(warm_start)))
    if warm_start:
        assert 2.0 > cum_diff, msg
    else:
        assert cum_diff > 2.0, msg
def test_saga_vs_liblinear():
    iris = load_iris()
    X, y = iris.data, iris.target
    # Triplicate the data to give the stochastic solver more samples.
    X = np.concatenate([X] * 3)
    y = np.concatenate([y] * 3)

    # Binary subset of iris with labels mapped to {-1, 1}.
    X_bin = X[y <= 1]
    y_bin = y[y <= 1] * 2 - 1

    X_sparse, y_sparse = make_classification(n_samples=50, n_features=20,
                                             random_state=0)
    X_sparse = sparse.csr_matrix(X_sparse)

    for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)):
        for penalty in ['l1', 'l2']:
            n_samples = X.shape[0]
            # alpha=1e-3 is time consuming
            for alpha in np.logspace(-1, 1, 3):
                # C = 1 / (n_samples * alpha) converts a per-sample
                # regularization strength into sklearn's C parameter.
                saga = LogisticRegression(
                    C=1. / (n_samples * alpha),
                    solver='saga',
                    multi_class='ovr',
                    max_iter=200,
                    fit_intercept=False,
                    penalty=penalty, random_state=0, tol=1e-24)

                liblinear = LogisticRegression(
                    C=1. / (n_samples * alpha),
                    solver='liblinear',
                    multi_class='ovr',
                    max_iter=200,
                    fit_intercept=False,
                    penalty=penalty, random_state=0, tol=1e-24)

                saga.fit(X, y)
                liblinear.fit(X, y)
                # Convergence for alpha=1e-3 is very slow
                assert_array_almost_equal(saga.coef_, liblinear.coef_, 3)
@pytest.mark.parametrize('multi_class', ['ovr', 'multinomial'])
@pytest.mark.parametrize('solver', ['newton-cg', 'liblinear', 'saga'])
@pytest.mark.parametrize('fit_intercept', [False, True])
def test_dtype_match(solver, multi_class, fit_intercept):
    # Test that np.float32 input data is not cast to np.float64 when possible
    # and that the output is approximately the same no matter the input format.
    if solver == 'liblinear' and multi_class == 'multinomial':
        pytest.skip('liblinear does not support multinomial logistic')

    # liblinear is expected to upcast to float64 even for float32 input;
    # the other solvers should preserve float32.
    out32_type = np.float64 if solver == 'liblinear' else np.float32

    # Dense and sparse versions of the same data in both precisions.
    X_32 = np.array(X).astype(np.float32)
    y_32 = np.array(Y1).astype(np.float32)
    X_64 = np.array(X).astype(np.float64)
    y_64 = np.array(Y1).astype(np.float64)
    X_sparse_32 = sp.csr_matrix(X, dtype=np.float32)
    X_sparse_64 = sp.csr_matrix(X, dtype=np.float64)
    solver_tol = 5e-4

    lr_templ = LogisticRegression(
        solver=solver, multi_class=multi_class,
        random_state=42, tol=solver_tol, fit_intercept=fit_intercept)

    # Check 32-bit type consistency
    lr_32 = clone(lr_templ)
    lr_32.fit(X_32, y_32)
    assert lr_32.coef_.dtype == out32_type

    # Check 32-bit type consistency with sparsity
    lr_32_sparse = clone(lr_templ)
    lr_32_sparse.fit(X_sparse_32, y_32)
    assert lr_32_sparse.coef_.dtype == out32_type

    # Check 64-bit type consistency
    lr_64 = clone(lr_templ)
    lr_64.fit(X_64, y_64)
    assert lr_64.coef_.dtype == np.float64

    # Check 64-bit type consistency with sparsity
    lr_64_sparse = clone(lr_templ)
    lr_64_sparse.fit(X_sparse_64, y_64)
    assert lr_64_sparse.coef_.dtype == np.float64

    # solver_tol bounds the norm of the loss gradient
    # dw ~= inv(H)*grad ==> |dw| ~= |inv(H)| * solver_tol, where H - hessian
    #
    # See https://github.com/scikit-learn/scikit-learn/pull/13645
    #
    # with Z = np.hstack((np.ones((3,1)), np.array(X)))
    # In [8]: np.linalg.norm(np.diag([0,2,2]) + np.linalg.inv((Z.T @ Z)/4))
    # Out[8]: 1.7193336918135917
    # factor of 2 to get the ball diameter
    atol = 2 * 1.72 * solver_tol
    if os.name == 'nt' and _IS_32BIT:
        # FIXME
        atol = 1e-2

    # Check accuracy consistency
    assert_allclose(lr_32.coef_, lr_64.coef_.astype(np.float32), atol=atol)

    if solver == 'saga' and fit_intercept:
        # FIXME: SAGA on sparse data fits the intercept inaccurately with the
        # default tol and max_iter parameters.
        atol = 1e-1

    assert_allclose(lr_32.coef_, lr_32_sparse.coef_, atol=atol)
    assert_allclose(lr_64.coef_, lr_64_sparse.coef_, atol=atol)
def test_warm_start_converge_LR():
    """A warm-started multinomial LR refit repeatedly on the same data must
    keep the same loss as a cold-started one. Non-regressive test for #10836.
    """
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = np.array([1] * 100 + [-1] * 100)
    shared = dict(multi_class='multinomial', solver='sag', random_state=0)
    lr_cold = LogisticRegression(warm_start=False, **shared)
    lr_warm = LogisticRegression(warm_start=True, **shared)

    cold_loss = log_loss(y, lr_cold.fit(X, y).predict_proba(X))
    # Refitting with warm_start must stay at the optimum every time.
    for _ in range(5):
        lr_warm.fit(X, y)
        warm_loss = log_loss(y, lr_warm.predict_proba(X))
        assert_allclose(cold_loss, warm_loss, rtol=1e-5)
def test_elastic_net_coeffs():
    """elasticnet with an interior l1_ratio must produce coefficients
    distinct from both pure l1 and pure l2 (saga solver)."""
    X, y = make_classification(random_state=0)
    C = 2.
    l1_ratio = .5
    coeffs = [
        LogisticRegression(penalty=penalty, C=C, solver='saga',
                           random_state=0, l1_ratio=l1_ratio).fit(X, y).coef_
        for penalty in ('elasticnet', 'l1', 'l2')
    ]
    elastic_net_coeffs, l1_coeffs, l2_coeffs = coeffs
    # each pair of solutions must differ by at least .1 somewhere
    assert not np.allclose(elastic_net_coeffs, l1_coeffs, rtol=0, atol=.1)
    assert not np.allclose(elastic_net_coeffs, l2_coeffs, rtol=0, atol=.1)
    assert not np.allclose(l2_coeffs, l1_coeffs, rtol=0, atol=.1)
@pytest.mark.parametrize('C', [.001, .1, 1, 10, 100, 1000, 1e6])
@pytest.mark.parametrize('penalty, l1_ratio',
                         [('l1', 1),
                          ('l2', 0)])
def test_elastic_net_l1_l2_equivalence(C, penalty, l1_ratio):
    """elasticnet degenerates to pure l1 at l1_ratio=1 and pure l2 at
    l1_ratio=0."""
    X, y = make_classification(random_state=0)
    lr_enet = LogisticRegression(penalty='elasticnet', C=C,
                                 l1_ratio=l1_ratio, solver='saga',
                                 random_state=0).fit(X, y)
    lr_expected = LogisticRegression(penalty=penalty, C=C, solver='saga',
                                     random_state=0).fit(X, y)
    assert_array_almost_equal(lr_enet.coef_, lr_expected.coef_)
@pytest.mark.parametrize('C', [.001, 1, 100, 1e6])
def test_elastic_net_vs_l1_l2(C):
    """Grid-searching l1_ratio under the elasticnet penalty must score at
    least as well as pure l1 or pure l2 on held-out data."""
    X, y = make_classification(500, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    enet_clf = LogisticRegression(penalty='elasticnet', C=C, solver='saga',
                                  random_state=0)
    gs = GridSearchCV(enet_clf, {'l1_ratio': np.linspace(0, 1, 5)},
                      refit=True)
    l1_clf = LogisticRegression(penalty='l1', C=C, solver='saga',
                                random_state=0)
    l2_clf = LogisticRegression(penalty='l2', C=C, solver='saga',
                                random_state=0)
    for estimator in (gs, l1_clf, l2_clf):
        estimator.fit(X_train, y_train)

    assert gs.score(X_test, y_test) >= l1_clf.score(X_test, y_test)
    assert gs.score(X_test, y_test) >= l2_clf.score(X_test, y_test)
@pytest.mark.parametrize('C', np.logspace(-3, 2, 4))
@pytest.mark.parametrize('l1_ratio', [.1, .5, .9])
def test_LogisticRegression_elastic_net_objective(C, l1_ratio):
    """Training with the elasticnet penalty must reach a lower elasticnet
    objective than training with l2 only (both problems are convex, so the
    elasticnet-trained model minimizes the elasticnet objective)."""
    X, y = make_classification(n_samples=1000, n_classes=2, n_features=20,
                               n_informative=10, n_redundant=0,
                               n_repeated=0, random_state=0)
    X = scale(X)

    lr_enet = LogisticRegression(penalty='elasticnet', solver='saga',
                                 random_state=0, C=C, l1_ratio=l1_ratio,
                                 fit_intercept=False).fit(X, y)
    lr_l2 = LogisticRegression(penalty='l2', solver='saga', random_state=0,
                               C=C, fit_intercept=False).fit(X, y)

    def enet_objective(lr):
        # C * log-loss + l1_ratio * ||w||_1 + (1 - l1_ratio)/2 * ||w||_2^2
        coef = lr.coef_.ravel()
        obj = C * log_loss(y, lr.predict_proba(X))
        obj += l1_ratio * np.sum(np.abs(coef))
        obj += (1. - l1_ratio) * 0.5 * np.dot(coef, coef)
        return obj

    assert enet_objective(lr_enet) < enet_objective(lr_l2)
@pytest.mark.parametrize('multi_class', ('ovr', 'multinomial'))
def test_LogisticRegressionCV_GridSearchCV_elastic_net(multi_class):
    """LogisticRegressionCV must select the same best (C, l1_ratio) as an
    equivalent GridSearchCV when penalty='elasticnet'."""
    if multi_class == 'ovr':
        # This is actually binary classification, ovr multiclass is treated in
        # test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr
        X, y = make_classification(random_state=0)
    else:
        X, y = make_classification(n_samples=100, n_classes=3,
                                   n_informative=3, random_state=0)

    cv = StratifiedKFold(5)
    l1_ratios = np.linspace(0, 1, 3)
    Cs = np.logspace(-4, 4, 3)

    lrcv = LogisticRegressionCV(penalty='elasticnet', Cs=Cs, solver='saga',
                                cv=cv, l1_ratios=l1_ratios, random_state=0,
                                multi_class=multi_class)
    lrcv.fit(X, y)

    gs = GridSearchCV(LogisticRegression(penalty='elasticnet', solver='saga',
                                         random_state=0,
                                         multi_class=multi_class),
                      {'C': Cs, 'l1_ratio': l1_ratios}, cv=cv)
    gs.fit(X, y)

    assert gs.best_params_['l1_ratio'] == lrcv.l1_ratio_[0]
    assert gs.best_params_['C'] == lrcv.C_[0]
def test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr():
    """With multi_class='ovr', LogisticRegressionCV keeps one C and one
    l1_ratio per class while GridSearchCV shares them across the one-vs-rest
    classifiers, so best_params cannot be compared directly (unlike the
    previous test). Instead, check that predictions largely agree."""
    X, y = make_classification(n_samples=100, n_classes=3, n_informative=3,
                               random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    cv = StratifiedKFold(5)
    l1_ratios = np.linspace(0, 1, 3)
    Cs = np.logspace(-4, 4, 3)

    lrcv = LogisticRegressionCV(penalty='elasticnet', Cs=Cs, solver='saga',
                                cv=cv, l1_ratios=l1_ratios, random_state=0,
                                multi_class='ovr')
    lrcv.fit(X_train, y_train)

    gs = GridSearchCV(LogisticRegression(penalty='elasticnet', solver='saga',
                                         random_state=0, multi_class='ovr'),
                      {'C': Cs, 'l1_ratio': l1_ratios}, cv=cv)
    gs.fit(X_train, y_train)

    # Check that predictions are 80% the same
    assert (lrcv.predict(X_train) == gs.predict(X_train)).mean() >= .8
    assert (lrcv.predict(X_test) == gs.predict(X_test)).mean() >= .8
@pytest.mark.parametrize('penalty', ('l2', 'elasticnet'))
@pytest.mark.parametrize('multi_class', ('ovr', 'multinomial', 'auto'))
def test_LogisticRegressionCV_no_refit(penalty, multi_class):
    """With refit=False, LogisticRegressionCV attributes keep their
    documented per-class shapes."""
    n_classes = 3
    n_features = 20
    X, y = make_classification(n_samples=200, n_classes=n_classes,
                               n_informative=n_classes,
                               n_features=n_features, random_state=0)

    Cs = np.logspace(-4, 4, 3)
    # l1_ratios only applies to the elasticnet penalty.
    l1_ratios = np.linspace(0, 1, 2) if penalty == 'elasticnet' else None

    lrcv = LogisticRegressionCV(penalty=penalty, Cs=Cs, solver='saga',
                                l1_ratios=l1_ratios, random_state=0,
                                multi_class=multi_class, refit=False)
    lrcv.fit(X, y)
    assert lrcv.C_.shape == (n_classes,)
    assert lrcv.l1_ratio_.shape == (n_classes,)
    assert lrcv.coef_.shape == (n_classes, n_features)
def test_LogisticRegressionCV_elasticnet_attribute_shapes():
    """With elasticnet, scores_, coefs_paths_ and n_iter_ each gain an extra
    dimension for the l1_ratios grid."""
    n_classes = 3
    n_features = 20
    X, y = make_classification(n_samples=200, n_classes=n_classes,
                               n_informative=n_classes,
                               n_features=n_features, random_state=0)

    Cs = np.logspace(-4, 4, 3)
    l1_ratios = np.linspace(0, 1, 2)
    n_folds = 2
    lrcv = LogisticRegressionCV(penalty='elasticnet', Cs=Cs, solver='saga',
                                cv=n_folds, l1_ratios=l1_ratios,
                                multi_class='ovr', random_state=0)
    lrcv.fit(X, y)

    # last axis is n_features + 1 because the intercept is appended
    coefs_paths = np.asarray(list(lrcv.coefs_paths_.values()))
    assert coefs_paths.shape == (n_classes, n_folds, Cs.size,
                                 l1_ratios.size, n_features + 1)

    scores = np.asarray(list(lrcv.scores_.values()))
    assert scores.shape == (n_classes, n_folds, Cs.size, l1_ratios.size)

    assert lrcv.n_iter_.shape == (n_classes, n_folds, Cs.size,
                                  l1_ratios.size)
@pytest.mark.parametrize('l1_ratio', (-1, 2, None, 'something_wrong'))
def test_l1_ratio_param(l1_ratio):
    """Invalid l1_ratio values must raise; a set-but-unused l1_ratio must
    warn."""
    msg = "l1_ratio must be between 0 and 1; got (l1_ratio=%r)" % l1_ratio
    bad_clf = LogisticRegression(penalty='elasticnet', solver='saga',
                                 l1_ratio=l1_ratio)
    assert_raise_message(ValueError, msg, bad_clf.fit, X, Y1)

    if l1_ratio is not None:
        msg = ("l1_ratio parameter is only used when penalty is 'elasticnet'."
               " Got (penalty=l1)")
        unused_clf = LogisticRegression(penalty='l1', solver='saga',
                                        l1_ratio=l1_ratio)
        assert_warns_message(UserWarning, msg, unused_clf.fit, X, Y1)
@pytest.mark.parametrize('l1_ratios', ([], [.5, 2], None, 'something_wrong'))
def test_l1_ratios_param(l1_ratios):
    """Invalid l1_ratios values must raise; a set-but-unused l1_ratios must
    warn."""
    msg = ("l1_ratios must be a list of numbers between 0 and 1; got "
           "(l1_ratios=%r)" % l1_ratios)
    bad_clf = LogisticRegressionCV(penalty='elasticnet', solver='saga',
                                   l1_ratios=l1_ratios, cv=2)
    assert_raise_message(ValueError, msg, bad_clf.fit, X, Y1)

    if l1_ratios is not None:
        msg = ("l1_ratios parameter is only used when penalty is "
               "'elasticnet'. Got (penalty=l1)")
        function = LogisticRegressionCV(penalty='l1', solver='saga',
                                        l1_ratios=l1_ratios, cv=2).fit
        assert_warns_message(UserWarning, msg, function, X, Y1)
@pytest.mark.parametrize('C', np.logspace(-3, 2, 4))
@pytest.mark.parametrize('l1_ratio', [.1, .5, .9])
def test_elastic_net_versus_sgd(C, l1_ratio):
    """LogisticRegression and SGDClassifier(loss='log') with equivalent
    elasticnet regularization should reach nearly the same coefficients."""
    n_samples = 500
    X, y = make_classification(n_samples=n_samples, n_classes=2,
                               n_features=5, n_informative=5,
                               n_redundant=0, n_repeated=0, random_state=1)
    X = scale(X)

    # SGD's alpha corresponds to 1 / (C * n_samples) in LogisticRegression.
    sgd = SGDClassifier(
        penalty='elasticnet', random_state=1, fit_intercept=False,
        tol=-np.inf, max_iter=2000, l1_ratio=l1_ratio,
        alpha=1. / C / n_samples, loss='log')
    log = LogisticRegression(
        penalty='elasticnet', random_state=1, fit_intercept=False, tol=1e-5,
        max_iter=1000, l1_ratio=l1_ratio, C=C, solver='saga')

    sgd.fit(X, y)
    log.fit(X, y)
    assert_array_almost_equal(sgd.coef_, log.coef_, decimal=1)
def test_logistic_regression_path_coefs_multinomial():
    """Coefficients returned for different Cs by the multinomial path must
    be distinct arrays — they used to overwrite each other (old bug)."""
    X, y = make_classification(n_samples=200, n_classes=3, n_informative=2,
                               n_redundant=0, n_clusters_per_class=1,
                               random_state=0, n_features=2)
    coefs, _, _ = _logistic_regression_path(X, y, penalty='l1',
                                            Cs=[.00001, 1, 10000],
                                            solver='saga', random_state=0,
                                            multi_class='multinomial')
    # Every pair of C values should yield clearly different coefficients.
    for first, second in ((0, 1), (0, 2), (1, 2)):
        with pytest.raises(AssertionError):
            assert_array_almost_equal(coefs[first], coefs[second], decimal=1)
@pytest.mark.parametrize('est',
                         [LogisticRegression(random_state=0),
                          LogisticRegressionCV(random_state=0, cv=3,
                                               Cs=3, tol=1e-3)],
                         ids=lambda x: x.__class__.__name__)
@pytest.mark.parametrize('solver', ['liblinear', 'lbfgs', 'newton-cg', 'sag',
                                    'saga'])
def test_logistic_regression_multi_class_auto(est, solver):
    # check multi_class='auto' => multi_class='ovr' iff binary y or liblinear

    def fit(X, y, **kw):
        # Always fit a fresh clone so the parametrized estimator is never
        # mutated across sub-checks.
        return clone(est).set_params(**kw).fit(X, y)

    # Subsample iris to keep the fits fast; X2 is a disjoint slice used for
    # prediction checks.
    X = iris.data[::10]
    X2 = iris.data[1::10]
    y_multi = iris.target[::10]
    y_bin = y_multi == 0

    # Binary target: 'auto' must behave exactly like 'ovr'.
    est_auto_bin = fit(X, y_bin, multi_class='auto', solver=solver)
    est_ovr_bin = fit(X, y_bin, multi_class='ovr', solver=solver)
    assert_allclose(est_auto_bin.coef_, est_ovr_bin.coef_)
    assert_allclose(est_auto_bin.predict_proba(X2),
                    est_ovr_bin.predict_proba(X2))

    est_auto_multi = fit(X, y_multi, multi_class='auto', solver=solver)
    if solver == 'liblinear':
        # liblinear cannot do multinomial, so 'auto' falls back to 'ovr'.
        est_ovr_multi = fit(X, y_multi, multi_class='ovr', solver=solver)
        assert_allclose(est_auto_multi.coef_, est_ovr_multi.coef_)
        assert_allclose(est_auto_multi.predict_proba(X2),
                        est_ovr_multi.predict_proba(X2))
    else:
        # All other solvers pick 'multinomial' for a multiclass target.
        est_multi_multi = fit(X, y_multi, multi_class='multinomial',
                              solver=solver)
        if sys.platform == 'darwin' and solver == 'lbfgs':
            pytest.xfail('Issue #11924: LogisticRegressionCV(solver="lbfgs", '
                         'multi_class="multinomial") is nondeterministic on '
                         'MacOS.')
        assert_allclose(est_auto_multi.coef_, est_multi_multi.coef_)
        assert_allclose(est_auto_multi.predict_proba(X2),
                        est_multi_multi.predict_proba(X2))

        # Make sure multi_class='ovr' is distinct from ='multinomial'
        assert not np.allclose(est_auto_bin.coef_,
                               fit(X, y_bin, multi_class='multinomial',
                                   solver=solver).coef_)
        assert not np.allclose(est_auto_bin.coef_,
                               fit(X, y_multi, multi_class='multinomial',
                                   solver=solver).coef_)
@pytest.mark.parametrize('solver', ('lbfgs', 'newton-cg', 'sag', 'saga'))
def test_penalty_none(solver):
    """penalty='none' warns when C is set to a non-default value, behaves
    like l2 with C=np.inf, and is rejected by LogisticRegressionCV."""
    X, y = make_classification(n_samples=1000, random_state=0)

    # A non-default C alongside penalty='none' must trigger a warning.
    msg = "Setting penalty='none' will ignore the C"
    lr = LogisticRegression(penalty='none', solver=solver, C=4)
    assert_warns_message(UserWarning, msg, lr.fit, X, y)

    # penalty='none' must be equivalent to an un-regularized l2 fit.
    lr_none = LogisticRegression(penalty='none', solver=solver,
                                 random_state=0)
    lr_l2_C_inf = LogisticRegression(penalty='l2', C=np.inf, solver=solver,
                                     random_state=0)
    pred_none = lr_none.fit(X, y).predict(X)
    pred_l2_C_inf = lr_l2_C_inf.fit(X, y).predict(X)
    assert_array_equal(pred_none, pred_l2_C_inf)

    # LogisticRegressionCV refuses penalty='none' outright.
    lr = LogisticRegressionCV(penalty='none')
    assert_raise_message(
        ValueError,
        "penalty='none' is not useful and not supported by "
        "LogisticRegressionCV",
        lr.fit, X, y
    )
@pytest.mark.parametrize(
    "params",
    [{'penalty': 'l1', 'dual': False, 'tol': 1e-12, 'max_iter': 1000},
     {'penalty': 'l2', 'dual': True, 'tol': 1e-12, 'max_iter': 1000},
     {'penalty': 'l2', 'dual': False, 'tol': 1e-12, 'max_iter': 1000}]
)
def test_logisticregression_liblinear_sample_weight(params):
    """liblinear must honor sample_weight in every supported configuration
    (l1-primal, l2-primal, l2-dual): appending a label-flipped copy of the
    data with zero weight must not change the fit."""
    X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],
                  [2, 1], [2, 1], [2, 1], [2, 1],
                  [3, 3], [3, 3], [3, 3], [3, 3],
                  [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.dtype('float'))
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2,
                  1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype('int'))

    # Duplicate the data with flipped labels and give the copy zero weight.
    X2 = np.vstack([X, X])
    y2 = np.hstack([y, 3 - y])
    sample_weight = np.ones(shape=len(y) * 2)
    sample_weight[len(y):] = 0
    X2, y2, sample_weight = shuffle(X2, y2, sample_weight, random_state=0)

    base_clf = LogisticRegression(solver='liblinear', random_state=42)
    base_clf.set_params(**params)
    clf_no_weight = clone(base_clf).fit(X, y)
    clf_with_weight = clone(base_clf).fit(X2, y2,
                                          sample_weight=sample_weight)

    for method in ("predict", "predict_proba", "decision_function"):
        assert_allclose(getattr(clf_no_weight, method)(X),
                        getattr(clf_with_weight, method)(X))
def test_scores_attribute_layout_elasticnet():
    """Non-regression test for issue #14955: with elasticnet, scores_ is
    (n_folds, n_Cs, n_l1_ratios) per class — check that axis 1 really is Cs
    and axis 2 really is l1_ratios by recomputing each cell with
    cross_val_score."""
    X, y = make_classification(n_samples=1000, random_state=0)
    cv = StratifiedKFold(n_splits=5)

    l1_ratios = [.1, .9]
    Cs = [.1, 1, 10]
    lrcv = LogisticRegressionCV(penalty='elasticnet', solver='saga',
                                l1_ratios=l1_ratios, Cs=Cs, cv=cv,
                                random_state=0)
    lrcv.fit(X, y)
    avg_scores_lrcv = lrcv.scores_[1].mean(axis=0)  # average over folds

    for i, C in enumerate(Cs):
        for j, l1_ratio in enumerate(l1_ratios):
            lr = LogisticRegression(penalty='elasticnet', solver='saga',
                                    C=C, l1_ratio=l1_ratio, random_state=0)
            avg_score_lr = cross_val_score(lr, X, y, cv=cv).mean()
            assert avg_scores_lrcv[i, j] == pytest.approx(avg_score_lr)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_multinomial_identifiability_on_iris(fit_intercept):
    """Test that the multinomial classification is identifiable.

    A multinomial with c classes can be modeled with
    probability_k = exp(X@coef_k) / sum(exp(X@coef_l), l=1..c) for k=1..c.
    This is not identifiable, unless one chooses a further constraint.
    According to [1], the maximum of the L2 penalized likelihood automatically
    satisfies the symmetric constraint:
    sum(coef_k, k=1..c) = 0

    Further details can be found in the appendix of [2].

    Reference
    ---------
    .. [1] Zhu, Ji and Trevor J. Hastie. "Classification of gene microarrays by
    penalized logistic regression". Biostatistics 5 3 (2004): 427-43.
    https://doi.org/10.1093/biostatistics%2Fkxg046

    .. [2] Powers, Scott, Trevor J. Hastie and Robert Tibshirani. "Nuclear
    penalized multinomial regression with an application to predicting at bat
    outcomes in baseball." Statistical modelling 18 5-6 (2017): 388-410 .
    https://arxiv.org/pdf/1706.10272.pdf
    """
    # Test logistic regression with the iris dataset
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(C=len(iris.data), solver='lbfgs', max_iter=300,
                             multi_class='multinomial',
                             fit_intercept=fit_intercept
                             )
    clf.fit(iris.data, target)

    # axis=0 is sum over classes
    assert_allclose(clf.coef_.sum(axis=0), 0, atol=1e-10)
    if fit_intercept:
        # BUG FIX: this comparison was previously a bare expression with no
        # `assert`, so the intercept constraint was never actually checked.
        assert clf.intercept_.sum(axis=0) == pytest.approx(0, abs=1e-15)
| {
"content_hash": "fade8561ddec26d0b58f34509b914e32",
"timestamp": "",
"source": "github",
"line_count": 1871,
"max_line_length": 79,
"avg_line_length": 41.667022982362376,
"alnum_prop": 0.5975833450916508,
"repo_name": "xuewei4d/scikit-learn",
"id": "480472504a92b0d208ef10268ab8809a0e48b345",
"size": "77959",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/tests/test_logistic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394526"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "5827083"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
""" Blinks an LED """
from myhdl import *
def top(pins):
    """Top-level MyHDL module driven by ``pins.clk``.

    NOTE(review): ``bob`` is never defined in this module, so elaborating
    this design raises NameError. Given the file lives under tests/ and is
    named syntax.py, this is presumably a deliberate fixture for a
    syntax/lint check — confirm before reusing. Note also that the inner
    docstring ("increments counter") does not match the code, which only
    assigns the constant 1.
    """
    @always(pins.clk.posedge)
    def count():
        """ Increments counter every time the clock rises """
        bob.next = 1
    return instances()
| {
"content_hash": "ea07d314e1e13009746500f739a524b2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 61,
"avg_line_length": 19.363636363636363,
"alnum_prop": 0.5915492957746479,
"repo_name": "nturley/synthia",
"id": "de72fbf2eb4c60d74a9823c30f42ef4e848c1f11",
"size": "213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/syntax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11563"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_, PY3
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state, _lazywhere, _lazyselect
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, kl_div, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, shape, ndarray,
product, reshape, zeros, floor, logical_and, log, sqrt, exp)
from numpy import (place, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
if PY3:
    # Python 3: bound methods no longer carry the class, so MethodType only
    # takes (func, instance); the `cls` argument is accepted and ignored to
    # keep the Python 2 calling convention.
    def instancemethod(func, obj, cls):
        return types.MethodType(func, obj)
else:
    # Python 2: types.MethodType already has the (func, obj, cls) signature.
    instancemethod = types.MethodType
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(k, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(k, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
    """A distribution with its shape, location and scale parameters fixed.

    Stores the ``*args``/``**kwds`` passed at freeze time and forwards
    them to every method of a freshly constructed copy of the wrapped
    distribution instance.
    """

    def __init__(self, dist, *args, **kwds):
        self.args = args
        self.kwds = kwds

        # create a new instance
        self.dist = dist.__class__(**dist._updated_ctor_param())

        # a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
        # _argcheck is called here purely for that side effect before the
        # support endpoints are copied onto the frozen object.
        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        self.dist._argcheck(*shapes)
        self.a, self.b = self.dist.a, self.dist.b

    @property
    def random_state(self):
        # Random-state handling is delegated to the wrapped distribution.
        return self.dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self.dist._random_state = check_random_state(seed)

    # The methods below simply forward to the wrapped distribution with the
    # frozen args/kwds appended.
    def pdf(self, x):   # raises AttributeError in frozen discrete distribution
        return self.dist.pdf(x, *self.args, **self.kwds)

    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)

    def cdf(self, x):
        return self.dist.cdf(x, *self.args, **self.kwds)

    def logcdf(self, x):
        return self.dist.logcdf(x, *self.args, **self.kwds)

    def ppf(self, q):
        return self.dist.ppf(q, *self.args, **self.kwds)

    def isf(self, q):
        return self.dist.isf(q, *self.args, **self.kwds)

    def rvs(self, size=None, random_state=None):
        # `size` and `random_state` remain per-call options even when frozen.
        kwds = self.kwds.copy()
        kwds.update({'size': size, 'random_state': random_state})
        return self.dist.rvs(*self.args, **kwds)

    def sf(self, x):
        return self.dist.sf(x, *self.args, **self.kwds)

    def logsf(self, x):
        return self.dist.logsf(x, *self.args, **self.kwds)

    def stats(self, moments='mv'):
        kwds = self.kwds.copy()
        kwds.update({'moments': moments})
        return self.dist.stats(*self.args, **kwds)

    def median(self):
        return self.dist.median(*self.args, **self.kwds)

    def mean(self):
        return self.dist.mean(*self.args, **self.kwds)

    def var(self):
        return self.dist.var(*self.args, **self.kwds)

    def std(self):
        return self.dist.std(*self.args, **self.kwds)

    def moment(self, n):
        return self.dist.moment(n, *self.args, **self.kwds)

    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)

    def pmf(self, k):
        return self.dist.pmf(k, *self.args, **self.kwds)

    def logpmf(self, k):
        return self.dist.logpmf(k, *self.args, **self.kwds)

    def interval(self, alpha):
        return self.dist.interval(alpha, *self.args, **self.kwds)

    def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
        # expect method only accepts shape parameters as positional args
        # hence convert self.args, self.kwds, also loc/scale
        # See the .expect method docstrings for the meaning of
        # other parameters.
        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
        if isinstance(self.dist, rv_discrete):
            return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
        else:
            return self.dist.expect(func, a, loc, scale, lb, ub,
                                    conditional, **kwds)
# This should be rewritten
def argsreduce(cond, *args):
    """Keep only the entries of each argument where `cond` is True.

    Every argument is broadcast against `cond` and flattened, so each
    returned array is 1-D with one element per True entry of `cond`
    (scalars are expanded to `cond`'s full shape first).

    Examples
    --------
    >>> import numpy as np
    >>> rand = np.random.random_sample
    >>> A = rand((4, 5))
    >>> B = 2
    >>> C = rand((1, 5))
    >>> cond = np.ones(A.shape)
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> B1.shape
    (20,)
    >>> cond[2,:] = 0
    >>> [A2, B2, C2] = argsreduce(cond, A, B, C)
    >>> B2.shape
    (15,)
    """
    arrays = np.atleast_1d(*args)
    # np.atleast_1d returns a bare array (not a list) for a single argument.
    if not isinstance(arrays, list):
        arrays = [arrays, ]
    # Multiplying by this all-True mask broadcasts scalars and smaller
    # arrays up to cond's shape before extraction.
    expand_mask = (cond == cond)
    reduced = []
    for arr in arrays:
        reduced.append(np.extract(cond, arr * expand_mask))
    return reduced
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor
# of exp(-xs*ns) into the ive function to improve numerical stability
# at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
    """Noncentral chi-square pdf, evaluated via the stable log-pdf."""
    log_density = _ncx2_log_pdf(x, df, nc)
    return np.exp(log_density)
def _ncx2_cdf(x, df, nc):
    # Noncentral chi-square cdf; delegates to chndtr (presumably
    # scipy.special.chndtr, imported at module top -- confirm).
    return chndtr(x, df, nc)
class rv_generic(object):
    """Class which encapsulates common functionality between rv_discrete
    and rv_continuous.

    """
    def __init__(self, seed=None):
        super(rv_generic, self).__init__()

        # figure out if _stats signature has 'moments' keyword
        sign = _getargspec(self._stats)
        self._stats_has_moments = ((sign[2] is not None) or
                                   ('moments' in sign[0]))
        self._random_state = check_random_state(seed)

    @property
    def random_state(self):
        """ Get or set the RandomState object for generating random variates.

        This can be either None or an existing RandomState object.

        If None (or np.random), use the RandomState singleton used by np.random.
        If already a RandomState instance, use it.
        If an int, use a new RandomState instance seeded with seed.

        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)

    def __getstate__(self):
        # Pickle via the constructor parameters plus the random state.
        return self._updated_ctor_param(), self._random_state

    def __setstate__(self, state):
        # Rebuild the instance by re-running __init__ with the saved
        # constructor parameters, then restore the random state.
        ctor_param, r = state
        self.__init__(**ctor_param)
        self._random_state = r
        return self

    def _construct_argparser(
            self, meths_to_inspect, locscale_in, locscale_out):
        """Construct the parser for the shape arguments.

        Generates the argument-parsing functions dynamically and attaches
        them to the instance.

        Is supposed to be called in __init__ of a class for each distribution.

        If self.shapes is a non-empty string, interprets it as a
        comma-separated list of shape parameters.

        Otherwise inspects the call signatures of `meths_to_inspect`
        and constructs the argument-parsing functions from these.
        In this case also sets `shapes` and `numargs`.
        """
        if self.shapes:
            # sanitize the user-supplied shapes
            if not isinstance(self.shapes, string_types):
                raise TypeError('shapes must be a string.')

            shapes = self.shapes.replace(',', ' ').split()

            for field in shapes:
                if keyword.iskeyword(field):
                    raise SyntaxError('keywords cannot be used as shapes.')
                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                    raise SyntaxError(
                        'shapes must be valid python identifiers')
        else:
            # find out the call signatures (_pdf, _cdf etc), deduce shape
            # arguments. Generic methods only have 'self, x', any further args
            # are shapes.
            shapes_list = []
            for meth in meths_to_inspect:
                shapes_args = _getargspec(meth)  # NB: does not contain self
                args = shapes_args.args[1:]      # peel off 'x', too

                if args:
                    shapes_list.append(args)

                    # *args or **kwargs are not allowed w/automatic shapes
                    if shapes_args.varargs is not None:
                        raise TypeError(
                            '*args are not allowed w/out explicit shapes')
                    if shapes_args.keywords is not None:
                        raise TypeError(
                            '**kwds are not allowed w/out explicit shapes')
                    if shapes_args.defaults is not None:
                        raise TypeError('defaults are not allowed for shapes')

            if shapes_list:
                shapes = shapes_list[0]
                # make sure the signatures are consistent
                for item in shapes_list:
                    if item != shapes:
                        raise TypeError('Shape arguments are inconsistent.')
            else:
                shapes = []

        # have the arguments, construct the method from template
        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
        dct = dict(shape_arg_str=shapes_str,
                   locscale_in=locscale_in,
                   locscale_out=locscale_out,
                   )
        # The exec'd source comes from the module-level template; only the
        # sanitized shape names above are interpolated into it.
        ns = {}
        exec_(parse_arg_template % dct, ns)
        # NB: attach to the instance, not class
        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
            setattr(self, name,
                    instancemethod(ns[name], self, self.__class__)
                    )

        self.shapes = ', '.join(shapes) if shapes else None
        if not hasattr(self, 'numargs'):
            # allows more general subclassing with *args
            self.numargs = len(shapes)

    def _construct_doc(self, docdict, shapes_vals=None):
        """Construct the instance docstring with string substitutions."""
        tempdict = docdict.copy()
        tempdict['name'] = self.name or 'distname'
        tempdict['shapes'] = self.shapes or ''

        if shapes_vals is None:
            shapes_vals = ()
        vals = ', '.join('%.3g' % val for val in shapes_vals)
        tempdict['vals'] = vals

        tempdict['shapes_'] = self.shapes or ''
        if self.shapes and self.numargs == 1:
            tempdict['shapes_'] += ','

        if self.shapes:
            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
        else:
            tempdict['set_vals_stmt'] = ''

        if self.shapes is None:
            # remove shapes from call parameters if there are none
            for item in ['default', 'before_notes']:
                tempdict[item] = tempdict[item].replace(
                    "\n%(shapes)s : array_like\n    shape parameters", "")
        for i in range(2):
            if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (w w/o ", ")
                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
            self.__doc__ = doccer.docformat(self.__doc__, tempdict)

        # correct for empty shapes
        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')

    def _construct_default_doc(self, longname=None, extradoc=None,
                               docdict=None, discrete='continuous'):
        """Construct instance docstring from the default template."""
        if longname is None:
            longname = 'A'
        if extradoc is None:
            extradoc = ''
        if extradoc.startswith('\n\n'):
            extradoc = extradoc[2:]
        self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
                                '\n\n%(before_notes)s\n', docheaders['notes'],
                                extradoc, '\n%(example)s'])
        self._construct_doc(docdict)

    def freeze(self, *args, **kwds):
        """Freeze the distribution for the given arguments.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution.  Should include all
            the non-optional arguments, may include ``loc`` and ``scale``.

        Returns
        -------
        rv_frozen : rv_frozen instance
            The frozen distribution.

        """
        return rv_frozen(self, *args, **kwds)

    def __call__(self, *args, **kwds):
        return self.freeze(*args, **kwds)
    __call__.__doc__ = freeze.__doc__

    # The actual calculation functions (no basic checking need be done)
    # If these are defined, the others won't be looked at.
    # Otherwise, the other set can be defined.
    def _stats(self, *args, **kwds):
        # Default: no closed-form stats known; callers fall back to _munp.
        return None, None, None, None

    # Central moments
    def _munp(self, n, *args):
        # Silence floating point warnings from integration.
        olderr = np.seterr(all='ignore')
        vals = self.generic_moment(n, *args)
        np.seterr(**olderr)
        return vals

    def _argcheck_rvs(self, *args, **kwargs):
        # Handle broadcasting and size validation of the rvs method.
        # Subclasses should not have to override this method.
        # The rule is that if `size` is not None, then `size` gives the
        # shape of the result (integer values of `size` are treated as
        # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
        #
        # `args` is expected to contain the shape parameters (if any), the
        # location and the scale in a flat tuple (e.g. if there are two
        # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
        # The only keyword argument expected is 'size'.
        size = kwargs.get('size', None)
        all_bcast = np.broadcast_arrays(*args)

        def squeeze_left(a):
            while a.ndim > 0 and a.shape[0] == 1:
                a = a[0]
            return a

        # Eliminate trivial leading dimensions.  In the convention
        # used by numpy's random variate generators, trivial leading
        # dimensions are effectively ignored.  In other words, when `size`
        # is given, trivial leading dimensions of the broadcast parameters
        # in excess of the number of dimensions in size are ignored, e.g.
        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
        #   array([ 1.00104267,  3.00422496,  4.99799278])
        # If `size` is not given, the exact broadcast shape is preserved:
        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
        #   array([[[[ 1.00862899,  3.00061431,  4.99867122]]]])
        #
        all_bcast = [squeeze_left(a) for a in all_bcast]
        bcast_shape = all_bcast[0].shape
        bcast_ndim = all_bcast[0].ndim

        if size is None:
            size_ = bcast_shape
        else:
            size_ = tuple(np.atleast_1d(size))

        # Check compatibility of size_ with the broadcast shape of all
        # the parameters.  This check is intended to be consistent with
        # how the numpy random variate generators (e.g. np.random.normal,
        # np.random.beta) handle their arguments.   The rule is that, if size
        # is given, it determines the shape of the output.  Broadcasting
        # can't change the output size.

        # This is the standard broadcasting convention of extending the
        # shape with fewer dimensions with enough dimensions of length 1
        # so that the two shapes have the same number of dimensions.
        ndiff = bcast_ndim - len(size_)
        if ndiff < 0:
            bcast_shape = (1,)*(-ndiff) + bcast_shape
        elif ndiff > 0:
            size_ = (1,)*ndiff + size_

        # This compatibility test is not standard.  In "regular" broadcasting,
        # two shapes are compatible if for each dimension, the lengths are the
        # same or one of the lengths is 1.  Here, the length of a dimension in
        # size_ must not be less than the corresponding length in bcast_shape.
        ok = all([bcdim == 1 or bcdim == szdim
                  for (bcdim, szdim) in zip(bcast_shape, size_)])
        if not ok:
            raise ValueError("size does not match the broadcast shape of "
                             "the parameters.")

        param_bcast = all_bcast[:-2]
        loc_bcast = all_bcast[-2]
        scale_bcast = all_bcast[-1]

        return param_bcast, loc_bcast, scale_bcast, size_

    ## These are the methods you must define (standard form functions)
    ## NB: generic _pdf, _logpdf, _cdf are different for
    ## rv_continuous and rv_discrete hence are defined in there
    def _argcheck(self, *args):
        """Default check for correct values on args and keywords.

        Returns condition array of 1's where arguments are correct and
        0's where they are not.

        """
        cond = 1
        for arg in args:
            cond = logical_and(cond, (asarray(arg) > 0))
        return cond

    def _support_mask(self, x):
        # True where x lies inside the closed support [a, b].
        return (self.a <= x) & (x <= self.b)

    def _open_support_mask(self, x):
        # True where x lies strictly inside the open support (a, b).
        return (self.a < x) & (x < self.b)

    def _rvs(self, *args):
        # This method must handle self._size being a tuple, and it must
        # properly broadcast *args and self._size.  self._size might be
        # an empty tuple, which means a scalar random variate is to be
        # generated.

        ## Use basic inverse cdf algorithm for RV generation as default.
        U = self._random_state.random_sample(self._size)
        Y = self._ppf(U, *args)
        return Y

    def _logcdf(self, x, *args):
        return log(self._cdf(x, *args))

    def _sf(self, x, *args):
        return 1.0-self._cdf(x, *args)

    def _logsf(self, x, *args):
        return log(self._sf(x, *args))

    def _ppf(self, q, *args):
        return self._ppfvec(q, *args)

    def _isf(self, q, *args):
        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses

    # These are actually called, and should not be overwritten if you
    # want to keep error checking.
    def rvs(self, *args, **kwds):
        """
        Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default is 1).
        random_state : None or int or ``np.random.RandomState`` instance, optional
            If int or RandomState, use it for drawing the random variates.
            If None, rely on ``self.random_state``.
            Default is None.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.

        """
        discrete = kwds.pop('discrete', None)
        rndm = kwds.pop('random_state', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            raise ValueError("Domain error in arguments.")

        if np.all(scale == 0):
            # Degenerate case: zero scale means a point mass at loc.
            return loc*ones(size, 'd')

        # extra gymnastics needed for a custom random_state
        if rndm is not None:
            random_state_saved = self._random_state
            self._random_state = check_random_state(rndm)

        # `size` should just be an argument to _rvs(), but for, um,
        # historical reasons, it is made an attribute that is read
        # by _rvs().
        self._size = size
        vals = self._rvs(*args)

        vals = vals * scale + loc

        # do not forget to restore the _random_state
        if rndm is not None:
            self._random_state = random_state_saved

        # Cast to int if discrete
        if discrete:
            if size == ():
                vals = int(vals)
            else:
                vals = vals.astype(int)

        return vals

    def stats(self, *args, **kwds):
        """
        Some statistics of the given RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (continuous RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default is 'mv')

        Returns
        -------
        stats : sequence
            of requested moments.

        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = []
        default = valarray(shape(cond), self.badvalue)

        # Use only entries that are valid in calculation
        if np.any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]

            if self._stats_has_moments:
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)
            if g1 is None:
                mu3 = None
            else:
                if mu2 is None:
                    mu2 = self._munp(2, *goodargs)
                if g2 is None:
                    # (mu2**1.5) breaks down for nan and inf
                    mu3 = g1 * np.power(mu2, 1.5)

            if 'm' in moments:
                if mu is None:
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                place(out0, cond, mu * scale + loc)
                output.append(out0)

            if 'v' in moments:
                if mu2 is None:
                    # Recover the variance from non-central moments.
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    mu2 = mu2p - mu * mu
                if np.isinf(mu):
                    # if mean is inf then var is also inf
                    mu2 = np.inf
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)

            if 's' in moments:
                if g1 is None:
                    # Recover the skew from non-central moments.
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    mu3 = mu3p - 3 * mu * mu2 - mu**3
                    g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)

            if 'k' in moments:
                if g2 is None:
                    # Recover the kurtosis from non-central moments.
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        mu3 = mu3p - 3 * mu * mu2 - mu**3
                    mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
                    g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = []
            for _ in moments:
                out0 = default.copy()
                output.append(out0)

        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)

    def entropy(self, *args, **kwds):
        """
        Differential entropy of the RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional  (continuous distributions only).
            Scale parameter (default=1).

        Notes
        -----
        Entropy is defined base `e`:

        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, *args)
        # log(scale) accounts for the entropy shift of a scale transform.
        place(output, cond0, self.vecentropy(*goodargs) + log(scale))
        return output

    def moment(self, n, *args, **kwds):
        """
        n-th order non-central moment of distribution.

        Parameters
        ----------
        n : int, n >= 1
            Order of moment.
        arg1, arg2, arg3,... : float
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        if not (self._argcheck(*args) and (scale > 0)):
            return nan
        if (floor(n) != n):
            raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be positive.")
        mu, mu2, g1, g2 = None, None, None, None
        if (n > 0) and (n < 5):
            # Request only the stats needed for moments up to order n.
            if self._stats_has_moments:
                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
            else:
                mdict = {}
            mu, mu2, g1, g2 = self._stats(*args, **mdict)
        val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)

        # Convert to transformed  X = L + S*Y
        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
        if loc == 0:
            return scale**n * val
        else:
            result = 0
            fac = float(scale) / float(loc)
            for k in range(n):
                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
                                          args)
                result += comb(n, k, exact=True)*(fac**k) * valk
            result += fac**n * val
            return result * loc**n

    def median(self, *args, **kwds):
        """
        Median of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter, Default is 0.
        scale : array_like, optional
            Scale parameter, Default is 1.

        Returns
        -------
        median : float
            The median of the distribution.

        See Also
        --------
        stats.distributions.rv_discrete.ppf
            Inverse of the CDF

        """
        return self.ppf(0.5, *args, **kwds)

    def mean(self, *args, **kwds):
        """
        Mean of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        mean : float
            the mean of the distribution

        """
        kwds['moments'] = 'm'
        res = self.stats(*args, **kwds)
        if isinstance(res, ndarray) and res.ndim == 0:
            # unwrap 0-d arrays to plain scalars
            return res[()]
        return res

    def var(self, *args, **kwds):
        """
        Variance of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        var : float
            the variance of the distribution

        """
        kwds['moments'] = 'v'
        res = self.stats(*args, **kwds)
        if isinstance(res, ndarray) and res.ndim == 0:
            # unwrap 0-d arrays to plain scalars
            return res[()]
        return res

    def std(self, *args, **kwds):
        """
        Standard deviation of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        std : float
            standard deviation of the distribution

        """
        kwds['moments'] = 'v'
        res = sqrt(self.stats(*args, **kwds))
        return res

    def interval(self, alpha, *args, **kwds):
        """
        Confidence interval with equal areas around the median.

        Parameters
        ----------
        alpha : array_like of float
            Probability that an rv will be drawn from the returned range.
            Each value should be in the range [0, 1].
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : ndarray of float
            end-points of range that contain ``100 * alpha %`` of the rv's
            possible values.

        """
        alpha = asarray(alpha)
        if np.any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might work in all
cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
    # Default pdf: numerical derivative of the (possibly overridden)
    # _cdf.  NOTE(review): `derivative` is a module-level helper
    # (historically scipy.misc.derivative, removed from scipy >= 1.12)
    # — confirm the import if upgrading.  dx/order are an
    # accuracy/cost compromise.
    return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
    """
    Probability density function at x of the given RV.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    pdf : ndarray
        Probability density function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Promote to at least float64 so integer input yields float output.
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    # Standardize: work with y = (x - loc) / scale.
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    # cond0: valid shape parameters and positive scale.
    cond0 = self._argcheck(*args) & (scale > 0)
    # cond1: standardized x inside the support.
    cond1 = self._support_mask(x) & (scale > 0)
    cond = cond0 & cond1
    output = zeros(shape(cond), dtyp)
    # Invalid parameters or nan input -> badvalue; out-of-support
    # points keep the 0.0 fill.
    putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
    if np.any(cond):
        # Evaluate _pdf only on the valid entries; divide by scale
        # (change of variables from the standardized density).
        goodargs = argsreduce(cond, *((x,)+args+(scale,)))
        scale, goodargs = goodargs[-1], goodargs[:-1]
        place(output, cond, self._pdf(*goodargs) / scale)
    if output.ndim == 0:
        # 0-d array -> return a scalar, matching numpy conventions.
        return output[()]
    return output
def logpdf(self, x, *args, **kwds):
    """
    Log of the probability density function at x of the given RV.

    This uses a more numerically accurate calculation if available.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    logpdf : array_like
        Log of the probability density function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Promote to at least float64 and standardize x.
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    # cond0: valid shape parameters and positive scale.
    cond0 = self._argcheck(*args) & (scale > 0)
    # cond1: standardized x inside the support.
    cond1 = self._support_mask(x) & (scale > 0)
    cond = cond0 & cond1
    # Out-of-support points report log(0) = -inf, hence the NINF fill.
    output = empty(shape(cond), dtyp)
    output.fill(NINF)
    # Invalid parameters or nan input -> badvalue.
    putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
    if np.any(cond):
        # log pdf(x)/scale == _logpdf(y) - log(scale).
        goodargs = argsreduce(cond, *((x,)+args+(scale,)))
        scale, goodargs = goodargs[-1], goodargs[:-1]
        place(output, cond, self._logpdf(*goodargs) - log(scale))
    if output.ndim == 0:
        # 0-d array -> scalar.
        return output[()]
    return output
def cdf(self, x, *args, **kwds):
    """
    Cumulative distribution function of the given RV.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    cdf : ndarray
        Cumulative distribution function evaluated at `x`
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Promote to at least float64 and standardize x.
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    # cond0: valid shape parameters and positive scale.
    cond0 = self._argcheck(*args) & (scale > 0)
    # cond1: strictly inside the open support.
    cond1 = self._open_support_mask(x) & (scale > 0)
    # cond2: at or beyond the upper bound -> cdf is exactly 1.
    cond2 = (x >= self.b) & cond0
    cond = cond0 & cond1
    output = zeros(shape(cond), dtyp)
    # Invalid parameters or nan input -> badvalue; below-support
    # points keep the 0.0 fill.
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    place(output, cond2, 1.0)
    if np.any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._cdf(*goodargs))
    if output.ndim == 0:
        # 0-d array -> scalar.
        return output[()]
    return output
def logcdf(self, x, *args, **kwds):
    """
    Log of the cumulative distribution function at x of the given RV.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    logcdf : array_like
        Log of the cumulative distribution function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Promote to at least float64 and standardize x.
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    # cond0: valid shape parameters and positive scale.
    cond0 = self._argcheck(*args) & (scale > 0)
    # cond1: strictly inside the open support.
    cond1 = self._open_support_mask(x) & (scale > 0)
    # cond2: at or beyond the upper bound -> log(cdf) == log(1) == 0.
    cond2 = (x >= self.b) & cond0
    cond = cond0 & cond1
    # Below-support points report log(0) = -inf, hence the NINF fill.
    output = empty(shape(cond), dtyp)
    output.fill(NINF)
    # (cond1 == cond1) is an all-True array of cond1's shape; it is
    # multiplied in solely to broadcast the bad-parameter mask up to
    # the full output shape before `place` applies it.
    place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
    place(output, cond2, 0.0)
    if np.any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._logcdf(*goodargs))
    if output.ndim == 0:
        # 0-d array -> scalar.
        return output[()]
    return output
def sf(self, x, *args, **kwds):
    """
    Survival function (1 - `cdf`) at x of the given RV.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    sf : array_like
        Survival function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Promote to at least float64 and standardize x.
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    # cond0: valid shape parameters and positive scale.
    cond0 = self._argcheck(*args) & (scale > 0)
    # cond1: strictly inside the open support.
    cond1 = self._open_support_mask(x) & (scale > 0)
    # cond2: at or below the lower bound -> sf is exactly 1.
    cond2 = cond0 & (x <= self.a)
    cond = cond0 & cond1
    output = zeros(shape(cond), dtyp)
    # Invalid parameters or nan input -> badvalue; above-support
    # points keep the 0.0 fill.
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    place(output, cond2, 1.0)
    if np.any(cond):
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._sf(*goodargs))
    if output.ndim == 0:
        # 0-d array -> scalar.
        return output[()]
    return output
def logsf(self, x, *args, **kwds):
    """
    Log of the survival function of the given RV.

    Returns the log of the "survival function," defined as (1 - `cdf`),
    evaluated at `x`.

    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    logsf : ndarray
        Log of the survival function evaluated at `x`.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # Promote to at least float64 and standardize x.
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    # cond0: valid shape parameters and positive scale.
    cond0 = self._argcheck(*args) & (scale > 0)
    # cond1: strictly inside the open support.
    cond1 = self._open_support_mask(x) & (scale > 0)
    # cond2: at or below the lower bound -> log(sf) == log(1) == 0.
    cond2 = cond0 & (x <= self.a)
    cond = cond0 & cond1
    # Above-support points report log(0) = -inf, hence the NINF fill.
    output = empty(shape(cond), dtyp)
    output.fill(NINF)
    # Invalid parameters or nan input -> badvalue.
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    place(output, cond2, 0.0)
    if np.any(cond):
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._logsf(*goodargs))
    if output.ndim == 0:
        # 0-d array -> scalar.
        return output[()]
    return output
def ppf(self, q, *args, **kwds):
    """
    Percent point function (inverse of `cdf`) at q of the given RV.

    Parameters
    ----------
    q : array_like
        lower tail probability
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    x : array_like
        quantile corresponding to the lower tail probability q.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    q, loc, scale = map(asarray, (q, loc, scale))
    args = tuple(map(asarray, args))
    # cond0: valid parameters; (loc == loc) filters out nan locations.
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    # cond1: q strictly inside (0, 1).
    cond1 = (0 < q) & (q < 1)
    # Edge cases: q == 0 -> lower support bound, q == 1 -> upper bound.
    cond2 = cond0 & (q == 0)
    cond3 = cond0 & (q == 1)
    cond = cond0 & cond1
    # Fill with badvalue; valid entries are overwritten below.
    output = valarray(shape(cond), value=self.badvalue)

    # Support bounds of the shifted/scaled distribution.
    lower_bound = self.a * scale + loc
    upper_bound = self.b * scale + loc
    place(output, cond2, argsreduce(cond2, lower_bound)[0])
    place(output, cond3, argsreduce(cond3, upper_bound)[0])

    if np.any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
        scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
        # Un-standardize the quantile of the standard distribution.
        place(output, cond, self._ppf(*goodargs) * scale + loc)
    if output.ndim == 0:
        # 0-d array -> scalar.
        return output[()]
    return output
def isf(self, q, *args, **kwds):
    """
    Inverse survival function (inverse of `sf`) at q of the given RV.

    Parameters
    ----------
    q : array_like
        upper tail probability
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)

    Returns
    -------
    x : ndarray or scalar
        Quantile corresponding to the upper tail probability q.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    q, loc, scale = map(asarray, (q, loc, scale))
    args = tuple(map(asarray, args))
    # cond0: valid parameters; (loc == loc) filters out nan locations.
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    # cond1: q strictly inside (0, 1).
    cond1 = (0 < q) & (q < 1)
    # Edge cases are mirrored relative to ppf: isf(1) is the lower
    # support bound, isf(0) the upper one.
    cond2 = cond0 & (q == 1)
    cond3 = cond0 & (q == 0)
    cond = cond0 & cond1
    # Fill with badvalue; valid entries are overwritten below.
    output = valarray(shape(cond), value=self.badvalue)

    # Support bounds of the shifted/scaled distribution.
    lower_bound = self.a * scale + loc
    upper_bound = self.b * scale + loc
    place(output, cond2, argsreduce(cond2, lower_bound)[0])
    place(output, cond3, argsreduce(cond3, upper_bound)[0])

    if np.any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
        scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
        # Un-standardize the quantile of the standard distribution.
        place(output, cond, self._isf(*goodargs) * scale + loc)
    if output.ndim == 0:
        # 0-d array -> scalar.
        return output[()]
    return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
return loc, scale, args
def nnlf(self, theta, x):
    """Return the negative loglikelihood function.

    Notes
    -----
    This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
    parameters (including loc and scale).  Returns ``inf`` for invalid
    shape parameters, non-positive scale, or data outside the support.
    """
    loc, scale, shape_args = self._unpack_loc_scale(theta)
    if not self._argcheck(*shape_args) or scale <= 0:
        return inf
    standardized = asarray((x - loc) / scale)
    if np.any(~self._support_mask(standardized)):
        return inf
    return (self._nnlf(standardized, *shape_args)
            + len(standardized) * log(scale))
def _nnlf_and_penalty(self, x, args):
    # Negative log-likelihood in which out-of-support samples and
    # samples with non-finite logpdf contribute a large *finite*
    # penalty (proportional to log(_XMAX)) instead of inf, keeping
    # the fit objective usable by generic optimizers.
    outside = ~self._support_mask(x)
    n_bad = np.count_nonzero(outside, axis=0)
    if n_bad > 0:
        # Drop the offending samples before evaluating logpdf.
        x = argsreduce(~outside, x)[0]
    log_density = self._logpdf(x, *args)
    finite_mask = np.isfinite(log_density)
    n_bad += np.sum(~finite_mask, axis=0)
    if n_bad > 0:
        penalty = n_bad * log(_XMAX) * 100
        return -np.sum(log_density[finite_mask], axis=0) + penalty
    return -np.sum(log_density, axis=0)
def _penalized_nnlf(self, theta, x):
''' Return penalized negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
# First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, can specify either `f1` or `fa`.
# Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
    """
    Return MLEs for shape (if applicable), location, and scale
    parameters from data.

    MLE stands for Maximum Likelihood Estimate.  Starting estimates for
    the fit are given by input arguments; for any arguments not provided
    with starting estimates, ``self._fitstart(data)`` is called to
    generate such.

    One can hold some parameters fixed to specific values by passing in
    keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
    and ``floc`` and ``fscale`` (for location and scale parameters,
    respectively).

    Parameters
    ----------
    data : array_like
        Data to use in calculating the MLEs.
    args : floats, optional
        Starting value(s) for any shape-characterizing arguments (those
        not provided will be determined by a call to ``_fitstart(data)``).
        No default value.
    kwds : floats, optional
        Starting values for the location and scale parameters; no default.
        Special keyword arguments are recognized as holding certain
        parameters fixed:

        - f0...fn : hold respective shape parameters fixed.
          Alternatively, shape parameters to fix can be specified by name.
          For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
          are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
          equivalent to ``f1``.
        - floc : hold location parameter fixed to specified value.
        - fscale : hold scale parameter fixed to specified value.
        - optimizer : The optimizer to use.  The optimizer must take
          ``func``, and starting position as the first two arguments,
          plus ``args`` (for extra arguments to pass to the function
          to be optimized) and ``disp=0`` to suppress output as
          keyword arguments.

    Returns
    -------
    mle_tuple : tuple of floats
        MLEs for any shape parameters (if applicable), followed by those
        for location and scale.  For most random variables, shape
        statistics will be returned, but there are exceptions
        (e.g. ``norm``).

    Notes
    -----
    This fit is computed by maximizing a log-likelihood function, with
    penalty applied for samples outside of range of the distribution.
    The returned answer is not guaranteed to be the globally optimal
    MLE, it may only be locally optimal, or the optimization may fail
    altogether.

    Examples
    --------
    Generate some data to fit: draw random variates from the `beta`
    distribution

    >>> from scipy.stats import beta
    >>> a, b = 1., 2.
    >>> x = beta.rvs(a, b, size=1000)

    Now we can fit all four parameters (``a``, ``b``, ``loc`` and
    ``scale``):

    >>> a1, b1, loc1, scale1 = beta.fit(x)

    We can also use some prior knowledge about the dataset: let's keep
    ``loc`` and ``scale`` fixed:

    >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
    >>> loc1, scale1
    (0, 1)

    We can also keep shape parameters fixed by using ``f``-keywords.  To
    keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
    equivalently, ``fa=1``:

    >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
    >>> a1
    1

    Not all distributions return estimates for the shape parameters.
    ``norm`` for example just returns estimates for location and scale:

    >>> from scipy.stats import norm
    >>> x = norm.rvs(a, b, size=1000, random_state=123)
    >>> loc1, scale1 = norm.fit(x)
    >>> loc1, scale1
    (0.92087172783841631, 2.0015750750324668)
    """
    Narg = len(args)
    if Narg > self.numargs:
        raise TypeError("Too many input arguments.")

    # Fill in any shape/loc/scale starting values the caller omitted.
    start = [None]*2
    if (Narg < self.numargs) or not ('loc' in kwds and
                                     'scale' in kwds):
        # get distribution specific starting locations
        start = self._fitstart(data)
        args += start[Narg:-2]
    loc = kwds.pop('loc', start[-2])
    scale = kwds.pop('scale', start[-1])
    args += (loc, scale)
    # Split into free/fixed parameters and build the fit objective.
    x0, func, restore, args = self._reduce_func(args, kwds)

    optimizer = kwds.pop('optimizer', optimize.fmin)
    # convert string to function in scipy.optimize
    # NOTE(review): `string_types` comes from the module's py2/py3
    # compat imports; on py3 it is effectively (str,).
    if not callable(optimizer) and isinstance(optimizer, string_types):
        if not optimizer.startswith('fmin_'):
            optimizer = "fmin_"+optimizer
        if optimizer == 'fmin_':
            optimizer = 'fmin'
        try:
            optimizer = getattr(optimize, optimizer)
        except AttributeError:
            raise ValueError("%s is not a valid optimizer" % optimizer)

    # by now kwds must be empty, since everybody took what they needed
    if kwds:
        raise TypeError("Unknown arguments: %s." % kwds)

    vals = optimizer(func, x0, args=(ravel(data),), disp=0)
    if restore is not None:
        # Re-insert the fixed parameters into the optimized vector.
        vals = restore(args, vals)
    vals = tuple(vals)
    return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
    """Estimate loc and scale parameters from data using 1st and 2nd moments.

    Parameters
    ----------
    data : array_like
        Data to fit.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of
        the instance object for more information).

    Returns
    -------
    Lhat : float
        Estimated location parameter for the data.
    Shat : float
        Estimated scale parameter for the data.
    """
    # Theoretical mean and variance of the standard distribution.
    mu, mu2 = self.stats(*args, moments='mv')
    sample = asarray(data)
    sample_mean = sample.mean()
    sample_var = sample.var()
    # Match second moments for scale, then first moments for location.
    Shat = sqrt(sample_var / mu2)
    Lhat = sample_mean - Shat*mu
    # Guard against degenerate or non-finite estimates.
    if not np.isfinite(Lhat):
        Lhat = 0
    if not (np.isfinite(Shat) and (0 < Shat)):
        Shat = 1
    return Lhat, Shat
def _entropy(self, *args):
    # Differential entropy by numerical integration of entr(pdf(x))
    # over the support, with a quantile-clipped retry on failure.
    def integ(x):
        val = self._pdf(x, *args)
        return entr(val)

    # upper limit is often inf, so suppress warnings when integrating
    olderr = np.seterr(over='ignore')
    h = integrate.quad(integ, self.a, self.b)[0]
    np.seterr(**olderr)

    if not np.isnan(h):
        return h
    else:
        # try with different limits if integration problems:
        # clip each infinite tail at the 1e-10 / 1 - 1e-10 quantile.
        low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
        if np.isinf(self.b):
            upper = upp
        else:
            upper = self.b
        if np.isinf(self.a):
            lower = low
        else:
            lower = self.a
        return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
           conditional=False, **kwds):
    """Calculate expected value of a function with respect to the
    distribution.

    The expected value of a function ``f(x)`` with respect to a
    distribution ``dist`` is defined as::

                ubound
        E[x] = Integral(f(x) * dist.pdf(x))
                lbound

    Parameters
    ----------
    func : callable, optional
        Function for which integral is calculated. Takes only one argument.
        The default is the identity mapping f(x) = x.
    args : tuple, optional
        Shape parameters of the distribution.
    loc : float, optional
        Location parameter (default=0).
    scale : float, optional
        Scale parameter (default=1).
    lb, ub : scalar, optional
        Lower and upper bound for integration. Default is set to the
        support of the distribution.
    conditional : bool, optional
        If True, the integral is corrected by the conditional probability
        of the integration interval. The return value is the expectation
        of the function, conditional on being in the given interval.
        Default is False.

    Additional keyword arguments are passed to the integration routine.

    Returns
    -------
    expect : float
        The calculated expected value.

    Notes
    -----
    The integration behavior of this function is inherited from
    `integrate.quad`.
    """
    lockwds = {'loc': loc,
               'scale': scale}
    # Validate the shape parameters (called for its checks only;
    # the result is intentionally discarded).
    self._argcheck(*args)
    if func is None:
        def fun(x, *args):
            return x * self.pdf(x, *args, **lockwds)
    else:
        def fun(x, *args):
            return func(x) * self.pdf(x, *args, **lockwds)
    if lb is None:
        # Default bounds: the support transformed by loc/scale.
        lb = loc + self.a * scale
    if ub is None:
        ub = loc + self.b * scale
    if conditional:
        # P(lb <= X <= ub) as a difference of survival functions.
        invfac = (self.sf(lb, *args, **lockwds)
                  - self.sf(ub, *args, **lockwds))
    else:
        invfac = 1.0
    kwds['args'] = args
    # Silence floating point warnings from integration.
    olderr = np.seterr(all='ignore')
    vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
    np.seterr(**olderr)
    return vals
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
    """Non-central moment of discrete distribution."""
    # Sum x**n * pmf(x) over the lattice via the module-level _expect
    # helper, starting the summation near the median (ppf(0.5)) and
    # stepping by self.inc.
    def fun(x):
        return np.power(x, n) * self._pmf(x, *args)
    return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    # Generic scalar ppf for a discrete distribution: find the smallest
    # lattice point k with cdf(k) >= q by (1) expanding a finite
    # bracket [a, b] around q, then (2) integer bisection.
    b = self.b
    a = self.a
    if isinf(b):            # Be sure ending point is > q
        b = int(max(100*q, 10))
        while 1:
            if b >= self.b:
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):            # be sure starting point < q
        a = int(min(-100*q, -10))
        while 1:
            if a <= self.a:
                # NOTE(review): ``qb = 0.0`` here looks like it should
                # be ``qa = 0.0``; the branch appears unreachable when
                # self.a is -inf (a finite int is never <= -inf), so
                # it has no observable effect — confirm before changing.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)

    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # Bracket collapsed to adjacent integers: choose the one on
            # the correct side of q.
            # testcase: return wrong number at lower index
            # python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
            # python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
            # python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                # Midpoint equals an endpoint: no progress possible.
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
def entropy(pk, qk=None, base=None):
    """Calculate the entropy of a distribution for given probability values.

    If only probabilities `pk` are given, the entropy is calculated as
    ``S = -sum(pk * log(pk), axis=0)``.

    If `qk` is not None, then compute the Kullback-Leibler divergence
    ``S = sum(pk * log(pk / qk), axis=0)``.

    This routine will normalize `pk` and `qk` if they don't sum to 1.

    Parameters
    ----------
    pk : sequence
        Defines the (discrete) distribution. ``pk[i]`` is the (possibly
        unnormalized) probability of event ``i``.
    qk : sequence, optional
        Sequence against which the relative entropy is computed. Should be
        in the same format as `pk`.
    base : float, optional
        The logarithmic base to use, defaults to ``e`` (natural logarithm).

    Returns
    -------
    S : float
        The calculated entropy.
    """
    probs = asarray(pk)
    probs = 1.0 * probs / np.sum(probs, axis=0)
    if qk is None:
        contributions = entr(probs)
    else:
        ref = asarray(qk)
        if len(ref) != len(probs):
            raise ValueError("qk and pk must have same length.")
        ref = 1.0 * ref / np.sum(ref, axis=0)
        contributions = rel_entr(probs, ref)
    total = np.sum(contributions, axis=0)
    if base is not None:
        total /= log(base)
    return total
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers with non-zero
probabilities ``pk`` with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
            moment_tol=1e-8, values=None, inc=1, longname=None,
            shapes=None, extradoc=None, seed=None):
    # When explicit (xk, pk) ``values`` are supplied, instantiate the
    # rv_sample subclass, which implements the finite-support
    # machinery; otherwise create a plain rv_discrete instance.
    if values is not None:
        # dispatch to a subclass
        return super(rv_discrete, cls).__new__(rv_sample)
    else:
        # business as usual
        return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
             moment_tol=1e-8, values=None, inc=1, longname=None,
             shapes=None, extradoc=None, seed=None):
    """Initialize a discrete distribution instance (see class docstring)."""
    super(rv_discrete, self).__init__(seed)

    # cf generic freeze
    self._ctor_param = dict(
        a=a, b=b, name=name, badvalue=badvalue,
        moment_tol=moment_tol, values=values, inc=inc,
        longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)

    if badvalue is None:
        badvalue = nan
    self.badvalue = badvalue
    self.a = a
    self.b = b
    self.moment_tol = moment_tol
    self.inc = inc
    self._cdfvec = vectorize(self._cdf_single, otypes='d')
    self.vecentropy = vectorize(self._entropy)
    self.shapes = shapes

    if values is not None:
        # __new__ dispatches instances constructed with explicit
        # ``values`` to rv_sample, so reaching this point with
        # ``values`` set indicates a misuse.
        raise ValueError("rv_discrete.__init__(..., values != None, ...)")

    self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                              locscale_in='loc=0',
                              # scale=1 for discrete RVs
                              locscale_out='loc, 1')

    # nin correction needs to be after we know numargs
    # correct nin for generic moment vectorization
    _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
    _vec_generic_moment.nin = self.numargs + 2
    self.generic_moment = instancemethod(_vec_generic_moment,
                                         self, rv_discrete)

    # correct nin for ppf vectorization
    _vppf = vectorize(_drv2_ppfsingle, otypes='d')
    _vppf.nin = self.numargs + 2
    self._ppfvec = instancemethod(_vppf,
                                  self, rv_discrete)

    # now that self.numargs is defined, we can adjust nin
    self._cdfvec.nin = self.numargs + 1

    self._construct_docstrings(name, longname, extradoc)
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
    @property
    @np.deprecate(message="`return_integers` attribute is not used anywhere any "
                  " longer and is deprecated in scipy 0.18.")
    def return_integers(self):
        # Deprecated backward-compatibility shim; historically flagged that
        # discrete RVs yield integer variates.  Always 1 for rv_discrete.
        return 1
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
    def rvs(self, *args, **kwargs):
        """
        Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        size : int or tuple of ints, optional
            Defining number of random variates (Default is 1).  Note that
            `size` has to be given as keyword, not as positional argument.
        random_state : None or int or ``np.random.RandomState`` instance, optional
            If int or RandomState, use it for drawing the random variates.
            If None, rely on ``self.random_state``.
            Default is None.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.
        """
        # Mark this call as a discrete distribution for the shared
        # rv_generic.rvs machinery, then delegate to it.
        kwargs['discrete'] = True
        return super(rv_discrete, self).rvs(*args, **kwargs)
    def pmf(self, k, *args, **kwds):
        """
        Probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        pmf : array_like
            Probability mass function evaluated at k
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        # shift to the standardized (loc=0) support
        k = asarray((k-loc))
        # cond0: valid shape parameters; cond1: k on the support lattice
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # invalid parameters or nan input map to badvalue
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            # evaluate only where every condition holds; clip guards against
            # tiny numerical over/undershoot outside [0, 1]
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
    def logpmf(self, k, *args, **kwds):
        """
        Log of the probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter. Default is 0.

        Returns
        -------
        logpmf : array_like
            Log of the probability mass function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        # shift to the standardized (loc=0) support
        k = asarray((k-loc))
        # cond0: valid shape parameters; cond1: k on the support lattice
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        # default is log(0) = -inf outside the support
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        # invalid parameters or nan input map to badvalue
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logpmf(*goodargs))
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
    def cdf(self, k, *args, **kwds):
        """
        Cumulative distribution function of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `k`.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        # shift to the standardized (loc=0) support
        k = asarray((k-loc))
        # cond0: valid shape params; cond1: inside support; cond2: at/above b
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k < self.b)
        cond2 = (k >= self.b)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # invalid parameters or nan input map to badvalue
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # at or beyond the upper support bound the cdf is exactly 1
        place(output, cond2*(cond0 == cond0), 1.0)
        if np.any(cond):
            # clip guards against tiny numerical over/undershoot
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
    def logcdf(self, k, *args, **kwds):
        """
        Log of the cumulative distribution function at k of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        # shift to the standardized (loc=0) support
        k = asarray((k-loc))
        # cond0: valid shape params; cond1: inside support; cond2: at/above b
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k < self.b)
        cond2 = (k >= self.b)
        cond = cond0 & cond1
        # default is log(0) = -inf below the support
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        # invalid parameters or nan input map to badvalue
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # at or beyond the upper bound, log(cdf) = log(1) = 0
        place(output, cond2*(cond0 == cond0), 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
    def sf(self, k, *args, **kwds):
        """
        Survival function (1 - `cdf`) at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        sf : array_like
            Survival function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        # shift to the standardized (loc=0) support
        k = asarray(k-loc)
        # cond0: valid shape params; cond1: inside support; cond2: below a
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k < self.b)
        cond2 = (k < self.a) & cond0
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # invalid parameters or nan input map to badvalue
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # below the support the survival function is exactly 1
        place(output, cond2, 1.0)
        if np.any(cond):
            # clip guards against tiny numerical over/undershoot
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
    def logsf(self, k, *args, **kwds):
        """
        Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as 1 - `cdf`,
        evaluated at `k`.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `k`.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        # shift to the standardized (loc=0) support
        k = asarray(k-loc)
        # cond0: valid shape params; cond1: inside support; cond2: below a
        cond0 = self._argcheck(*args)
        cond1 = (k >= self.a) & (k < self.b)
        cond2 = (k < self.a) & cond0
        cond = cond0 & cond1
        # default is log(0) = -inf above the support
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        # invalid parameters or nan input map to badvalue
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # below the support, log(sf) = log(1) = 0
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """
        Percent point function (inverse of `cdf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Lower tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : array_like
            Quantile corresponding to the lower tail probability, q.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        # loc == loc also excludes nan locations
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1
        # start from badvalue everywhere
        output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nin and inf
        # q == 0 maps to a-1 (one step below the support); q == 1 maps to b
        place(output, (q == 0)*(cond == cond), self.a-1)
        place(output, cond2, self.b)
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            # split loc back off so it can be added after inversion
            loc, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._ppf(*goodargs) + loc)
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """
        Inverse survival function (inverse of `sf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Upper tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : ndarray or scalar
            Quantile corresponding to the upper tail probability, q.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        # loc == loc also excludes nan locations
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1
        # same problem as with ppf; copied from ppf and changed
        output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nin and inf
        # mirrored edge cases: q == 0 maps to b, q == 1 maps to a-1
        place(output, (q == 0)*(cond == cond), self.b)
        place(output, cond2, self.a-1)
        # call place only if at least 1 valid argument
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            # split loc back off so it can be added after inversion
            loc, goodargs = goodargs[-1], goodargs[:-1]
            # PB same as ticket 766
            place(output, cond, self._isf(*goodargs) + loc)
        if output.ndim == 0:
            # unwrap 0-d arrays to scalars
            return output[()]
        return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
return _expect(lambda x: entr(self.pmf(x, *args)),
self.a, self.b, self.ppf(0.5, *args), self.inc)
    def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
               conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
        """
        Calculate expected value of a function with respect to the distribution
        for discrete distribution.

        Parameters
        ----------
        func : callable, optional
            Function for which the expectation value is calculated.
            Takes only one argument.
            The default is the identity mapping f(k) = k.
        args : tuple, optional
            Shape parameters of the distribution.
        loc : float, optional
            Location parameter.
            Default is 0.
        lb, ub : int, optional
            Lower and upper bound for the summation, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
        conditional : bool, optional
            If true then the expectation is corrected by the conditional
            probability of the summation interval. The return value is the
            expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
            Default is False.
        maxcount : int, optional
            Maximal number of terms to evaluate (to avoid an endless loop for
            an infinite sum). Default is 1000.
        tolerance : float, optional
            Absolute tolerance for the summation. Default is 1e-10.
        chunksize : int, optional
            Iterate over the support of a distributions in chunks of this size.
            Default is 32.

        Returns
        -------
        expect : float
            Expected value.

        Notes
        -----
        For heavy-tailed distributions, the expected value may or may not exist,
        depending on the function, `func`. If it does exist, but the sum converges
        slowly, the accuracy of the result may be rather low. For instance, for
        ``zipf(4)``, accuracy for mean, variance in example is only 1e-5.
        increasing `maxcount` and/or `chunksize` may improve the result, but may also
        make zipf very slow.

        The function is not vectorized.
        """
        if func is None:
            def fun(x):
                # loc and args from outer scope
                return (x+loc)*self._pmf(x, *args)
        else:
            def fun(x):
                # loc and args from outer scope
                return func(x+loc)*self._pmf(x, *args)
        # used pmf because _pmf does not check support in randint and there
        # might be problems(?) with correct self.a, self.b at this stage maybe
        # not anymore, seems to work now with _pmf
        self._argcheck(*args)  # (re)generate scalar self.a and self.b
        if lb is None:
            lb = self.a
        else:
            lb = lb - loc   # convert bound for standardized distribution
        if ub is None:
            ub = self.b
        else:
            ub = ub - loc   # convert bound for standardized distribution
        if conditional:
            # probability mass inside [lb, ub], used to renormalize the sum
            invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
        else:
            invfac = 1.0
        # iterate over the support, starting from the median
        x0 = self.ppf(0.5, *args)
        res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
        return res / invfac
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
            chunksize=32):
    """Helper for computing the expectation value of `fun`.

    Sums ``fun`` over the lattice [lb, ub] with step ``inc``, walking
    outwards from ``x0`` in chunks and stopping a direction once a chunk's
    contribution falls below ``tolerance`` per element, or once ``maxcount``
    terms have been evaluated (with a RuntimeWarning).
    """
    # short-circuit if the support size is small enough
    if (ub - lb) <= chunksize:
        supp = np.arange(lb, ub+1, inc)
        vals = fun(supp)
        return np.sum(vals)
    # otherwise, iterate starting from x0
    if x0 < lb:
        x0 = lb
    if x0 > ub:
        x0 = ub
    count, tot = 0, 0.
    # iterate over [x0, ub] inclusive
    for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
        count += x.size
        delta = np.sum(fun(x))
        tot += delta
        if abs(delta) < tolerance * x.size:
            # this direction has converged
            break
        if count > maxcount:
            warnings.warn('expect(): sum did not converge', RuntimeWarning)
            # give up entirely: the budget is already exhausted
            return tot
    # iterate over [lb, x0)
    for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
        count += x.size
        delta = np.sum(fun(x))
        tot += delta
        if abs(delta) < tolerance * x.size:
            break
        if count > maxcount:
            warnings.warn('expect(): sum did not converge', RuntimeWarning)
            break
    return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
    """A 'sample' discrete distribution defined by the support and values.

    The ctor ignores most of the arguments, only needs the `values` argument.
    """
    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        # Deliberately skip rv_discrete.__init__ (which rejects
        # values != None) and call the grandparent initializer directly.
        super(rv_discrete, self).__init__(seed)
        if values is None:
            raise ValueError("rv_sample.__init__(..., values=None,...)")
        # cf generic freeze: snapshot of ctor args for freezing/pickling
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes
        self.vecentropy = self._entropy
        # unpack the (support points, probabilities) table
        xk, pk = values
        if len(xk) != len(pk):
            raise ValueError("xk and pk need to have the same length.")
        if not np.allclose(np.sum(pk), 1):
            raise ValueError("The sum of provided pk is not 1.")
        # store support and probabilities sorted by support point
        indx = np.argsort(np.ravel(xk))
        self.xk = np.take(np.ravel(xk), indx, 0)
        self.pk = np.take(np.ravel(pk), indx, 0)
        self.a = self.xk[0]
        self.b = self.xk[-1]
        # precomputed cumulative probabilities back the cdf/ppf lookups
        self.qvals = np.cumsum(self.pk, axis=0)
        self.shapes = ' ' # bypass inspection
        self._construct_argparser(meths_to_inspect=[self._pmf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
        self._construct_docstrings(name, longname, extradoc)
    @property
    @np.deprecate(message="`return_integers` attribute is not used anywhere any"
                  " longer and is deprecated in scipy 0.18.")
    def return_integers(self):
        # Deprecated backward-compatibility shim.
        return 0
    def _pmf(self, x):
        # pk where x equals a support point, 0 elsewhere.
        return np.select([x == k for k in self.xk],
                         [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
    def _cdf(self, x):
        # Cumulative probability at the largest support point <= x, found
        # via the index of the first support point strictly greater than x.
        xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
        indx = np.argmax(xxk > xx, axis=-1) - 1
        return self.qvals[indx]
    def _ppf(self, q):
        # Smallest support point whose cumulative probability reaches q.
        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
        indx = argmax(sqq >= qq, axis=-1)
        return self.xk[indx]
    def _rvs(self):
        # Need to define it explicitly, otherwise .rvs() with size=None
        # fails due to explicit broadcasting in _ppf
        U = self._random_state.random_sample(self._size)
        if self._size is None:
            U = np.array(U, ndmin=1)
            Y = self._ppf(U)[0]
        else:
            Y = self._ppf(U)
        return Y
    def _entropy(self):
        # Closed form from the stored probability table.
        return entropy(self.pk)
    def generic_moment(self, n):
        # n-th (non-central) moment: sum of xk**n * pk over the support.
        n = asarray(n)
        return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
    @np.deprecate(message="moment_gen method is not used anywhere any more "
                  "and is deprecated in scipy 0.18.")
    def moment_gen(self, t):
        # Deprecated: moment generating function, sum of exp(xk*t) * pk.
        t = asarray(t)
        return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
    @property
    @np.deprecate(message="F attribute is not used anywhere any longer and "
                  "is deprecated in scipy 0.18.")
    def F(self):
        # Deprecated: mapping support point -> cumulative probability.
        return dict(zip(self.xk, self.qvals))
    @property
    @np.deprecate(message="Finv attribute is not used anywhere any longer and "
                  "is deprecated in scipy 0.18.")
    def Finv(self):
        # Deprecated: inverse mapping cumulative probability -> support point.
        decreasing_keys = sorted(self.F.keys(), reverse=True)
        return dict((self.F[k], k) for k in decreasing_keys)
def get_distribution_names(namespace_pairs, rv_base_class):
    """
    Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions.
    distn_gen_names : list of strings
        Names of the generators of the statistical distributions.
        Note that these are not simply the names of the statistical
        distributions, with a _gen suffix added.
    """
    distribution_names = []
    generator_names = []
    for identifier, obj in namespace_pairs:
        # private names are never part of the public distribution API
        if identifier.startswith('_'):
            continue
        if identifier.endswith('_gen') and issubclass(obj, rv_base_class):
            generator_names.append(identifier)
        if isinstance(obj, rv_base_class):
            distribution_names.append(identifier)
    return distribution_names, generator_names
| {
"content_hash": "794c4f82701658f58c835a39e9b61894",
"timestamp": "",
"source": "github",
"line_count": 3430,
"max_line_length": 103,
"avg_line_length": 34.81049562682216,
"alnum_prop": 0.5537102177554439,
"repo_name": "boomsbloom/dtm-fmri",
"id": "4a353bbbb3887a1d57f955580ab39914330fac95",
"size": "119504",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "DTM/for_gensim/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "413670"
},
{
"name": "C++",
"bytes": "262666"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "14725"
},
{
"name": "HTML",
"bytes": "555708"
},
{
"name": "JavaScript",
"bytes": "23921"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Matlab",
"bytes": "36260"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "46698963"
},
{
"name": "R",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "11728"
},
{
"name": "TeX",
"bytes": "18567"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from .templatetags import core_tags
class CoreTagsTestCase(TestCase):
    """Exercises ``core_tags.join_strings`` over several separator setups."""

    def setUp(self):
        errors = (
            'first error',
            'second error',
            'third error',
            'fourth error',
        )
        # Each case: (strings, join_word, last_join_word, expected result).
        cases = [
            (errors, None, ' and ',
             'first error, second error, third error and fourth error'),
            (errors, ' and ', None,
             'first error and second error and third error and fourth error'),
            (errors, '-', '|',
             'first error-second error-third error|fourth error'),
            (('first error',), None, None,
             'first error'),
        ]
        self.data = [
            {
                'strs': strs,
                'join_word': join_word,
                'last_join_word': last_join_word,
                'result': result,
            }
            for strs, join_word, last_join_word, result in cases
        ]

    def test_join_errors_string_creation(self):
        for case in self.data:
            joined = core_tags.join_strings(case['strs'],
                                            case['last_join_word'],
                                            case['join_word'])
            self.assertEqual(joined, case['result'])
| {
"content_hash": "4fb58f7dc852ce626076e3211e1889fe",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 90,
"avg_line_length": 29.629032258064516,
"alnum_prop": 0.3538377789874796,
"repo_name": "calendall/calendall",
"id": "9e61b2a609cab493f54c07eb54740a58ce617376",
"size": "1837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calendall/core/test_templatetags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "663"
},
{
"name": "JavaScript",
"bytes": "347"
},
{
"name": "Python",
"bytes": "140955"
},
{
"name": "Shell",
"bytes": "522"
}
],
"symlink_target": ""
} |
import os
from pathlib import Path
from unittest import mock
from airflow_breeze.utils.path_utils import find_airflow_sources_root_to_operate_on
# Real repository root: this test file lives three directory levels below it.
ACTUAL_AIRFLOW_SOURCES = Path(__file__).parents[3].resolve()
# Filesystem root of this file's path, used to simulate running outside the repo.
ROOT_PATH = Path(Path(__file__).root)
def test_find_airflow_root_upwards_from_cwd(capsys):
    """Root lookup from inside the repo walks upwards and prints nothing."""
    os.chdir(Path(__file__).parent)
    found = find_airflow_sources_root_to_operate_on()
    assert found == ACTUAL_AIRFLOW_SOURCES
    captured = str(capsys.readouterr().out)
    assert captured == ''
def test_find_airflow_root_upwards_from_file(capsys):
    """Root lookup still succeeds after chdir to this path's filesystem root."""
    os.chdir(Path(__file__).root)
    found = find_airflow_sources_root_to_operate_on()
    assert found == ACTUAL_AIRFLOW_SOURCES
    captured = str(capsys.readouterr().out)
    assert captured == ''
@mock.patch('airflow_breeze.utils.path_utils.AIRFLOW_CFG_FILE', "bad_name.cfg")
@mock.patch('airflow_breeze.utils.path_utils.Path.cwd')
def test_find_airflow_root_from_installation_dir(mock_cwd, capsys):
    """With cwd mocked to the filesystem root and no matching cfg file,
    the lookup still resolves to the actual sources."""
    mock_cwd.return_value = ROOT_PATH
    found = find_airflow_sources_root_to_operate_on()
    assert found == ACTUAL_AIRFLOW_SOURCES
| {
"content_hash": "cc15958cced44354cf76c66735cef101",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 83,
"avg_line_length": 34.5625,
"alnum_prop": 0.7188065099457505,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "99da5ce04e521549d9b8f18379bbf2435405d71f",
"size": "1892",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dev/breeze/tests/test_find_airflow_directory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
class Solution:
    # @param s, a string
    # @return an integer
    def longestValidParentheses(self, s):
        """Return the length of the longest balanced-parentheses substring.

        Two passes: a stack pass marks every index that belongs to some
        matched pair, then a scan finds the longest run of marked indices.
        Uses ``range`` instead of Python-2-only ``xrange`` so the method
        works under both Python 2 and 3 (they behave identically here).
        """
        stack = []
        matched = [False] * len(s)
        # Pass 1: pair '(' indices with ')' indices; mark both on a match.
        for i in range(len(s)):
            if s[i] == "(":
                stack.append(i)
            else:
                if not stack:
                    # unmatched ')' -- can never be part of a valid substring
                    continue
                matched[stack.pop()] = True
                matched[i] = True
        # Pass 2: longest consecutive run of matched indices.
        best = 0
        curr = 0
        for ok in matched:
            if ok:
                curr += 1
            else:
                best = max(best, curr)
                curr = 0
        return max(best, curr)
if __name__ == "__main__":
    # Ad-hoc smoke run (Python 2 print statements; not a real test suite).
    solution = Solution()
    #print solution.longestValidParentheses("()")
    print solution.longestValidParentheses(")")
    #print solution.longestValidParentheses("()(()")
| {
"content_hash": "b8404a62dfb8b1951877f7c414321fe1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 52,
"avg_line_length": 26.705882352941178,
"alnum_prop": 0.45044052863436124,
"repo_name": "sureleo/leetcode",
"id": "cf28d85db7c04861ecfd58174f5caf3d8b986067",
"size": "908",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "archive/python/stack/LongestValidParentheses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "61783"
},
{
"name": "Java",
"bytes": "219556"
},
{
"name": "Python",
"bytes": "156659"
}
],
"symlink_target": ""
} |
import asyncio
from demonhunter import DemonHunter
from demonhunter.nodes.honeypots.telnet import TelnetHoneypot, MicrosoftTelnet
from demonhunter.nodes.honeypots.vnc import VNCHoneypot
from demonhunter.nodes.honeypots.http import HTTPHoneypot, Apache
loop = asyncio.get_event_loop()
hp = DemonHunter(loop)
# Fake VNC service listening on all interfaces (default port).
vnc = VNCHoneypot(interfaces=["0.0.0.0"])
hp.add_honeypot(vnc)
# Telnet honeypot on port 23 using the Microsoft-telnet handler.
telnet = TelnetHoneypot(port=23, handler=MicrosoftTelnet, interfaces=["0.0.0.0"])
hp.add_honeypot(telnet)
# Apache-style HTTP honeypot serving decoy content from /var/fakewww/.
http = HTTPHoneypot(handler=Apache, www_folder="/var/fakewww/", interfaces=["0.0.0.0"])
hp.add_honeypot(http)
hp.start()
try:
    # Run The Loop
    loop.run_forever()
except KeyboardInterrupt:
    # Ctrl-C: shut the honeypots down cleanly before exiting.
    hp.stop()
    print("\nServer Closed")
loop.close() | {
"content_hash": "7592ba8f149397daa60564f532c8b7dc",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 87,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.7655172413793103,
"repo_name": "RevengeComing/DemonHunter",
"id": "4e0583d45d7edfa636189ac851616404157f2f21",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/honeypot/my_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "497"
},
{
"name": "HTML",
"bytes": "28499"
},
{
"name": "JavaScript",
"bytes": "857"
},
{
"name": "Python",
"bytes": "41181"
}
],
"symlink_target": ""
} |
"""
This module implements a set of :class:`revscoring.Feature`
for use in scoring revisions. :class:`revscoring.Feature`
lists can be provided to a :func:`revscoring.dependencies.solve`, or
more commonly, to a :class:`revscoring.Extractor` to obtain simple
numerical/boolean values that can be used when modeling revision
scores. The provided features are split conceptually into a set of modules:
Feature collections
+++++++++++++++++++
:mod:`~revscoring.features.revision_oriented`
Basic features of revisions. E.g. ``revision.user.text_matches(r'.*Bot')``
:mod:`~revscoring.features.bytes`
Features of the number of bytes of content, byte length of characters,
etc.
:mod:`~revscoring.features.temporal`
Features of the time between events of a interest. E.g.
``revision.user.last_revision.seconds_since``
:mod:`~revscoring.features.wikibase`
Features of wikibase items and changes made to them. E.g.
``revision.diff.property_changed('P31')``
:mod:`~revscoring.features.wikitext`
Features of wikitext content and differences between revisions. E.g.
``revision.diff.uppercase_words_added``
Functions
+++++++++
.. automodule:: revscoring.features.functions
Meta-features
+++++++++++++
Meta-Features are classes that extend :class:`~revscoring.Feature` and
implement common operations on :class:`~revscoring.Datasource` like
:class:`~revscoring.features.meta.aggregators.sum` and
:class:`~revscoring.features.meta.bools.item_in_set`. See
:mod:`revscoring.features.meta` for the full list.
Modifiers
+++++++++
Modifiers are functions that can be applied to a :class:`revscoring.Feature`
to modify the value. E.g. :class:`~revscoring.features.modifiers.log`,
:class:`~revscoring.features.modifiers.max` and
:class:`~revscoring.features.modifiers.add`.
See :mod:`~revscoring.features.modifiers` for the full list.
Base classes
++++++++++++
.. automodule:: revscoring.features.feature
.. automodule:: revscoring.features.feature_vector
"""
from .feature import Constant, Feature, Modifier
from .feature_vector import FeatureVector
from .functions import trim, vectorize_values

# ``__all__`` must contain *names* (strings).  The previous version listed
# the objects themselves, which breaks ``from ... import *`` under Python 3
# ("Item in __all__ must be str") and defeats tooling that reads __all__.
__all__ = ["Feature", "Modifier", "Constant", "FeatureVector", "trim",
           "vectorize_values"]
| {
"content_hash": "027d481afd633d3060d2ba788d949f0a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 36.71666666666667,
"alnum_prop": 0.7453472537448933,
"repo_name": "he7d3r/revscoring",
"id": "02e767de0a991e0997704392868080ad5483e592",
"size": "2203",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "revscoring/features/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19605"
},
{
"name": "Python",
"bytes": "341481"
}
],
"symlink_target": ""
} |
from types import FunctionType, LambdaType
#--- LIST OPERATIONS ---------------------------------------------------------------------------------
def sorted(list, cmp=None, reversed=False):
    """ Returns a sorted copy of the list.

    NOTE: intentionally shadows the builtin ``sorted`` (module-level API of
    this graph library) and relies on the Python 2 ``list.sort(cmp)``
    comparator signature, which no longer exists in Python 3.
    """
    list = [x for x in list]
    list.sort(cmp)
    if reversed: list.reverse()
    return list
def unique(list):
    """ Returns a copy of the list without duplicates.

    First occurrence wins; original order is preserved.
    """
    seen = []
    for item in list:
        if item not in seen:
            seen.append(item)
    return seen
#--- SET THEORY --------------------------------------------------------------------------------------
def flatten(node, distance=1):
    """ Recursively lists the node and its links.

    Distance of 0 will return the given [node].
    Distance of 1 will return a list of the node and all its links.
    Distance of 2 will also include the linked nodes' links, etc.
    """
    # When you pass a graph it returns all the node id's in it.
    if hasattr(node, "nodes") and hasattr(node, "edges"):
        return [n.id for n in node.nodes]
    collected = [node]
    if distance >= 1:
        for neighbour in node.links:
            collected += neighbour.flatten(distance - 1)
    return unique(collected)
def intersection(a, b):
    """ Returns the intersection of lists.

    a & b -> elements that appear in a as well as in b.
    """
    # A list comprehension keeps the Python 2 behaviour (an eager list, in
    # b's order, duplicates preserved); on Python 3 the previous filter()
    # call would have returned a lazy iterator instead of a list.
    return [x for x in b if x in a]
def union(a, b):
    """ Returns the union of lists.

    a | b -> all elements from a and all the elements from b.
    """
    # Explicit list comprehension: on Python 3, ``list + filter-object`` is
    # a TypeError; this preserves the intended Python 2 semantics everywhere
    # (a's elements first, then b's elements not already in a).
    return a + [x for x in b if x not in a]
def difference(a, b):
    """ Returns the difference of lists.

    a - b -> elements that appear in a but not in b.
    """
    # List comprehension instead of filter(): eager list on both Python 2
    # and 3, preserving a's order and duplicates.
    return [x for x in a if x not in b]
#--- SUBGRAPH ----------------------------------------------------------------------------------------
def subgraph(graph, id, distance=1):
    """ Creates the subgraph of the flattened node with given id (or list of id's).

    Finds all the edges between the nodes that make up the subgraph.
    NOTE: uses ``dict.has_key`` on the graph copy, so this is Python 2 only.
    """
    g = graph.copy(empty=True)
    if isinstance(id, (FunctionType, LambdaType)):
        # id can also be a lambda or function that returns True or False
        # for each node in the graph. We take the id's of nodes that pass.
        id = [node.id for node in filter(id, graph.nodes)]
    if not isinstance(id, (list, tuple)):
        id = [id]
    for id in id:
        # Copy every node within `distance` links of this seed into the new
        # graph, carrying over its attributes and root status.
        for n in flatten(graph[id], distance):
            g.add_node(n.id, n.r, n.style, n.category, n.label, (n==graph.root), n.__dict__)
    for e in graph.edges:
        # Keep only edges whose both endpoints made it into the subgraph.
        if g.has_key(e.node1.id) and \
           g.has_key(e.node2.id):
            g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
    # Should we look for shortest paths between nodes here?
    return g
#--- CLIQUE ----------------------------------------------------------------------------------------
def is_clique(graph):
    """ A clique is a set of nodes in which each node is connected to all other nodes.
    """
    # A graph of density 1.0 is complete, i.e. every node pair is connected.
    return not graph.density < 1.0
def clique(graph, id):
    """ Returns the largest possible clique for the node with given id.

        Greedily grows the member list: a node joins only if it is linked
        to every node already in the clique (and is not already a member).
    """
    members = [id]
    for node in graph.nodes:
        linked_to_all = all(
            node.id != member and graph.edge(node.id, member) != None
            for member in members)
        if linked_to_all:
            members.append(node.id)
    return members
def cliques(graph, threshold=3):
    """ Returns all the cliques in the graph of at least the given size.

        Each node seeds one greedy clique (see clique()); results are
        sorted id lists, deduplicated, returned in discovery order.
    """
    found = []
    for node in graph.nodes:
        candidate = clique(graph, node.id)
        if len(candidate) < threshold:
            continue
        candidate.sort()
        if candidate not in found:
            found.append(candidate)
    return found
#--- UNCONNECTED SUBGRAPHS -------------------------------------------------------------------------
def partition(graph):
    """ Splits unconnected subgraphs.
        For each node in the graph, make a list of its id and all directly connected id's.
        If one of the nodes in this list intersects with a subgraph,
        they are all part of that subgraph.
        Otherwise, this list is part of a new subgraph.
        Return a list of subgraphs sorted by size (biggest-first).
    """
    # Phase 1: seed candidate subgraphs from each node's 1-hop neighbourhood.
    g = []
    for n in graph.nodes:
        # NOTE: the comprehension variable n shadows the loop's n (Python 2
        # comprehension scoping) -- intentional here, n is not reused after.
        c = [n.id for n in flatten(n)]
        f = False
        for i in range(len(g)):
            if len(intersection(g[i], c)) > 0:
                g[i] = union(g[i], c)
                f = True
                break
        if not f:
            g.append(c)
    # If 1 is directly connected to 2 and 3,
    # and 4 is directly connected to 5 and 6, these are separate subgraphs.
    # If we later find that 7 is directly connected to 3 and 6, 
    # it will be attached to [1, 2, 3] yielding
    # [1, 2, 3, 6, 7] and [4, 5, 6].
    # These two subgraphs are connected and need to be merged.
    # Phase 2: merge candidate lists that share any id.
    merged = []
    for i in range(len(g)):
        merged.append(g[i])
        for j in range(i+1, len(g)):
            if len(intersection(g[i], g[j])) > 0:
                # NOTE(review): extend() (unlike union()) can leave duplicate
                # ids in the merged list -- presumably harmless for
                # graph.sub(); confirm against that method.
                merged[-1].extend(g[j])
                g[j] = []
    g = merged
    # Materialize each id list as an actual subgraph (distance=0: ids only).
    g = [graph.sub(g, distance=0) for g in g]
    # NOTE(review): cmp-style list.sort(cmp) is Python 2 only; Python 3
    # would need g.sort(key=len, reverse=True).
    g.sort(lambda a, b: len(b) - len(a))
    return g
"content_hash": "db391f441b548473c9cdcecd3ee44cc7",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 102,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.5175644028103045,
"repo_name": "est/nodebox-gl",
"id": "41e39f2ccdda6245c469ce08837ddbd8cd80b509",
"size": "5618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libraries/graph/cluster.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "998878"
},
{
"name": "Perl",
"bytes": "48202"
},
{
"name": "Python",
"bytes": "858930"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestModifyExptime(McrouterTestCase):
    """End-to-end tests for mcrouter's expiration-time rewriting.

    NOTE(review): the actual exptime transformations (negative, infinite,
    capped, minimum) are defined in test_modify_exptime.json, which is not
    visible here -- confirm each test's intent against that config.
    """
    # Route configuration exercised by every test in this class.
    config = './mcrouter/test/test_modify_exptime.json'

    def setUp(self):
        # One backing memcached instance and an mcrouter routing to it.
        self.mc = self.add_server(Memcached())
        self.mcr = self.add_mcrouter(self.config)

    def test_modify_negative_exptime(self):
        # A set made directly on memcached is visible through mcrouter.
        self.assertTrue(self.mc.set("a", "value"))
        self.assertEqual(self.mcr.get("a"), "value")
        # A set routed through mcrouter presumably gets a negative exptime,
        # expiring the key immediately: gone on both direct and routed paths.
        self.assertTrue(self.mcr.set("a", "value2"))
        self.assertIsNone(self.mc.get("a"))
        self.assertIsNone(self.mcr.get("a"))

    def test_modify_infinite_exptime(self):
        # exptime=-1 is presumably rewritten to "never expires" by the route;
        # the value stays readable both directly and through mcrouter.
        self.assertTrue(self.mcr.set("b", "value", exptime=-1))
        self.assertEqual(self.mc.get("b"), "value")
        self.assertEqual(self.mcr.get("b"), "value")

    def test_modify_smaller_exptime(self):
        # The route presumably caps the exptime to a few seconds.
        self.assertTrue(self.mcr.set("c", "value"))
        self.assertEqual(self.mc.get("c"), "value")
        self.assertEqual(self.mcr.get("c"), "value")
        # wait for the value to expire
        time.sleep(5)
        self.assertIsNone(self.mc.get("c"))
        self.assertIsNone(self.mcr.get("c"))

    def test_modify_min_exptime(self):
        # Same pattern as above for the route enforcing a minimum exptime.
        self.assertTrue(self.mcr.set("d", "value"))
        self.assertEqual(self.mc.get("d"), "value")
        self.assertEqual(self.mcr.get("d"), "value")
        # wait for the value to expire
        time.sleep(5)
        self.assertIsNone(self.mc.get("d"))
        self.assertIsNone(self.mcr.get("d"))
| {
"content_hash": "d1b6872a11c0de196c856d2bfb373e95",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 63,
"avg_line_length": 33.254901960784316,
"alnum_prop": 0.6403301886792453,
"repo_name": "yqzhang/mcrouter",
"id": "d46ed129cf857a2186234bcd95075559aeafb9f9",
"size": "1984",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "mcrouter/test/test_modify_exptime.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "129005"
},
{
"name": "C++",
"bytes": "2024129"
},
{
"name": "M4",
"bytes": "61682"
},
{
"name": "Makefile",
"bytes": "19358"
},
{
"name": "Python",
"bytes": "202097"
},
{
"name": "Ragel in Ruby Host",
"bytes": "31516"
},
{
"name": "Shell",
"bytes": "20766"
},
{
"name": "Thrift",
"bytes": "5717"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.dispatch import Signal
# Signal fired when a user is added to a local site; receivers get
# 'user' and 'localsite' keyword arguments.
# NOTE(review): providing_args is purely documentary and was deprecated in
# Django 3.0/removed in 4.0 -- keep in mind if the project upgrades.
local_site_user_added = Signal(providing_args=['user', 'localsite'])
| {
"content_hash": "70b7db6f2ea80d2175c06e6005edea21",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 68,
"avg_line_length": 24.5,
"alnum_prop": 0.7551020408163265,
"repo_name": "davidt/reviewboard",
"id": "7d8be334787697dc963eafce785e060a7ea22fa2",
"size": "147",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "reviewboard/site/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "206392"
},
{
"name": "HTML",
"bytes": "182334"
},
{
"name": "JavaScript",
"bytes": "1770499"
},
{
"name": "Python",
"bytes": "3842787"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adjusting the custom user model.

    Alters `last_login` (nullable) and `password` on userprofile.User --
    presumably to match Django's AbstractBaseUser field definitions after
    an upgrade; confirm against the model at this migration's point in time.
    """

    dependencies = [
        ('userprofile', '0005_auto_20160205_0651'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='last_login',
            field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
        ),
        migrations.AlterField(
            model_name='user',
            name='password',
            field=models.CharField(max_length=128, verbose_name='password'),
        ),
    ]
| {
"content_hash": "5ae43829f28908d36a91b7e2a647977f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 89,
"avg_line_length": 26.17391304347826,
"alnum_prop": 0.5880398671096345,
"repo_name": "HyperManTT/ECommerceSaleor",
"id": "6fe664c82f448940a881194b07ab09d6590f5468",
"size": "673",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "saleor/userprofile/migrations/0006_auto_20160829_0819.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "63630"
},
{
"name": "HTML",
"bytes": "390654"
},
{
"name": "JavaScript",
"bytes": "186251"
},
{
"name": "Python",
"bytes": "659522"
}
],
"symlink_target": ""
} |
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
# Local alias for rnn_cell's private helper that prepends a prefix
# (e.g. [batch_size]) to a state/output size when building zero tensors.
_state_size_with_prefix = rnn_cell._state_size_with_prefix
# pylint: enable=protected-access
def _infer_state_dtype(explicit_dtype, state):
  """Infer the dtype of an RNN state.

  Args:
    explicit_dtype: explicitly declared dtype or None.
    state: RNN's hidden state. Must be a Tensor or a nested iterable
      containing Tensors.

  Returns:
    dtype: inferred dtype of hidden state.

  Raises:
    ValueError: if `state` has heterogeneous dtypes or is empty.
  """
  # An explicitly declared dtype always wins.
  if explicit_dtype is not None:
    return explicit_dtype
  # A single (non-nested) state carries its dtype directly.
  if not nest.is_sequence(state):
    return state.dtype
  # Nested state: every leaf tensor must agree on one dtype.
  flat_dtypes = [element.dtype for element in nest.flatten(state)]
  if not flat_dtypes:
    raise ValueError("Unable to infer dtype from empty state.")
  representative = flat_dtypes[0]
  if any(d != representative for d in flat_dtypes):
    raise ValueError(
        "State has tensors of different inferred_dtypes. Unable to infer a "
        "single representative dtype.")
  return representative
import logging
def rnn(cell, inputs, initial_state=None, dtype=None,
        sequence_length=None, scope=None, bucket_length=None, reverse=False):
  """Creates a recurrent neural network specified by RNNCell `cell`.

  The simplest form of RNN network generated is:
  ```python
    state = cell.zero_state(...)
    outputs = []
    for input_ in inputs:
      output, state = cell(input_, state)
      outputs.append(output)
    return (outputs, state)
  ```
  However, a few other options are available:

  An initial state can be provided.
  If the sequence_length vector is provided, dynamic calculation is performed.
  This method of calculation does not compute the RNN steps past the maximum
  sequence length of the minibatch (thus saving computational time),
  and properly propagates the state at an example's sequence length
  to the final state output.

  The dynamic calculation performed is, at time `t` for batch row `b`,
  ```python
    (output, state)(b, t) =
      (t >= sequence_length(b))
        ? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
        : cell(input(b, t), state(b, t - 1))
  ```

  Args:
    cell: An instance of RNNCell.
    inputs: A length T list of inputs, each a `Tensor` of shape
      `[batch_size, input_size]`, or a nested tuple of such elements.
    initial_state: (optional) An initial state for the RNN.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a heterogeneous
      dtype.
    sequence_length: Specifies the length of each sequence in inputs.
      An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
    scope: VariableScope for the created subgraph; defaults to "RNN".
    bucket_length: (optional) Forwarded to `_rnn_step`; used there only when
      `reverse=True`, as the padded (bucketed) length that step conditions
      are mirrored against. NOTE(review): exact semantics depend on callers
      -- confirm.
    reverse: If True, `_rnn_step` mirrors its copy-through conditions so that
      padding is assumed to sit at the front of each sequence.

  Returns:
    A pair (outputs, state) where:
      - outputs is a length T list of outputs (one for each input), or a nested
        tuple of such elements.
      - state is the final state

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the input depth
      (column size) cannot be inferred from inputs via shape inference.
  """
  if not isinstance(cell, rnn_cell.RNNCell):
    raise TypeError("cell must be an instance of RNNCell")
  if not nest.is_sequence(inputs):
    raise TypeError("inputs must be a sequence")
  if not inputs:
    raise ValueError("inputs must not be empty")

  outputs = []
  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "RNN") as varscope:
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)

    # Obtain the first sequence of the input
    first_input = inputs
    while nest.is_sequence(first_input):
      first_input = first_input[0]

    # Temporarily avoid EmbeddingWrapper and seq2seq badness
    # TODO(lukaszkaiser): remove EmbeddingWrapper
    if first_input.get_shape().ndims != 1:
      # Static shape path: validate every flattened input has a known depth
      # and a batch dimension consistent with the first input.
      input_shape = first_input.get_shape().with_rank_at_least(2)
      fixed_batch_size = input_shape[0]

      flat_inputs = nest.flatten(inputs)
      for flat_input in flat_inputs:
        input_shape = flat_input.get_shape().with_rank_at_least(2)
        batch_size, input_size = input_shape[0], input_shape[1:]
        fixed_batch_size.merge_with(batch_size)
        for i, size in enumerate(input_size):
          if size.value is None:
            raise ValueError(
                "Input size (dimension %d of inputs) must be accessible via "
                "shape inference, but saw value None." % i)
    else:
      fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]

    # Fall back to the dynamic batch size when the static one is unknown.
    if fixed_batch_size.value:
      batch_size = fixed_batch_size.value
    else:
      batch_size = array_ops.shape(first_input)[0]
    if initial_state is not None:
      state = initial_state
    else:
      if not dtype:
        raise ValueError("If no initial_state is provided, "
                         "dtype must be specified")
      state = cell.zero_state(batch_size, dtype)

    if sequence_length is not None:  # Prepare variables
      sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if sequence_length.get_shape().ndims not in (None, 1):
        raise ValueError(
            "sequence_length must be a vector of length batch_size")

      def _create_zero_output(output_size):
        # convert int to TensorShape if necessary
        size = _state_size_with_prefix(output_size, prefix=[batch_size])
        output = array_ops.zeros(
            array_ops.pack(size), _infer_state_dtype(dtype, state))
        shape = _state_size_with_prefix(
            output_size, prefix=[fixed_batch_size.value])
        output.set_shape(tensor_shape.TensorShape(shape))
        return output

      # Build one zero output per element of (possibly nested) output_size;
      # _rnn_step emits these past each row's sequence length.
      output_size = cell.output_size
      flat_output_size = nest.flatten(output_size)
      flat_zero_output = tuple(
          _create_zero_output(size) for size in flat_output_size)
      zero_output = nest.pack_sequence_as(structure=output_size,
                                          flat_sequence=flat_zero_output)

      sequence_length = math_ops.to_int32(sequence_length)
      # TODO: check if the new version does the same as this
      # zero_output = array_ops.zeros(
      #       array_ops.pack([batch_size, cell.output_size]), dtypes.float32)
      # zero_output.set_shape(
      #       tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))
      min_sequence_length = math_ops.reduce_min(sequence_length)
      max_sequence_length = math_ops.reduce_max(sequence_length)

    # Statically unrolled loop: one cell invocation per time step, with
    # variable reuse after the first step.
    for time, input_ in enumerate(inputs):
      if time > 0: varscope.reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell(input_, state)
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
        (output, state) = _rnn_step(
            time=time,
            sequence_length=sequence_length,
            min_sequence_length=min_sequence_length,
            max_sequence_length=max_sequence_length,
            zero_output=zero_output,
            state=state,
            call_cell=call_cell,
            state_size=cell.state_size,
            bucket_length=bucket_length,
            reverse=reverse)
      else:
        (output, state) = call_cell()

      outputs.append(output)

    return (outputs, state)
def state_saving_rnn(cell, inputs, state_saver, state_name,
                     sequence_length=None, scope=None):
  """RNN that accepts a state saver for time-truncated RNN calculation.

  Args:
    cell: An instance of `RNNCell`.
    inputs: A length T list of inputs, each a `Tensor` of shape
      `[batch_size, input_size]`.
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: Python string or tuple of strings.  The name to use with the
      state_saver. If the cell returns tuples of states (i.e.,
      `cell.state_size` is a tuple) then `state_name` should be a tuple of
      strings having the same length as `cell.state_size`.  Otherwise it should
      be a single string.
    sequence_length: (optional) An int32/int64 vector size [batch_size].
      See the documentation for rnn() for more details about sequence_length.
    scope: VariableScope for the created subgraph; defaults to "RNN".

  Returns:
    A pair (outputs, state) where:
      outputs is a length T list of outputs (one for each input)
      states is the final state

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the arity and
     type of `state_name` does not match that of `cell.state_size`.
  """
  # The state saver and the cell must agree on whether state is nested:
  # a tuple state requires a matching tuple of state names.
  state_size = cell.state_size
  state_is_tuple = nest.is_sequence(state_size)
  state_name_tuple = nest.is_sequence(state_name)

  if state_is_tuple != state_name_tuple:
    raise ValueError(
        "state_name should be the same type as cell.state_size. "
        "state_name: %s, cell.state_size: %s"
        % (str(state_name), str(state_size)))

  if state_is_tuple:
    state_name_flat = nest.flatten(state_name)
    state_size_flat = nest.flatten(state_size)
    if len(state_name_flat) != len(state_size_flat):
      raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d"
                       % (len(state_name_flat), len(state_size_flat)))

    # Pull each named sub-state from the saver and re-nest to state_size's
    # structure so it can seed rnn()'s initial_state.
    initial_state = nest.pack_sequence_as(
        structure=state_size,
        flat_sequence=[state_saver.state(s) for s in state_name_flat])
  else:
    initial_state = state_saver.state(state_name)

  (outputs, state) = rnn(cell, inputs, initial_state=initial_state,
                         sequence_length=sequence_length, scope=scope)

  # Schedule save_state ops for the final state under the matching names.
  if state_is_tuple:
    flat_state = nest.flatten(state)
    state_name = nest.flatten(state_name)
    save_state = [state_saver.save_state(name, substate)
                  for name, substate in zip(state_name, flat_state)]
  else:
    save_state = [state_saver.save_state(state_name, state)]

  # Make the last output depend on the save ops, so evaluating the outputs
  # guarantees the state has been persisted.
  with ops.control_dependencies(save_state):
    last_output = outputs[-1]
    flat_last_output = nest.flatten(last_output)
    flat_last_output = [
        array_ops.identity(output) for output in flat_last_output]
    outputs[-1] = nest.pack_sequence_as(structure=last_output,
                                        flat_sequence=flat_last_output)

  return (outputs, state)
# pylint: disable=unused-argument
def _rnn_step(
    time, sequence_length, min_sequence_length, max_sequence_length,
    zero_output, state, call_cell, state_size, skip_conditionals=False,
    bucket_length=None, reverse=False):
  """Calculate one step of a dynamic RNN minibatch.

  Returns an (output, state) pair conditioned on the sequence_lengths.
  When skip_conditionals=False, the pseudocode is something like:

  if t >= max_sequence_length:
    return (zero_output, state)
  if t < min_sequence_length:
    return call_cell()

  # Selectively output zeros or output, old state or new state depending
  # on if we've finished calculating each row.
  new_output, new_state = call_cell()
  final_output = np.vstack([
    zero_output if time >= sequence_lengths[r] else new_output_r
    for r, new_output_r in enumerate(new_output)
  ])
  final_state = np.vstack([
    state[r] if time >= sequence_lengths[r] else new_state_r
    for r, new_state_r in enumerate(new_state)
  ])
  return (final_output, final_state)

  Args:
    time: Python int, the current time step
    sequence_length: int32 `Tensor` vector of size [batch_size]
    min_sequence_length: int32 `Tensor` scalar, min of sequence_length
    max_sequence_length: int32 `Tensor` scalar, max of sequence_length
    zero_output: `Tensor` vector of shape [output_size]
    state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
      or a list/tuple of such tensors.
    call_cell: lambda returning tuple of (new_output, new_state) where
      new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
      new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
    state_size: The `cell.state_size` associated with the state.
    skip_conditionals: Python bool, whether to skip using the conditional
      calculations.  This is useful for `dynamic_rnn`, where the input tensor
      matches `max_sequence_length`, and using conditionals just slows
      everything down.
    bucket_length: Used only when `reverse=True`: all step conditions are
      mirrored as `time < (bucket_length - ...)`, i.e. the padded length the
      reversed sequences were bucketed to. NOTE(review): must be set whenever
      reverse=True or these comparisons fail -- confirm callers guarantee it.
    reverse: Python bool. When True, rows are assumed padded at the *front*
      (inputs were reversed), so copy-through happens while
      `time < bucket_length - sequence_length` instead of
      `time >= sequence_length`.

  Returns:
    A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
      final_output is a `Tensor` matrix of shape [batch_size, output_size]
      final_state is either a single `Tensor` matrix, or a tuple of such
        matrices (matching length and shapes of input `state`).

  Raises:
    ValueError: If the cell returns a state tuple whose length does not match
      that returned by `state_size`.
  """

  # Convert state to a list for ease of use
  flat_state = nest.flatten(state)
  flat_zero_output = nest.flatten(zero_output)

  def _copy_one_through(output, new_output, reverse):
    # Per-row select: keep the old value (zero output / previous state) for
    # rows whose sequence is finished (or, reversed, not yet started).
    if reverse:
      copy_cond = (time < (bucket_length - sequence_length))
    else:
      # time is 0-indexed, length is not
      copy_cond = (time >= sequence_length)
    return math_ops.select(copy_cond, output, new_output)

  def _copy_some_through(flat_new_output, flat_new_state):
    # Use broadcasting select to determine which values should get
    # the previous state & zero output, and which values should get
    # a calculated state & output.
    flat_new_output = [
        _copy_one_through(zero_output, new_output, reverse)
        for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
    flat_new_state = [
        _copy_one_through(state, new_state, reverse)
        for state, new_state in zip(flat_state, flat_new_state)]
    return flat_new_output + flat_new_state

  def _maybe_copy_some_through():
    """Run RNN step.  Pass through either no or some past state."""
    new_output, new_state = call_cell()

    nest.assert_same_structure(state, new_state)

    flat_new_state = nest.flatten(new_state)
    flat_new_output = nest.flatten(new_output)
    if reverse:
      # Mirrored fast path: once every row has started (reversed padding
      # consumed), return everything without the per-row select.
      return control_flow_ops.cond(
          time >= (bucket_length - min_sequence_length), lambda: flat_new_output + flat_new_state,
          lambda: _copy_some_through(flat_new_output, flat_new_state))
    else:
      return control_flow_ops.cond(
          # if t < min_seq_len: calculate and return everything
          time < min_sequence_length, lambda: flat_new_output + flat_new_state,
          # else copy some of it through
          lambda: _copy_some_through(flat_new_output, flat_new_state))

  # TODO(ebrevdo): skipping these conditionals may cause a slowdown,
  # but benefits from removing cond() and its gradient.  We should
  # profile with and without this switch here.
  if skip_conditionals:
    # Instead of using conditionals, perform the selective copy at all time
    # steps.  This is faster when max_seq_len is equal to the number of unrolls
    # (which is typical for dynamic_rnn).
    new_output, new_state = call_cell()
    nest.assert_same_structure(state, new_state)
    new_state = nest.flatten(new_state)
    new_output = nest.flatten(new_output)
    final_output_and_state = _copy_some_through(new_output, new_state)
  else:
    empty_update = lambda: flat_zero_output + flat_state
    if reverse:
      # Mirrored outer guard: before any row has started, emit zeros and
      # pass the state through untouched.
      final_output_and_state = control_flow_ops.cond(
          time < (bucket_length - max_sequence_length), empty_update,
          _maybe_copy_some_through)
    else:
      final_output_and_state = control_flow_ops.cond(
          # if t >= max_seq_len: copy all state through, output zeros
          time >= max_sequence_length, empty_update,
          # otherwise calculation is required: copy some or all of it through
          _maybe_copy_some_through)

  if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
    raise ValueError("Internal error: state and output were not concatenated "
                     "correctly.")
  # The cond branches return one flat list: outputs first, then states.
  final_output = final_output_and_state[:len(flat_zero_output)]
  final_state = final_output_and_state[len(flat_zero_output):]

  # cond() erases static shapes; restore them from the known templates.
  for output, flat_output in zip(final_output, flat_zero_output):
    output.set_shape(flat_output.get_shape())
  for substate, flat_substate in zip(final_state, flat_state):
    substate.set_shape(flat_substate.get_shape())

  final_output = nest.pack_sequence_as(
      structure=zero_output, flat_sequence=final_output)
  final_state = nest.pack_sequence_as(
      structure=state, flat_sequence=final_state)

  return final_output, final_state
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
               or nested tuples of tensors.
    lengths:   A `Tensor` of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  # Work leaf-by-leaf: each position of the zip is one flattened tensor
  # stream across all time steps.
  flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)

  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.pack(sequence)

    # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
    if lengths is not None:
      lengths = math_ops.to_int64(lengths)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unpack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)

  # Re-nest each time step to match the structure of the original inputs.
  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             for input_, flat_result in zip(input_seq, flat_results)]
  return results
def bidirectional_rnn(cell_fw, cell_bw, inputs,
                      initial_state_fw=None, initial_state_bw=None,
                      dtype=None, sequence_length=None, scope=None, bucket_length=None,
                      legacy=False):
  """Creates a bidirectional recurrent neural network.

  Similar to the unidirectional case above (rnn) but takes input and builds
  independent forward and backward RNNs with the final forward and backward
  outputs depth-concatenated, such that the output will have the format
  [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
  forward and backward cell must match. The initial state for both directions
  is zero by default (but can be set optionally) and no intermediate states are
  ever returned -- the network is fully unrolled for the given (passed in)
  length(s) of the sequence(s) or completely unrolled if length(s) is not given.

  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, input_size], or a nested tuple of such elements.
    initial_state_fw: (optional) An initial state for the forward RNN.
      This must be a tensor of appropriate type and shape
      `[batch_size, cell_fw.state_size]`.
      If `cell_fw.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using
      the corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial state.  Required if
      either of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    scope: VariableScope for the created subgraph; defaults to "BiRNN"
    bucket_length: NOTE(review): accepted but never referenced in this body
      -- either dead, or intended to be forwarded to rnn(); confirm.
    legacy: If True, use the old flat "BiRNN_FW"/"BiRNN_BW" variable scopes
      (for loading checkpoints saved by older code) instead of the nested
      "BiRNN/FW" and "BiRNN/BW" scopes.

  Returns:
    A tuple (outputs, output_state_fw, output_state_bw) where:
      outputs is a length `T` list of outputs (one for each input), which
        are depth-concatenated forward and backward outputs.
      output_state_fw is the final state of the forward rnn.
      output_state_bw is the final state of the backward rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is None or an empty list.
  """

  if not isinstance(cell_fw, rnn_cell.RNNCell):
    raise TypeError("cell_fw must be an instance of RNNCell")
  if not isinstance(cell_bw, rnn_cell.RNNCell):
    raise TypeError("cell_bw must be an instance of RNNCell")
  if not nest.is_sequence(inputs):
    raise TypeError("inputs must be a sequence")
  if not inputs:
    raise ValueError("inputs must not be empty")

  if legacy:
    # read legacy models
    with vs.variable_scope(scope or "BiRNN_FW") as fw_scope:
      # Forward direction
      output_fw, output_state_fw = rnn(cell_fw, inputs, initial_state_fw, dtype,
                                       sequence_length, scope=fw_scope)
    with vs.variable_scope(scope or "BiRNN_BW") as bw_scope:
      # Backward direction: run the forward-style rnn over reversed inputs.
      reversed_inputs = _reverse_seq(inputs, sequence_length)
      tmp, output_state_bw = rnn(cell_bw, reversed_inputs, initial_state_bw,
                                 dtype, sequence_length, scope=bw_scope)
  else:
    with vs.variable_scope(scope or "BiRNN"):
      # Forward direction
      with vs.variable_scope("FW") as fw_scope:
        output_fw, output_state_fw = rnn(cell_fw, inputs, initial_state_fw, dtype,
                                         sequence_length, scope=fw_scope)
      # Backward direction
      with vs.variable_scope("BW") as bw_scope:
        reversed_inputs = _reverse_seq(inputs, sequence_length)
        tmp, output_state_bw = rnn(cell_bw, reversed_inputs, initial_state_bw,
                                   dtype, sequence_length, scope=bw_scope)

  # Un-reverse the backward outputs so both directions are time-aligned.
  output_bw = _reverse_seq(tmp, sequence_length)
  # Concat each of the forward/backward outputs
  flat_output_fw = nest.flatten(output_fw)
  flat_output_bw = nest.flatten(output_bw)

  flat_outputs = tuple(array_ops.concat(1, [fw, bw])
                       for fw, bw in zip(flat_output_fw, flat_output_bw))

  outputs = nest.pack_sequence_as(structure=output_fw,
                                  flat_sequence=flat_outputs)

  return (outputs, output_state_fw, output_state_bw)
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
  """Creates a dynamic version of bidirectional recurrent neural network.

  Similar to the unidirectional case above (rnn) but takes input and builds
  independent forward and backward RNNs. The input_size of forward and
  backward cell must match. The initial state for both directions is zero by
  default (but can be set optionally) and no intermediate states are ever
  returned -- the network is fully unrolled for the given (passed in)
  length(s) of the sequence(s) or completely unrolled if length(s) is not
  given.

  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: The RNN inputs.
      If time_major == False (default), this must be a tensor of shape:
        `[batch_size, max_time, input_size]`.
      If time_major == True, this must be a tensor of shape:
        `[max_time, batch_size, input_size]`.
      [batch_size, input_size].
    sequence_length: An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    initial_state_fw: (optional) An initial state for the forward RNN.
      This must be a tensor of appropriate type and shape
      `[batch_size, cell_fw.state_size]`.
      If `cell_fw.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using
      the corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial states and expected output.
      Required if initial_states are not provided or RNN states have a
      heterogeneous dtype.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency
      and can be run in parallel, will be.  This parameter trades off
      time for space.  Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU.  This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors.
      If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
      If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
      Using `time_major = True` is a bit more efficient because it avoids
      transposes at the beginning and end of the RNN calculation.  However,
      most TensorFlow data is batch-major, so by default this function
      accepts input and emits output in batch-major form.
    dtype: (optional) The data type for the initial state.  Required if
      either of the initial states are not provided.
    scope: VariableScope for the created subgraph; defaults to "BiRNN"

  Returns:
    A tuple (outputs, output_states) where:
      outputs: A tuple (output_fw, output_bw) containing the forward and
        the backward rnn output `Tensor`.
        If time_major == False (default),
          output_fw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_bw.output_size]`.
        If time_major == True,
          output_fw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_bw.output_size]`.
        It returns a tuple instead of a single concatenated `Tensor`, unlike
        in the `bidirectional_rnn`. If the concatenated one is preferred,
        the forward and backward outputs can be concatenated as
        `tf.concat(2, outputs)`.
      output_states: A tuple (output_state_fw, output_state_bw) containing
        the forward and the backward final states of bidirectional rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
  """

  if not isinstance(cell_fw, rnn_cell.RNNCell):
    raise TypeError("cell_fw must be an instance of RNNCell")
  if not isinstance(cell_bw, rnn_cell.RNNCell):
    raise TypeError("cell_bw must be an instance of RNNCell")

  with vs.variable_scope(scope or "BiRNN"):
    # Forward direction
    with vs.variable_scope("FW") as fw_scope:
      output_fw, output_state_fw = dynamic_rnn(
          cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
          initial_state=initial_state_fw, dtype=dtype,
          parallel_iterations=parallel_iterations, swap_memory=swap_memory,
          time_major=time_major, scope=fw_scope)

    # Backward direction: pick the time/batch axes from the layout flag.
    if not time_major:
      time_dim = 1
      batch_dim = 0
    else:
      time_dim = 0
      batch_dim = 1

    with vs.variable_scope("BW") as bw_scope:
      # Per-row reversal up to each sequence_length, run forward-style,
      # then un-reverse the outputs below to restore time alignment.
      inputs_reverse = array_ops.reverse_sequence(
          input=inputs, seq_lengths=sequence_length,
          seq_dim=time_dim, batch_dim=batch_dim)
      tmp, output_state_bw = dynamic_rnn(
          cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
          initial_state=initial_state_bw, dtype=dtype,
          parallel_iterations=parallel_iterations, swap_memory=swap_memory,
          time_major=time_major, scope=bw_scope)

  output_bw = array_ops.reverse_sequence(
      input=tmp, seq_lengths=sequence_length,
      seq_dim=time_dim, batch_dim=batch_dim)

  outputs = (output_fw, output_bw)
  output_states = (output_state_fw, output_state_bw)

  return (outputs, output_states)
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
  """Creates a recurrent neural network specified by RNNCell `cell`.

  This function is functionally identical to the function `rnn` above, but
  performs fully dynamic unrolling of `inputs`.

  Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
  each frame.  Instead, `inputs` may be a single `Tensor` where
  the maximum time is either the first or second dimension (see the parameter
  `time_major`).  Alternatively, it may be a (possibly nested) tuple of
  Tensors, each of them having matching batch and time dimensions.
  The corresponding output is either a single `Tensor` having the same number
  of time steps and batch size, or a (possibly nested) tuple of such tensors,
  matching the nested structure of `cell.output_size`.

  The parameter `sequence_length` is optional and is used to copy-through state
  and zero-out outputs when past a batch element's sequence length. So it's more
  for correctness than performance, unlike in rnn().

  Args:
    cell: An instance of RNNCell.
    inputs: The RNN inputs.
      If `time_major == False` (default), this must be a `Tensor` of shape:
        `[batch_size, max_time, ...]`, or a nested tuple of such
        elements.
      If `time_major == True`, this must be a `Tensor` of shape:
        `[max_time, batch_size, ...]`, or a nested tuple of such
        elements.
      This may also be a (possibly nested) tuple of Tensors satisfying
      this property.  The first two dimensions must match across all the inputs,
      but otherwise the ranks and other shape components may differ.
      In this case, input to `cell` at each time-step will replicate the
      structure of these tuples, except for the time dimension (from which the
      time is taken).
      The input to `cell` at each time step will be a `Tensor` or (possibly
      nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
    sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
    initial_state: (optional) An initial state for the RNN.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a heterogeneous
      dtype.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency
      and can be run in parallel, will be.  This parameter trades off
      time for space.  Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU.  This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors.
      If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
      If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
      Using `time_major = True` is a bit more efficient because it avoids
      transposes at the beginning and end of the RNN calculation.  However,
      most TensorFlow data is batch-major, so by default this function
      accepts input and emits output in batch-major form.
    scope: VariableScope for the created subgraph; defaults to "RNN".

  Returns:
    A pair (outputs, state) where:
      outputs: The RNN output `Tensor`.
        If time_major == False (default), this will be a `Tensor` shaped:
          `[batch_size, max_time, cell.output_size]`.
        If time_major == True, this will be a `Tensor` shaped:
          `[max_time, batch_size, cell.output_size]`.
        Note, if `cell.output_size` is a (possibly nested) tuple of integers
        or `TensorShape` objects, then `outputs` will be a tuple having the
        same structure as `cell.output_size`, containing Tensors having shapes
        corresponding to the shape data in `cell.output_size`.
      state: The final state.  If `cell.state_size` is an int, this
        will be shaped `[batch_size, cell.state_size]`.  If it is a
        `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
        If it is a (possibly nested) tuple of ints or `TensorShape`, this will
        be a tuple having the corresponding shapes.

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.
  """
  if not isinstance(cell, rnn_cell.RNNCell):
    raise TypeError("cell must be an instance of RNNCell")

  # By default, time_major==False and inputs are batch-major: shaped
  #   [batch, time, depth]
  # For internal calculations, we transpose to [time, batch, depth]
  flat_input = nest.flatten(inputs)

  if not time_major:
    # (B,T,D) => (T,B,D)
    flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
                       for input_ in flat_input)

  parallel_iterations = parallel_iterations or 32
  if sequence_length is not None:
    # NOTE(review): `math_ops.to_int32` is a legacy-era TF API (later
    # `tf.cast`), consistent with the rest of this file.
    sequence_length = math_ops.to_int32(sequence_length)
    if sequence_length.get_shape().ndims not in (None, 1):
      raise ValueError(
          "sequence_length must be a vector of length batch_size, "
          "but saw shape: %s" % sequence_length.get_shape())
    sequence_length = array_ops.identity(  # Just to find it in the graph.
        sequence_length, name="sequence_length")

  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "RNN") as varscope:
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)
    # Dynamic (runtime) shapes of the (now time-major) inputs; batch size is
    # dim 1 after the transpose above.
    input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
    batch_size = input_shape[0][1]

    for input_ in input_shape:
      if input_[1].get_shape() != batch_size.get_shape():
        raise ValueError("All inputs should have the same batch size")

    if initial_state is not None:
      state = initial_state
    else:
      if not dtype:
        raise ValueError("If no initial_state is provided, dtype must be.")
      state = cell.zero_state(batch_size, dtype)

    def _assert_has_shape(x, shape):
      # Builds a graph-level (runtime) assertion that `x` has dynamic shape
      # `shape`.  NOTE(review): `array_ops.pack` is the legacy name for
      # `tf.stack`.
      x_shape = array_ops.shape(x)
      packed_shape = array_ops.pack(shape)
      return control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
          ["Expected shape for Tensor %s is " % x.name,
           packed_shape, " but saw shape: ", x_shape])

    if sequence_length is not None:
      # Perform some shape validation
      with ops.control_dependencies(
          [_assert_has_shape(sequence_length, [batch_size])]):
        sequence_length = array_ops.identity(
            sequence_length, name="CheckSeqLen")

    # Re-pack the (possibly transposed) flat inputs back into the caller's
    # nested structure before handing off to the loop implementation.
    inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)

    (outputs, final_state) = _dynamic_rnn_loop(
        cell,
        inputs,
        state,
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory,
        sequence_length=sequence_length,
        dtype=dtype)

    # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
    # If we are performing batch-major calculations, transpose output back
    # to shape [batch, time, depth]
    if not time_major:
      # (T,B,D) => (B,T,D)
      flat_output = nest.flatten(outputs)
      flat_output = [array_ops.transpose(output, [1, 0, 2])
                     for output in flat_output]
      outputs = nest.pack_sequence_as(
          structure=outputs, flat_sequence=flat_output)

    return (outputs, final_state)
def _dynamic_rnn_loop(cell,
                      inputs,
                      initial_state,
                      parallel_iterations,
                      swap_memory,
                      sequence_length=None,
                      dtype=None):
  """Internal implementation of Dynamic RNN.

  Args:
    cell: An instance of RNNCell.
    inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
      tuple of such elements.
    initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
      `cell.state_size` is a tuple, then this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    parallel_iterations: Positive Python int.
    swap_memory: A Python boolean
    sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
    dtype: (optional) Expected dtype of output. If not specified, inferred from
      initial_state.

  Returns:
    Tuple `(final_outputs, final_state)`.
    final_outputs:
      A `Tensor` of shape `[time, batch_size, cell.output_size]`.  If
      `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
      the corresponding shapes.
    final_state:
      A `Tensor`, or possibly nested tuple of Tensors, matching in length
      and shapes to `initial_state`.

  Raises:
    ValueError: If the input depth cannot be inferred via shape inference
      from the inputs.
  """
  state = initial_state
  assert isinstance(parallel_iterations, int), "parallel_iterations must be int"

  state_size = cell.state_size

  flat_input = nest.flatten(inputs)
  flat_output_size = nest.flatten(cell.output_size)

  # Construct an initial output
  input_shape = array_ops.shape(flat_input[0])
  # Inputs are time-major here: dim 0 is time, dim 1 is batch.
  time_steps = input_shape[0]
  batch_size = input_shape[1]

  inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
                           for input_ in flat_input)

  # Static (compile-time) time/batch sizes; may be None if unknown.
  const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]

  for shape in inputs_got_shape:
    if not shape[2:].is_fully_defined():
      raise ValueError(
          "Input size (depth of inputs) must be accessible via shape inference,"
          " but saw value None.")
    got_time_steps = shape[0].value
    got_batch_size = shape[1].value
    if const_time_steps != got_time_steps:
      raise ValueError(
          "Time steps is not the same for all the elements in the input in a "
          "batch.")
    if const_batch_size != got_batch_size:
      raise ValueError(
          "Batch_size is not the same for all the elements in the input.")

  # Prepare dynamic conditional copying of state & output
  def _create_zero_arrays(size):
    # Builds a zero tensor of shape [batch_size] + size, used as the emitted
    # output for batch entries whose sequence has already ended.
    size = _state_size_with_prefix(size, prefix=[batch_size])
    return array_ops.zeros(
        array_ops.pack(size), _infer_state_dtype(dtype, state))

  flat_zero_output = tuple(_create_zero_arrays(output)
                           for output in flat_output_size)
  zero_output = nest.pack_sequence_as(structure=cell.output_size,
                                      flat_sequence=flat_zero_output)

  if sequence_length is not None:
    min_sequence_length = math_ops.reduce_min(sequence_length)
    max_sequence_length = math_ops.reduce_max(sequence_length)

  # Loop counter for the while_loop below.
  time = array_ops.constant(0, dtype=dtypes.int32, name="time")

  with ops.name_scope("dynamic_rnn") as scope:
    base_name = scope

  def _create_ta(name, dtype):
    # One TensorArray per flat input/output component, sized to the full
    # (dynamic) number of time steps.
    return tensor_array_ops.TensorArray(dtype=dtype,
                                        size=time_steps,
                                        tensor_array_name=base_name + name)

  output_ta = tuple(_create_ta("output_%d" % i,
                               _infer_state_dtype(dtype, state))
                    for i in range(len(flat_output_size)))
  input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
                   for i in range(len(flat_input)))

  # NOTE(review): `TensorArray.unpack` is the legacy name for `unstack`.
  input_ta = tuple(ta.unpack(input_)
                   for ta, input_ in zip(input_ta, flat_input))

  def _time_step(time, output_ta_t, state):
    """Take a time step of the dynamic RNN.

    Args:
      time: int32 scalar Tensor.
      output_ta_t: List of `TensorArray`s that represent the output.
      state: nested tuple of vector tensors that represent the state.

    Returns:
      The tuple (time + 1, output_ta_t with updated flow, new_state).
    """
    input_t = tuple(ta.read(time) for ta in input_ta)
    # Restore some shape information
    for input_, shape in zip(input_t, inputs_got_shape):
      # shape[1:] drops the time dimension, leaving [batch, depth...].
      input_.set_shape(shape[1:])

    input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
    call_cell = lambda: cell(input_t, state)

    if sequence_length is not None:
      # _rnn_step zeroes outputs and copies state through for batch entries
      # past their sequence_length (see dynamic_rnn's docstring).
      (output, new_state) = _rnn_step(
          time=time,
          sequence_length=sequence_length,
          min_sequence_length=min_sequence_length,
          max_sequence_length=max_sequence_length,
          zero_output=zero_output,
          state=state,
          call_cell=call_cell,
          state_size=state_size,
          skip_conditionals=True)
    else:
      (output, new_state) = call_cell()

    # Pack state if using state tuples
    output = nest.flatten(output)

    output_ta_t = tuple(
        ta.write(time, out) for ta, out in zip(output_ta_t, output))

    return (time + 1, output_ta_t, new_state)

  _, output_final_ta, final_state = control_flow_ops.while_loop(
      cond=lambda time, *_: time < time_steps,
      body=_time_step,
      loop_vars=(time, output_ta, state),
      parallel_iterations=parallel_iterations,
      swap_memory=swap_memory)

  # Unpack final output if not using output tuples.
  # NOTE(review): `TensorArray.pack` is the legacy name for `stack`.
  final_outputs = tuple(ta.pack() for ta in output_final_ta)

  # Restore some shape information
  for output, output_size in zip(final_outputs, flat_output_size):
    shape = _state_size_with_prefix(
        output_size, prefix=[const_time_steps, const_batch_size])
    output.set_shape(shape)

  final_outputs = nest.pack_sequence_as(
      structure=cell.output_size, flat_sequence=final_outputs)

  return (final_outputs, final_state)
def raw_rnn(cell, loop_fn,
            parallel_iterations=None, swap_memory=False, scope=None):
  """Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.

  **NOTE: This method is still in testing, and the API may change.**

  This function is a more primitive version of `dynamic_rnn` that provides
  more direct access to the inputs each iteration.  It also provides more
  control over when to start and finish reading the sequence, and
  what to emit for the output.

  For example, it can be used to implement the dynamic decoder of a seq2seq
  model.

  Instead of working with `Tensor` objects, most operations work with
  `TensorArray` objects directly.

  The operation of `raw_rnn`, in pseudo-code, is basically the following:

  ```python
  time = tf.constant(0, dtype=tf.int32)
  (finished, next_input, initial_state, _, loop_state) = loop_fn(
      time=time, cell_output=None, cell_state=None, loop_state=None)
  emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
  state = initial_state
  while not all(finished):
    (output, cell_state) = cell(next_input, state)
    (next_finished, next_input, next_state, emit, loop_state) = loop_fn(
        time=time + 1, cell_output=output, cell_state=cell_state,
        loop_state=loop_state)
    # Emit zeros and copy forward state for minibatch entries that are finished.
    state = tf.where(finished, state, next_state)
    emit = tf.where(finished, tf.zeros_like(emit), emit)
    emit_ta = emit_ta.write(time, emit)
    # If any new minibatch entries are marked as finished, mark these.
    finished = tf.logical_or(finished, next_finished)
    time += 1
  return (emit_ta, state, loop_state)
  ```

  with the additional properties that output and state may be (possibly nested)
  tuples, as determined by `cell.output_size` and `cell.state_size`, and
  as a result the final `state` and `emit_ta` may themselves be tuples.

  A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:

  ```python
  inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
                          dtype=tf.float32)
  sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
  inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
  inputs_ta = inputs_ta.unpack(inputs)

  cell = tf.nn.rnn_cell.LSTMCell(num_units)

  def loop_fn(time, cell_output, cell_state, loop_state):
    emit_output = cell_output  # == None for time == 0
    if cell_output is None:  # time == 0
      next_cell_state = cell.zero_state(batch_size, tf.float32)
    else:
      next_cell_state = cell_state
    elements_finished = (time >= sequence_length)
    finished = tf.reduce_all(elements_finished)
    next_input = tf.cond(
        finished,
        lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
        lambda: inputs_ta.read(time))
    next_loop_state = None
    return (elements_finished, next_input, next_cell_state,
            emit_output, next_loop_state)

  outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
  outputs = outputs_ta.pack()
  ```

  Args:
    cell: An instance of RNNCell.
    loop_fn: A callable that takes inputs
      `(time, cell_output, cell_state, loop_state)`
      and returns the tuple
      `(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
      Here `time` is an int32 scalar `Tensor`, `cell_output` is a
      `Tensor` or (possibly nested) tuple of tensors as determined by
      `cell.output_size`, and `cell_state` is a `Tensor`
      or (possibly nested) tuple of tensors, as determined by the `loop_fn`
      on its first call (and should match `cell.state_size`).
      The outputs are: `finished`, a boolean `Tensor` of
      shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
      `next_cell_state`: the next state to feed to `cell`,
      and `emit_output`: the output to store for this iteration.

      Note that `emit_output` should be a `Tensor` or (possibly nested)
      tuple of tensors with shapes and structure matching `cell.output_size`
      and `cell_output` above.  The parameter `cell_state` and output
      `next_cell_state` may be either a single or (possibly nested) tuple
      of tensors.  The parameter `loop_state` and
      output `next_loop_state` may be either a single or (possibly nested) tuple
      of `Tensor` and `TensorArray` objects.  This last parameter
      may be ignored by `loop_fn` and the return value may be `None`.  If it
      is not `None`, then the `loop_state` will be propagated through the RNN
      loop, for use purely by `loop_fn` to keep track of its own state.
      The `next_loop_state` parameter returned may be `None`.

      The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
      `cell_state = None`, and `loop_state = None`.  For this call:
      The `next_cell_state` value should be the value with which to initialize
      the cell's state.  It may be a final state from a previous RNN or it
      may be the output of `cell.zero_state()`.  It should be a
      (possibly nested) tuple structure of tensors.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
      appropriate type and shape `[batch_size] + cell.state_size`.
      If `cell.state_size` is a (possibly nested) tuple of ints or
      `TensorShape`, this will be a tuple having the corresponding shapes.
      The `emit_output` value may be either `None` or a (possibly nested)
      tuple structure of tensors, e.g.,
      `(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
      If this first `emit_output` return value is `None`,
      then the `emit_ta` result of `raw_rnn` will have the same structure and
      dtypes as `cell.output_size`.  Otherwise `emit_ta` will have the same
      structure, shapes (prepended with a `batch_size` dimension), and dtypes
      as `emit_output`.  The actual values returned for `emit_output` at this
      initializing call are ignored.  Note, this emit structure must be
      consistent across all time steps.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency
      and can be run in parallel, will be.  This parameter trades off
      time for space.  Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU.  This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    scope: VariableScope for the created subgraph; defaults to "RNN".

  Returns:
    A tuple `(emit_ta, final_state, final_loop_state)` where:

    `emit_ta`: The RNN output `TensorArray`.
       If `loop_fn` returns a (possibly nested) set of Tensors for
       `emit_output` during initialization, (inputs `time = 0`,
       `cell_output = None`, and `loop_state = None`), then `emit_ta` will
       have the same structure, dtypes, and shapes as `emit_output` instead.
       If `loop_fn` returns `emit_output = None` during this call,
       the structure of `cell.output_size` is used:
       If `cell.output_size` is a (possibly nested) tuple of integers
       or `TensorShape` objects, then `emit_ta` will be a tuple having the
       same structure as `cell.output_size`, containing TensorArrays whose
       elements' shapes correspond to the shape data in `cell.output_size`.

    `final_state`: The final cell state.  If `cell.state_size` is an int, this
      will be shaped `[batch_size, cell.state_size]`.  If it is a
      `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
      If it is a (possibly nested) tuple of ints or `TensorShape`, this will
      be a tuple having the corresponding shapes.

    `final_loop_state`: The final loop state as returned by `loop_fn`.

  Raises:
    TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
      a `callable`.
  """
  if not isinstance(cell, rnn_cell.RNNCell):
    raise TypeError("cell must be an instance of RNNCell")
  if not callable(loop_fn):
    raise TypeError("loop_fn must be a callable")

  parallel_iterations = parallel_iterations or 32

  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "RNN") as varscope:
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)

    time = constant_op.constant(0, dtype=dtypes.int32)
    # The initializing call: loop_fn decides the initial state, the first
    # input, and (optionally) the emit structure and loop state.
    (elements_finished, next_input, initial_state, emit_structure,
     init_loop_state) = loop_fn(
         time, None, None, None)  # time, cell_output, cell_state, loop_state
    flat_input = nest.flatten(next_input)

    # Need a surrogate loop state for the while_loop if none is available.
    loop_state = (init_loop_state if init_loop_state is not None
                  else constant_op.constant(0, dtype=dtypes.int32))

    input_shape = [input_.get_shape() for input_ in flat_input]
    static_batch_size = input_shape[0][0]

    for input_shape_i in input_shape:
      # Static verification that batch sizes all match
      static_batch_size.merge_with(input_shape_i[0])

    batch_size = static_batch_size.value
    if batch_size is None:
      # Fall back to the dynamic (runtime) batch size.
      batch_size = array_ops.shape(flat_input[0])[0]

    nest.assert_same_structure(initial_state, cell.state_size)
    state = initial_state
    flat_state = nest.flatten(state)
    flat_state = [ops.convert_to_tensor(s) for s in flat_state]
    state = nest.pack_sequence_as(structure=state,
                                  flat_sequence=flat_state)

    if emit_structure is not None:
      flat_emit_structure = nest.flatten(emit_structure)
      flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
      flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
    else:
      # No emit structure supplied by loop_fn: mirror cell.output_size and
      # reuse the state's dtype for every emitted component.
      emit_structure = cell.output_size
      flat_emit_size = nest.flatten(emit_structure)
      flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)

    flat_emit_ta = [
        tensor_array_ops.TensorArray(
            dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
        for i, dtype_i in enumerate(flat_emit_dtypes)]
    emit_ta = nest.pack_sequence_as(structure=emit_structure,
                                    flat_sequence=flat_emit_ta)
    # Zeros emitted in place of real outputs for already-finished entries.
    flat_zero_emit = [
        array_ops.zeros(
            _state_size_with_prefix(size_i, prefix=[batch_size]),
            dtype_i)
        for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
    zero_emit = nest.pack_sequence_as(structure=emit_structure,
                                      flat_sequence=flat_zero_emit)

    def condition(unused_time, elements_finished, *_):
      # Keep looping until every batch entry is finished.
      return math_ops.logical_not(math_ops.reduce_all(elements_finished))

    def body(time, elements_finished, current_input,
             emit_ta, state, loop_state):
      """Internal while loop body for raw_rnn.

      Args:
        time: time scalar.
        elements_finished: batch-size vector.
        current_input: possibly nested tuple of input tensors.
        emit_ta: possibly nested tuple of output TensorArrays.
        state: possibly nested tuple of state tensors.
        loop_state: possibly nested tuple of loop state tensors.

      Returns:
        Tuple having the same size as Args but with updated values.
      """
      (next_output, cell_state) = cell(current_input, state)

      nest.assert_same_structure(state, cell_state)
      nest.assert_same_structure(cell.output_size, next_output)

      next_time = time + 1
      (next_finished, next_input, next_state, emit_output,
       next_loop_state) = loop_fn(
           next_time, next_output, cell_state, loop_state)

      nest.assert_same_structure(state, next_state)
      nest.assert_same_structure(current_input, next_input)
      nest.assert_same_structure(emit_ta, emit_output)

      # If loop_fn returns None for next_loop_state, just reuse the
      # previous one.
      loop_state = loop_state if next_loop_state is None else next_loop_state

      def _copy_some_through(current, candidate):
        # Elementwise: keep `current` for finished batch entries, take
        # `candidate` for the rest.  NOTE(review): `math_ops.select` is the
        # legacy name for `tf.where`.
        current_flat = nest.flatten(current)
        candidate_flat = nest.flatten(candidate)
        result_flat = [
            math_ops.select(elements_finished, current_i, candidate_i)
            for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
        return nest.pack_sequence_as(
            structure=current, flat_sequence=result_flat)

      emit_output = _copy_some_through(zero_emit, emit_output)
      next_state = _copy_some_through(state, next_state)

      emit_output_flat = nest.flatten(emit_output)
      emit_ta_flat = nest.flatten(emit_ta)

      elements_finished = math_ops.logical_or(elements_finished, next_finished)

      # Write at `time` (the pre-increment counter), matching the pseudo-code
      # in the docstring.
      emit_ta_flat = [
          ta.write(time, emit)
          for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]

      emit_ta = nest.pack_sequence_as(
          structure=emit_structure, flat_sequence=emit_ta_flat)

      return (next_time, elements_finished, next_input,
              emit_ta, next_state, loop_state)

    returned = control_flow_ops.while_loop(
        condition, body, loop_vars=[
            time, elements_finished, next_input,
            emit_ta, state, loop_state],
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory)

    (emit_ta, final_state, final_loop_state) = returned[-3:]

    if init_loop_state is None:
      # The surrogate loop state is an implementation detail; hide it.
      final_loop_state = None

    return (emit_ta, final_state, final_loop_state)
| {
"content_hash": "a66fc9baefc2bc56e27230883671004f",
"timestamp": "",
"source": "github",
"line_count": 1362,
"max_line_length": 96,
"avg_line_length": 42.59251101321586,
"alnum_prop": 0.6681491441278378,
"repo_name": "DCSaunders/tensorflow",
"id": "12b0243ae60c531f5f78d88b620c541250aaa5d8",
"size": "58701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/rnn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6641"
},
{
"name": "C",
"bytes": "90766"
},
{
"name": "C++",
"bytes": "13984868"
},
{
"name": "CMake",
"bytes": "110983"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "87708"
},
{
"name": "HTML",
"bytes": "534592"
},
{
"name": "Java",
"bytes": "57002"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "26235"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Perl",
"bytes": "4412"
},
{
"name": "Protocol Buffer",
"bytes": "143116"
},
{
"name": "Python",
"bytes": "13808086"
},
{
"name": "Shell",
"bytes": "276793"
},
{
"name": "TypeScript",
"bytes": "749115"
}
],
"symlink_target": ""
} |
"""TensorFlow Debugger (tfdbg) Stepper Module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
# TODO(cais): Use nest.flatten once it handles nest Dicts correctly.
def _flatten_fetches(fetches):
"""Flatten list, tuple of fetches, or a single fetch into a list of fetches.
Args:
fetches: The fetches to flatten: Can be a single Tensor, Op, or a
potentially nested list, tuple or dict of such individual fetches.
Returns:
The fetches flattened to a list.
"""
flattened = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
flattened.extend(_flatten_fetches(fetch))
elif isinstance(fetches, dict):
for key in fetches:
flattened.extend(_flatten_fetches(fetches[key]))
else:
flattened.append(fetches)
return flattened
class NodeStepper(object):
"""TensorFlow Debugger (tfdbg) stepper.
The stepper provides ability to perform "continue to" actions on a graph,
given fetch and feeds. The stepper calculates the transitive closure of the
fetch. cont() (continue to) calls can only be performed on members of the
transitive closure.
On a cont() call, the stepper performs depth-first tracing of the input
tree of the target. When it reaches an input where one of the following is
available, it will supply the available value to the feed_dict of the cont()
call:
(1) TensorHandles from previous cont() calls.
(2) Overriding (injected) values from the client.
(3) Feeds supplied during the construction of the stepper instance.
Once the tracing is complete, it will issue a run() call on the
underlying session, using the aforementioned feed_dict prepared by the input
tracing, to achieve the "continue-to" action. The above process takes into
account whether the transitive closure of an input contains Variables that
are updated during previous cont() calls on this stepper instance. If such
updates exist, we say the transitive closure is "dirty" and the stepper
can restore the "clean" state of the Variable and avoid using the
TensorHandle.
Example of basic usage:
a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, anme="b")
c = tf.add(a, b, name="c")
d = tf.multiply(a, c, name="d")
sess = tf.Session()
sess.run(tf.initialize_all_varialbes())
stepper = NodeStepper(sess, d)
stepper.cont(c) # Caches the handle to Tensor c:0.
stepper.cont(d) # Uses handle to Tensor c:0, avoiding recomputing c.
"""
# Possible types of feed used during cont() calls.
FEED_TYPE_CLIENT = "client"
FEED_TYPE_HANDLE = "handle"
FEED_TYPE_OVERRIDE = "override"
# TODO(cais): The following member constant is currently unused. Use it when
# the stepper is capable of using dumped intermediate tensors.
FEED_TYPE_INTERMEDIATE = "intermediate"
def __init__(self, sess, fetches, feed_dict=None):
"""Constructor for Debugger.
Args:
sess: (Session) the TensorFlow Session to step in.
fetches: Same as the fetches input argument to `Session.run()`.
feed_dict: Same as the feed_dict input argument to `Session.run()`.
"""
self._sess = sess
self._fetches = fetches
flattened_fetches = _flatten_fetches(fetches)
self._fetch_names, self._fetch_list = self._get_fetch_and_name_lists(
flattened_fetches)
# A map from Variable name to initializer op.
self._variable_initializers = {}
# A map from Variable name to initial value, used when overriding or
# restoring Variable values.
self._variable_initial_values = {}
# Initialize the map for output recipients (targets).
self._non_control_output_targets = {}
# Sorted transitive closure of the fetched node.
self._sorted_nodes, self._closure_elements = self._dfs_visit(
self._sess.graph, self._fetch_list)
self._transitive_closure_set = set(self._sorted_nodes)
# A map from Variable name to the old values (before any cont() calls).
self._cached_variable_values = {}
# A cache map from tensor name to what variables may invalidate the tensor
self._cached_invalidation_path = {}
# Keep track of which variables are in a dirty state.
self._dirty_variables = set()
# Cached tensor handles: a dict with keys as tensor names and values as
# tensor handles.
self._tensor_handles = {}
# Feed dict from the client.
self._client_feed_dict = {}
if feed_dict:
for key in feed_dict:
if isinstance(key, ops.Tensor):
self._client_feed_dict[key.name] = feed_dict[key]
else:
self._client_feed_dict[key] = feed_dict[key]
# Overriding tensor values.
self._override_tensors = {}
# What the feed types were used by the last cont() call.
self._last_feed_types = {}
def _get_fetch_and_name_lists(self, flattened_fetches):
"""Get the lists of fetches and their names.
Args:
flattened_fetches: A list of fetches or their names. Can mix fetches and
names.
Returns:
(list of str): A list of the names of the fetches.
(list): A list of the fetches.
"""
fetch_names = []
fetch_list = []
for fetch in flattened_fetches:
if isinstance(fetch, six.string_types):
fetch_names.append(fetch)
fetch_list.append(self._sess.graph.as_graph_element(fetch))
else:
fetch_names.append(fetch.name)
fetch_list.append(fetch)
return fetch_names, fetch_list
def _dfs_visit(self, graph, elem_list):
  """Trace back the input of a graph element, using depth-first search.

  Uses non-recursive implementation to prevent stack overflow for deep
  graphs.

  Also performs the following action(s):
    1) When encountering a Variable, obtain its initializer op, to
       facilitate possible subsequent restoration / overriding of variable
       value.

  Side effects: populates self._non_control_output_targets,
  self._variable_initializers and self._variable_initial_values.

  Args:
    graph: A TF graph instance.
    elem_list: list of graph elements: a Tensor or an Operation.

  Returns:
    (list of str) A topologically-sorted list of all nodes (not tensors)
      in the transitive closure of elem_list. Obviously, the topological
      sort is not unique in general. The return value here is just an
      arbitrary one of potentially many possible topological sorts.
    (list of str) A list of all graph elements (nodes and/or tensors) in
      the transitive closure.
  """
  # These set should hold only strings, i.e, names of the nodes.
  done = set()  # Keep track of visited graph elements.

  # A list of str: Names of the topologically-sorted graph elements.
  node_inputs = dict()  # New: Input map of nodes in the transitive closure.

  elem_stack = copy.copy(elem_list)

  # Graph elements in the transitive closure, including the nodes and
  # tensors.
  closure_elements = [elem.name for elem in elem_list]

  # Phase 1: depth-first traversal from the fetches back through their
  # inputs, recording for each node the set of its input node names.
  while elem_stack:
    curr_elem = elem_stack.pop()
    curr_node = self._get_node(curr_elem)

    done.add(curr_node.name)

    non_control_inputs = [inp for inp in curr_node.inputs]
    control_inputs = [inp for inp in curr_node.control_inputs]
    all_inputs = set(non_control_inputs + control_inputs)

    if curr_node.name not in node_inputs:
      all_input_nodes = set()
      for inp in all_inputs:
        all_input_nodes.add(self._get_node(inp).name)
      node_inputs[curr_node.name] = all_input_nodes

    # Iterate through the (non-control) inputs.
    for inp in all_inputs:
      is_non_control_input = inp in non_control_inputs

      # Set up the non-control output map (reverse edges, used later for
      # cache invalidation by tracing outputs).
      if is_non_control_input:
        if inp.name not in self._non_control_output_targets:
          self._non_control_output_targets[inp.name] = set([curr_elem.name])
        else:
          self._non_control_output_targets[inp.name].add(curr_elem.name)

      if (inp.op.type in ["Variable", "VariableV2"] and
          inp.name not in self._variable_initializers):
        # Obtain the initializer op of the variable, in case the Variable's
        # value needs to be restored later.
        initializer = graph.as_graph_element(inp.op.name + "/Assign")
        self._variable_initializers[inp.name] = initializer
        self._variable_initial_values[inp.name] = initializer.inputs[1]

      inp_node = self._get_node(inp)
      if inp_node.name in done:
        # Already visited.
        continue

      elem_stack.append(inp)
      closure_elements.append(inp.name)

  # Phase 2: now that we have traversed the transitive closure and obtained
  # the node-input map, we can topologically sort them.
  # (Kahn-style: repeatedly emit nodes whose remaining input set is empty.)
  sorted_nodes = []
  stack = []
  for node in node_inputs:
    if not node_inputs[node]:
      stack.append(node)
  for node in stack:
    del node_inputs[node]

  while stack:
    curr_node = stack.pop()
    sorted_nodes.append(curr_node)

    # Iterate through the node-input map and remove the child.
    pushes = []
    for node in node_inputs:
      if curr_node in node_inputs[node]:
        node_inputs[node].remove(curr_node)
        if not node_inputs[node]:
          pushes.append(node)

    # Delete new pushes from node-input map.
    for node in pushes:
      del node_inputs[node]

    stack.extend(pushes)

  return sorted_nodes, closure_elements
def sorted_nodes(self):
  """Names of all nodes in the stepper's closure, topologically sorted.

  The fetched node itself is included in the list; tensors are not.

  Returns:
    (list of str) Node names in a topologically-sorted order.
  """
  return self._sorted_nodes
def closure_elements(self):
  """Names of all graph elements in the stepper's transitive closure.

  Returns:
    (list of str) Names of nodes and tensors in the transitive closure,
      in no particular order.
  """
  return self._closure_elements
def output_slots_in_closure(self, node_name):
  """List the output slots of a node that lie in the transitive closure.

  Args:
    node_name: (str) Name of the node in question.

  Returns:
    (list of int) Output slots whose tensors belong to the stepper's
      transitive closure.
  """
  node = self._sess.graph.as_graph_element(node_name)
  return [slot for slot in range(len(node.outputs))
          if ("%s:%d" % (node_name, slot)) in self._closure_elements]
def is_feedable(self, name):
  """Determine whether a graph element can be fed.

  Args:
    name: (str) name of the graph element (Tensor or Operation).

  Returns:
    (bool) whether the graph element is feedable.

  Raises:
    TypeError: If name is not a string.
  """
  if not isinstance(name, six.string_types):
    raise TypeError("Expected type str; got type %s" % type(name))
  graph = self._sess.graph
  return graph.is_feedable(graph.as_graph_element(name))
def override_tensor(self, tensor_name, overriding_val):
  """Install a client-supplied overriding value for a tensor.

  Any cached tensor handles downstream of the tensor are invalidated,
  since their values may no longer reflect the graph's state.

  Args:
    tensor_name: (str) Name of the tensor to override.
    overriding_val: (numpy.ndarray) Overriding tensor value.

  Raises:
    TypeError: If tensor_name is not a string.
    ValueError: If tensor_name does not correspond to a tensor in the
      input tree to the fetched graph element of this stepper instance.
  """
  if not isinstance(tensor_name, six.string_types):
    raise TypeError("Expected type str; got type %s" % type(tensor_name))

  owning_node = self._get_node_name(tensor_name)
  if owning_node not in self._transitive_closure_set:
    raise ValueError(
        "Cannot override tensor \"%s\" because it does not exist in the "
        "input tree to the fetch \"%s\"" %
        (tensor_name, repr(self._fetch_names)))

  self._override_tensors[tensor_name] = overriding_val
  # Anything computed from this tensor may now be stale.
  self._invalidate_transitively_outgoing_cache(tensor_name)
def remove_override(self, tensor_name):
  """Drop a previously installed overriding value for a tensor.

  Cached values downstream of the tensor are invalidated, because they
  may have been computed from the override being removed.

  Args:
    tensor_name: (str) name of the tensor to remove the overriding value
      from.

  Raises:
    ValueError: If no overriding value exists for tensor_name.
  """
  if tensor_name not in self._override_tensors:
    raise ValueError("No overriding value exists for tensor \"%s\"." %
                     tensor_name)

  self._override_tensors.pop(tensor_name)
  self._invalidate_transitively_outgoing_cache(tensor_name)
def last_feed_types(self):
  """Report how each input was fed in the most recent cont() call.

  Returns:
    (dict) Maps tensor names to feed types (e.g., override, handle,
      client feed).
  """
  return self._last_feed_types
def cont(self,
         target,
         use_tensor_handles=True,
         use_overrides=True,
         restore_variable_values=False):
  """Continue till the completion of the specified target tensor.

  Args:
    target: A single fetched Tensor or Op, or a name (str) representing the
      Tensor or Op. In the case of a name str, the graph will be searched
      to find the corresponding Tensor or Op.
      # TODO(cais): Support multiple fetches as in Session.run() interface.
    use_tensor_handles: (bool) Whether this cont() run will use cached
      tensor handles to avoid recomputation. Default: True.
    use_overrides: (bool) Whether the overriding tensor values supplied by
      the client are to be used in this cont() call. Default: True.
    restore_variable_values: (bool) Whether the old values of the variables
      (before any cont() calls in this object) are to be restored.

  Returns:
    Value from Session.run() of the target.

  Raises:
    ValueError: If the target is specified as a string and the string does
      not correspond to any tensors in the Session graph.
      Or if the target of this cont() is not in the input list of the
      Stepper object's target.
      Or if target is a Placeholder.
  """
  self._last_feed_types = {}

  # The feeds to be used in the Session.run() call.
  feeds = {}

  if isinstance(target, six.string_types):
    # Fetch target is a string. Assume it is the name of the Tensor or Op
    # and will attempt to find it in the Session's graph.
    target_name = target
  else:
    target_name = target.name

  graph_element = self._sess.graph.as_graph_element(target_name)

  # Any additional tensor handles to obtain in this cont() action.
  additional_handle_requests = []

  # Placeholder targets short-circuit: their value comes straight from the
  # client-supplied feed dict, with no Session.run() needed.
  if (isinstance(graph_element, ops.Tensor) and
      graph_element.op.type == "Placeholder"):
    self._last_feed_types[graph_element.name] = self.FEED_TYPE_CLIENT
    return self._client_feed_dict[graph_element.name]
  elif (isinstance(graph_element, ops.Operation) and
        graph_element.type == "Placeholder"):
    tensor_name = graph_element.name + ":0"
    self._last_feed_types[tensor_name] = self.FEED_TYPE_CLIENT
    return self._client_feed_dict[tensor_name]

  if isinstance(graph_element, ops.Operation) and graph_element.outputs:
    # Check if this op has any output tensors that also fall into this
    # stepper's transitive closure.
    node_outputs = [
        output.name for output in graph_element.outputs
        if output.name in self._closure_elements
    ]

    if node_outputs:
      # The target is an op with at least one output within the transitive
      # closure. The cont() action will amount to using the 0-th
      # output Tensor as the target, as well as obtaining handles to it
      # and to the rest of the outputs tensors in the transitive closure
      # (if any).
      target_name = node_outputs[0]

      additional_handle_requests = node_outputs[1:]

  # Verify that the target is in the transitive closure of the stepper's
  # fetch.
  target_node_name = self._get_node_name(target_name)
  if target_node_name not in self._transitive_closure_set:
    raise ValueError(
        "Target \"%s\" is not in the transitive closure for the fetch of the "
        "stepper: \"%s\"." % (target_name, repr(self._fetch_names)))

  # Check if a cached tensor handle can be used on the fetch directly.
  if use_tensor_handles and target_name in self._tensor_handles:
    self._last_feed_types[target_name] = self.FEED_TYPE_HANDLE
    return self._tensor_handles[target_name].eval()

  # Check if an overriding tensor value can be used directly.
  if use_overrides and target_name in self._override_tensors:
    # Override is available. Return the value right away.
    self._last_feed_types[target_name] = self.FEED_TYPE_OVERRIDE
    return self._override_tensors[target_name]

  # Keep track of which variables are restored in this cont() call.
  restored_variables = set()

  # Keep track of which variables are "touched" (i.e., possibly updated) in
  # this cont() call.
  touched_variables = set()

  # =========================================================================
  # Use a non-recursive method to trace the inputs from the node and set up
  # the feeds.
  fetched = self._sess.graph.as_graph_element(target_name)
  elem_stack = [fetched]
  done = set()

  while elem_stack:
    curr_elem = elem_stack.pop()
    curr_node = self._get_node(curr_elem)

    done.add(curr_node.name)

    non_control_inputs = [inp for inp in curr_node.inputs]
    control_inputs = [inp for inp in curr_node.control_inputs]
    all_inputs = set(non_control_inputs + control_inputs)

    # Iterate through the (non-control) inputs.
    for inp in all_inputs:
      # Determine whether the input is feedable. Reference-type tensors,
      # e.g., Variables, should not be fed, because they can change.
      if isinstance(inp, ops.Tensor):
        is_inp_ref = inp.dtype._is_ref_dtype  # pylint: disable=protected-access
        can_feed = self._sess.graph.is_feedable(inp) and not is_inp_ref
      else:
        is_inp_ref = False
        can_feed = False

      if (restore_variable_values and inp.name in self._dirty_variables and
          inp.name not in restored_variables and
          inp.name not in touched_variables):
        # Do not restore Variables touched or restored previously in this
        # cont() call.
        initializer_op = self._variable_initializers[inp.name]
        initial_value_tensor = self._variable_initial_values[inp.name]
        self._sess.run(initializer_op,
                       feed_dict={
                           initial_value_tensor:
                               self._cached_variable_values[inp.name]
                       })

        # Mark the variable as restored.
        restored_variables.add(inp.name)

      # Determine if this is a reference-type input from a variable, and
      # the recipient node is not Identity. In that case, the Variable
      # needs to be marked as dirty and its current value recorded, due to
      # the fact that the receiving op may mutate the value of the Variable.
      if (is_inp_ref and inp.op.type in ["Variable", "VariableV2"] and
          curr_node.type != "Identity"):
        # Mark the variable as dirty.
        touched_variables.add(inp.name)

        # Obtain the old value of the variable and cache it.
        if inp.name not in self._cached_variable_values:
          old_value = self._sess.run(inp)
          self._cached_variable_values[inp.name] = old_value

      # N.B.: The order of the logical branches matters. For example,
      # _client_feed_dict comes after _tensor_handles, so that tensor
      # handles stored in cont() calls can override the original client
      # feeds. Also for example, _override_tensors comes the first, so
      # the manual overriding, if exists, can always take effect.
      if use_overrides and can_feed and inp.name in self._override_tensors:
        # Use client-supplied overriding tensor value.
        feeds[inp] = self._override_tensors[inp.name]
        self._last_feed_types[inp.name] = self.FEED_TYPE_OVERRIDE
      elif (use_tensor_handles and can_feed and
            inp.name in self._tensor_handles and inp not in feeds):
        # Tensor handle found in cache.
        feeds[inp] = self._tensor_handles[inp.name].eval()
        self._last_feed_types[inp.name] = self.FEED_TYPE_HANDLE
      elif inp.name in self._client_feed_dict:
        # This input is available in the client feed_dict.
        feeds[inp] = self._client_feed_dict[inp.name]
        self._last_feed_types[inp.name] = self.FEED_TYPE_CLIENT
      else:
        # There is no feed available for this input. So keep tracing its
        # input(s).
        inp_node = self._get_node(inp)
        if inp_node.name in done:
          # Already visited.
          continue

        elem_stack.append(inp)
        done.add(inp_node.name)

  # =========================================================================

  if touched_variables:
    self._dirty_variables.update(touched_variables)

  for variable in restored_variables:
    self._dirty_variables.remove(variable)

  # Prepare RunOptions for DebugTensorWatches
  run_options = config_pb2.RunOptions()
  # TODO(cais): Add fields for watching intermediate tensors.

  if isinstance(fetched, ops.Operation):
    # The fetched is an Operation: Will not get tensor handle.
    self._sess.run(fetched, feed_dict=feeds, options=run_options)
    # No return value for a run of an Operation
  else:
    # This is a Tensor: Will get tensor handle and cache it.
    # Will also get the additional requested tensor handles (if any).
    tensors_to_get_handles_for = [fetched]
    handle_names = [target_name]

    tensors_to_get_handles_for.extend([
        self._sess.graph.as_graph_element(h)
        for h in additional_handle_requests
    ])
    handle_names.extend(additional_handle_requests)

    for handle_name, tensor in zip(handle_names, tensors_to_get_handles_for):
      handle = self._sess.run(session_ops.get_session_handle(tensor),
                              feed_dict=feeds,
                              options=run_options)
      self._tensor_handles[handle_name] = handle

    return self._tensor_handles[target_name].eval()

  # Invalidate caches at the end.
  # NOTE(review): for Tensor targets the method returns inside the else
  # branch above, so this invalidation only runs when the fetched target is
  # an Operation -- confirm this asymmetry is intentional.
  for touched_variable in touched_variables:
    self._invalidate_transitively_outgoing_cache(touched_variable)
def _get_node_name(self, graph_element_name):
return graph_element_name.split(":")[0]
def _invalidate_transitively_outgoing_cache(self, source_element):
"""Invalidate the cached tensor handles by tracing output.
This method is used to invalidate caches such as cached TensorHandles
and intermediate tensor values when Variable mutation happens or when
client overrides tensor values.
Uses non-recursive implementation to avoid stack overflow on deep networks.
TODO(cais): Currently, only TensorHandle caches are invalidated. Invalidate
cached intermediate tensor values from dumps when dumps are added.
Args:
source_element: The source graph element (e.g., a Variable output slot)
to trace the output from.
"""
if not self._tensor_handles:
return
# First, use cached invalidation paths to eliminate some cached tensor
# handles.
to_delete = []
for handle_name in self._tensor_handles:
if (handle_name in self._cached_invalidation_path and
source_element in self._cached_invalidation_path[handle_name]):
to_delete.append(handle_name)
for handle_name in to_delete:
del self._tensor_handles[handle_name]
if not self._tensor_handles:
return
stack = [source_element]
done = set()
while stack:
curr_element = stack.pop()
done.add(curr_element)
if curr_element in self._tensor_handles:
# Cache the invalidation path for potential future use.
if curr_element not in self._cached_invalidation_path:
self._cached_invalidation_path[curr_element] = set([source_element])
else:
self._cached_invalidation_path[curr_element].add(source_element)
del self._tensor_handles[curr_element]
targets = self._non_control_output_targets.get(curr_element, [])
for target in targets:
if target in done:
continue
else:
stack.append(target)
def finalize(self):
  """Run the final fetch(es) of the stepper.

  Dirty variables are restored first; client-supplied overriding tensor
  values are ignored for this run.

  Returns:
    The same return value as self.cont() as called on the final fetch.
  """
  # Undo any Variable mutations from earlier cont() calls so the final run
  # starts from the stepper's initial variable state.
  self.restore_variable_values()
  final_feeds = self._client_feed_dict
  return self._sess.run(self._fetches, feed_dict=final_feeds)
def restore_variable_values(self):
  """Restore all dirty variables to their initial values.

  "Initial value" refers to the value when this NodeStepper instance was
  first constructed.
  """
  for var_name in self._dirty_variables:
    # Re-run the variable's initializer, feeding the cached pre-mutation
    # value in place of the graph's initial-value tensor.
    init_op = self._variable_initializers[var_name]
    init_value_tensor = self._variable_initial_values[var_name]
    cached_value = self._cached_variable_values[var_name]
    self._sess.run(init_op, feed_dict={init_value_tensor: cached_value})
def handle_names(self):
  """Names of the tensors for which cached TensorHandles are held.

  Returns:
    (list of str) Names of the tensors with an available TensorHandle.
  """
  return list(self._tensor_handles)
def handle_node_names(self):
  """Names of the nodes whose output tensors have cached handles.

  Returns:
    (set of str) Node names derived from the cached tensor handle names.
  """
  return {self._get_node_name(name) for name in self._tensor_handles}
def dirty_variables(self):
  """The set of variables currently in a "dirty" state.

  A variable is "dirty" when a previous cont() call has updated its value
  and the value it held before any cont() calls has not been restored.

  Returns:
    (set) Names of the dirty variables.
  """
  return self._dirty_variables
def is_placeholder(self, graph_element_name):
  """Check whether a graph element is a Placeholder, by name.

  Args:
    graph_element_name: (str) Name of the tensor or op to be tested.

  Returns:
    (bool) Whether the named element is a Placeholder op or the output
      Tensor of a Placeholder op.

  Raises:
    ValueError: If graph_element_name is not in the transitive closure of
      this stepper instance.
  """
  if self._get_node_name(graph_element_name) not in self.sorted_nodes():
    raise ValueError(
        "%s is not in the transitive closure of this NodeStepper "
        "instance" % graph_element_name)

  element = self._sess.graph.as_graph_element(graph_element_name)
  # Tensors are reduced to their producing op before the type check.
  op = element if isinstance(element, ops.Operation) else element.op
  return op.type == "Placeholder"
def placeholders(self):
  """List the Placeholder elements in the transitive closure.

  Returns:
    (list of str) Names of the Placeholder nodes in the transitive
      closure, in topologically-sorted order.
  """
  return [node_name for node_name in self.sorted_nodes()
          if self.is_placeholder(node_name)]
def get_tensor_value(self, tensor_name):
  """Look up the value of a tensor that the stepper has access to.

  Args:
    tensor_name: (str) Name of the tensor.

  Returns:
    Value of the tensor, from the client feed dict (for placeholders),
    overriding values, or cached tensor handles -- in that priority order.

  Raises:
    ValueError: If the value is not available as an overriding value
      or through a TensorHandle.
  """
  if self.is_placeholder(tensor_name):
    # Placeholder values live in the client feed dict, keyed by the full
    # tensor name (slot suffix included).
    if ":" not in tensor_name:
      tensor_name += ":0"
    return self._client_feed_dict[tensor_name]

  if tensor_name in self._override_tensors:
    return self._override_tensors[tensor_name]

  if tensor_name in self._tensor_handles:
    return self._tensor_handles[tensor_name].eval()

  raise ValueError(
      "This stepper instance does not have access to the value of "
      "tensor \"%s\"" % tensor_name)
def override_names(self):
  """Names of the tensors that have client-supplied overriding values.

  Returns:
    (list of str) Names of the tensors for which overriding tensor values
      are available.
  """
  return list(self._override_tensors)
def _get_node(self, element):
  """Resolve a graph element (Op, Tensor or Node) to its node.

  Args:
    element: A graph element (Op, Tensor or Node).

  Returns:
    The node associated with element in the graph.
  """
  owning_node_name, _ = debug_data.parse_node_or_tensor_name(element.name)
  return self._sess.graph.as_graph_element(owning_node_name)
| {
"content_hash": "66e9477f5e6b23d2948ea61fb5db14d2",
"timestamp": "",
"source": "github",
"line_count": 831,
"max_line_length": 83,
"avg_line_length": 35.43321299638989,
"alnum_prop": 0.6547800984887078,
"repo_name": "jjas0nn/solvem",
"id": "47e42efbbbf8fd60ed763736e70669ca539be59c",
"size": "30134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/lib/python2.7/site-packages/tensorflow/python/debug/lib/stepper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "C",
"bytes": "309086"
},
{
"name": "C++",
"bytes": "10234032"
},
{
"name": "CMake",
"bytes": "307"
},
{
"name": "CSS",
"bytes": "1891"
},
{
"name": "Fortran",
"bytes": "6361"
},
{
"name": "HTML",
"bytes": "2989"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "18261384"
},
{
"name": "Shell",
"bytes": "3246"
}
],
"symlink_target": ""
} |
"""cx_Freeze build script for the Videocompress utility."""
import sys

from cx_Freeze import setup, Executable

# Dependencies are automatically detected, but it might need fine tuning.
build_options = dict(packages=[], excludes=[])

# On Windows, use the GUI base so no console window is attached.
gui_base = 'Win32GUI' if sys.platform == 'win32' else None

setup(
    name='Videocompress',
    version='0.5',
    description='A simple utility that runs ffmpeg over an entire directory (and subfolders).',
    options=dict(build_exe=build_options),
    executables=[Executable('videocompress.py', base=gui_base)],
)
| {
"content_hash": "34c1b2b55243eced281fa65280549eec",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 99,
"avg_line_length": 29.444444444444443,
"alnum_prop": 0.7037735849056603,
"repo_name": "point86/Videocompress",
"id": "0c51540bccdcdf3ff1973bae8f258bda6896ba1d",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19790"
}
],
"symlink_target": ""
} |
"""
Definitions of the GA4GH protocol types.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import json
import inspect
import datetime
import itertools
from cStringIO import StringIO
import avro.io
def convertDatetime(t):
    """
    Converts the specified datetime object into its appropriate protocol
    value: the integer number of milliseconds elapsed since the Unix epoch.
    """
    unixEpoch = datetime.datetime.utcfromtimestamp(0)
    elapsedSeconds = (t - unixEpoch).total_seconds()
    return int(elapsedSeconds * 1000)
class SearchResponseBuilder(object):
    """
    Incrementally assembles the JSON document for one SearchResponse page.

    Instead of holding every ProtocolElement object until the page is
    complete, each element's JSON representation is written straight into an
    in-memory buffer. This keeps the number of live objects in the server
    low while a response is being built.
    """
    def __init__(self, responseClass, pageSize, maxResponseLength):
        """
        Allocates a new SearchResponseBuilder for the specified subclass of
        SearchResponse, with the user-requested pageSize and the system
        mandated maxResponseLength (in bytes). The maxResponseLength is an
        approximate limit on the overall length of the JSON response.
        """
        self._responseClass = responseClass
        self._pageSize = pageSize
        self._maxResponseLength = maxResponseLength
        self._valueListBuffer = StringIO()
        self._numElements = 0
        self._nextPageToken = None

    def getPageSize(self):
        """
        Returns the user-requested maximum number of elements in this
        page's value list.
        """
        return self._pageSize

    def getMaxResponseLength(self):
        """
        Returns the approximate maximum response length: the total length
        (in bytes) of the concatenated JSON representations of the values
        after which the buffer is considered full.
        """
        return self._maxResponseLength

    def getNextPageToken(self):
        """
        Returns the value of the nextPageToken for this
        SearchResponseBuilder.
        """
        return self._nextPageToken

    def setNextPageToken(self, nextPageToken):
        """
        Sets the nextPageToken to the specified value.
        """
        self._nextPageToken = nextPageToken

    def addValue(self, protocolElement):
        """
        Appends the specified protocolElement to the value list for this
        response.
        """
        # Elements inside the JSON array are comma-separated.
        if self._numElements > 0:
            self._valueListBuffer.write(", ")
        self._numElements += 1
        self._valueListBuffer.write(protocolElement.toJsonString())

    def isFull(self):
        """
        Returns True if the response buffer is full, and False otherwise.
        The buffer is full if either (1) the number of items in the value
        list is >= pageSize or (2) the total length of the serialised
        elements in the page is >= maxResponseLength.
        """
        pageIsFull = self._numElements >= self._pageSize
        bufferIsFull = self._valueListBuffer.tell() >= self._maxResponseLength
        return pageIsFull or bufferIsFull

    def getJsonString(self):
        """
        Returns a string version of the SearchResponse built by this
        SearchResponseBuilder: a fully formed JSON document consisting of
        the pageToken and the value list.
        """
        valueListJson = "[{}]".format(self._valueListBuffer.getvalue())
        tokenJson = json.dumps(self._nextPageToken)
        return '{{"nextPageToken": {},"{}": {}}}'.format(
            tokenJson,
            self._responseClass.getValueListName(), valueListJson)
class ProtocolElementEncoder(json.JSONEncoder):
    """
    JSON encoder that serialises ProtocolElements by mapping each of the
    object's slot names to its current value.
    """
    def default(self, obj):
        # __slots__ enumerates every field the element carries.
        return dict((name, getattr(obj, name)) for name in obj.__slots__)
class ProtocolElement(object):
    """
    Superclass of GA4GH protocol elements. These elements are in one-to-one
    correspondence with the Avro definitions, and provide the basic elements
    of the on-the-wire protocol.

    Subclasses are expected to provide a ``schema`` attribute (an Avro
    schema), ``__slots__``, and the ``isEmbeddedType`` / ``getEmbeddedType``
    classmethods used below.
    # NOTE(review): those members are referenced but not defined here --
    # presumably supplied by the generated classes imported at the bottom of
    # this module; confirm against _protocol_definitions.
    """
    def __str__(self):
        return "{0}({1})".format(self.__class__.__name__, self.toJsonString())

    def __eq__(self, other):
        """
        Returns True if all fields in this protocol element are equal to the
        fields in the specified protocol element.
        """
        if type(other) != type(self):
            return False

        # itertools.imap is Python 2 only; this module targets Python 2
        # (see the cStringIO import at the top of the file).
        fieldNames = itertools.imap(lambda f: f.name, self.schema.fields)
        return all(getattr(self, k) == getattr(other, k) for k in fieldNames)

    def __ne__(self, other):
        return not self == other

    def toJsonString(self):
        """
        Returns a JSON encoded string representation of this ProtocolElement.
        """
        return json.dumps(self, cls=ProtocolElementEncoder)

    def toJsonDict(self):
        """
        Returns a JSON dictionary representation of this ProtocolElement.
        """
        out = {}
        for field in self.schema.fields:
            val = getattr(self, field.name)
            if self.isEmbeddedType(field.name):
                # Embedded protocol elements (or lists of them) are
                # recursively converted to dictionaries.
                if isinstance(val, list):
                    out[field.name] = list(el.toJsonDict() for el in val)
                elif val is None:
                    out[field.name] = None
                else:
                    out[field.name] = val.toJsonDict()
            elif isinstance(val, list):
                # Copy plain lists so the returned dict does not alias
                # this element's internal state.
                out[field.name] = list(val)
            else:
                out[field.name] = val
        return out

    @classmethod
    def validate(cls, jsonDict):
        """
        Validates the specified JSON dictionary to determine if it is an
        instance of this element's schema.
        """
        return avro.io.validate(cls.schema, jsonDict)

    @classmethod
    def fromJsonString(cls, jsonStr):
        """
        Returns a decoded ProtocolElement from the specified JSON string.
        """
        jsonDict = json.loads(jsonStr)
        return cls.fromJsonDict(jsonDict)

    @classmethod
    def fromJsonDict(cls, jsonDict):
        """
        Returns a decoded ProtocolElement from the specified JSON dictionary.

        Raises:
            ValueError: If jsonDict is None.
        """
        if jsonDict is None:
            raise ValueError("Required values not set in {0}".format(cls))

        instance = cls()
        for field in cls.schema.fields:
            # Fields absent from the input keep their schema default.
            instanceVal = field.default
            if field.name in jsonDict:
                val = jsonDict[field.name]
                if cls.isEmbeddedType(field.name):
                    instanceVal = cls._decodeEmbedded(field, val)
                else:
                    instanceVal = val
            setattr(instance, field.name, instanceVal)
        return instance

    @classmethod
    def _decodeEmbedded(cls, field, val):
        # Decode one embedded field value (or a list of values) into
        # protocol element instances.
        if val is None:
            return None

        embeddedType = cls.getEmbeddedType(field.name)
        # NOTE(review): relies on ``avro.schema`` being reachable through
        # the ``avro.io`` import at the top of the file -- confirm.
        if isinstance(field.type, avro.schema.ArraySchema):
            return list(embeddedType.fromJsonDict(elem) for elem in val)
        else:
            return embeddedType.fromJsonDict(val)
class SearchRequest(ProtocolElement):
    """
    The superclass of all SearchRequest classes in the protocol.

    Exists purely as a marker base class; concrete request types are
    generated elsewhere and imported at the bottom of this module.
    """
class SearchResponse(ProtocolElement):
    """
    The superclass of all SearchResponse classes in the protocol.

    Concrete subclasses define ``_valueListName``, the JSON key under which
    one page of result values is stored.
    """
    @classmethod
    def getValueListName(cls):
        """
        Returns the name of the list used to store the values held in a
        page of results.
        """
        return cls._valueListName
def getProtocolClasses(superclass=ProtocolElement):
    """
    Returns all the protocol classes that are subclasses of the specified
    superclass. Only 'leaf' classes are returned, corresponding directly to
    the classes defined in the protocol.
    """
    # The abstract superclasses defined in this module must be filtered out
    # of the results.
    abstractBases = set([ProtocolElement, SearchRequest, SearchResponse])
    currentModule = sys.modules[__name__]
    leafClasses = []
    for _, candidate in inspect.getmembers(currentModule):
        isLeaf = ((inspect.isclass(candidate) and
                   issubclass(candidate, superclass) and
                   candidate not in abstractBases))
        if isLeaf:
            leafClasses.append(candidate)
    return leafClasses
# We can now import the definitions of the protocol elements from the
# generated file.
from _protocol_definitions import * # NOQA
| {
"content_hash": "1072d933ac5fcce0c51496d57a187ddb",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 78,
"avg_line_length": 32.96590909090909,
"alnum_prop": 0.6288636102493393,
"repo_name": "adamnovak/server",
"id": "5eaca87cae81feabc29a95514a4353c7b7066e7d",
"size": "8703",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "ga4gh/protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4289"
},
{
"name": "Python",
"bytes": "721643"
},
{
"name": "Shell",
"bytes": "1085"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from tagging.fields import TagField
class Bookmark(models.Model):
    """ Simple model for storing bookmarks """

    # The bookmarked address; uniqueness makes the URL the natural key.
    url = models.URLField(_('url'), unique=True)
    description = models.TextField(_('description'), )
    # Optional long-form notes; may be left empty.
    extended = models.TextField(_('extended'), blank=True)
    # auto_now_add: set once on creation; auto_now: refreshed on every save.
    created = models.DateTimeField(_('created'), auto_now_add=True)
    modified = models.DateTimeField(_('modified'), auto_now=True)
    # Free-form tags, provided by the django-tagging TagField.
    tags = TagField()

    class Meta:
        verbose_name = _('bookmark')
        verbose_name_plural = _('bookmarks')
        db_table = "bookmarks"

    # NOTE(review): the inner ``Admin`` class is the pre-Django-1.0 admin
    # registration style -- confirm the project still targets that API.
    class Admin:
        list_display = ('url', 'description')
        search_fields = ('url', 'description', 'extended')

    def __unicode__(self):
        # Python 2 string representation: the URL identifies the bookmark.
        return self.url
| {
"content_hash": "2c792bbc44aba27a8f735df7b494b592",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 33.24,
"alnum_prop": 0.641395908543923,
"repo_name": "blampe/M2M",
"id": "3983f2f9bcaf052746907817a801041f080aa1ae",
"size": "831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "basic/bookmarks/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "754736"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "JavaScript",
"bytes": "21268"
},
{
"name": "PHP",
"bytes": "18"
},
{
"name": "Python",
"bytes": "6374305"
},
{
"name": "Shell",
"bytes": "4721"
}
],
"symlink_target": ""
} |
"""question resources"""
| {
"content_hash": "1a1b2911241027ea069fae49e9f9ba1e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.68,
"repo_name": "nicktimko/Planet-Lab",
"id": "0ceb7500cce2c72d0610c9d394479ca0ee8c3732",
"size": "25",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backend/src/backend/questions/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "14803"
},
{
"name": "HTML",
"bytes": "47937"
},
{
"name": "JavaScript",
"bytes": "29695"
},
{
"name": "Python",
"bytes": "125546"
},
{
"name": "Shell",
"bytes": "1218"
}
],
"symlink_target": ""
} |
"""
Class for representing hierarchical language structures, such as
syntax trees and morphological trees.
"""
from __future__ import print_function, unicode_literals
# TODO: add LabelledTree (can be used for dependency trees)
import re
from nltk.grammar import Production, Nonterminal
from nltk.probability import ProbabilisticMixIn
from nltk.util import slice_bounds
from nltk.compat import string_types, python_2_unicode_compatible, unicode_repr
from nltk.internals import raise_unorderable_types
######################################################################
## Trees
######################################################################
@python_2_unicode_compatible
class Tree(list):
    """
    A Tree represents a hierarchical grouping of leaves and subtrees.
    For example, each constituent in a syntax tree is represented by a single Tree.
    A tree's children are encoded as a list of leaves and subtrees,
    where a leaf is a basic (non-tree) value; and a subtree is a
    nested Tree.
        >>> from nltk.tree import Tree
        >>> print(Tree(1, [2, Tree(3, [4]), 5]))
        (1 2 (3 4) 5)
        >>> vp = Tree('VP', [Tree('V', ['saw']),
        ...                  Tree('NP', ['him'])])
        >>> s = Tree('S', [Tree('NP', ['I']), vp])
        >>> print(s)
        (S (NP I) (VP (V saw) (NP him)))
        >>> print(s[1])
        (VP (V saw) (NP him))
        >>> print(s[1,1])
        (NP him)
        >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
        >>> s == t
        True
        >>> t[1][1].set_label('X')
        >>> t[1][1].label()
        'X'
        >>> print(t)
        (S (NP I) (VP (V saw) (X him)))
        >>> t[0], t[1,1] = t[1,1], t[0]
        >>> print(t)
        (S (X him) (VP (V saw) (NP I)))
    The length of a tree is the number of children it has.
        >>> len(t)
        2
    The set_label() and label() methods allow individual constituents
    to be labeled.  For example, syntax trees use this label to specify
    phrase tags, such as "NP" and "VP".
    Several Tree methods use "tree positions" to specify
    children or descendants of a tree.  Tree positions are defined as
    follows:
      - The tree position *i* specifies a Tree's *i*\ th child.
      - The tree position ``()`` specifies the Tree itself.
      - If *p* is the tree position of descendant *d*, then
        *p+i* specifies the *i*\ th child of *d*.
    I.e., every tree position is either a single index *i*,
    specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*,
    specifying ``tree[i1][i2]...[iN]``.
    Construct a new tree.  This constructor can be called in one
    of two ways:
    - ``Tree(label, children)`` constructs a new tree with the
        specified label and list of children.
    - ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``.
    """
    def __init__(self, node, children=None):
        if children is None:
            raise TypeError("%s: Expected a node value and child list "
                            % type(self).__name__)
        elif isinstance(children, string_types):
            raise TypeError("%s() argument 2 should be a list, not a "
                            "string" % type(self).__name__)
        else:
            list.__init__(self, children)
            self._label = node
    #////////////////////////////////////////////////////////////
    # Comparison operators
    #////////////////////////////////////////////////////////////
    def __eq__(self, other):
        # Two trees are equal only if they are the same class and agree on
        # both the label and the (recursively compared) child list.
        return (self.__class__ is other.__class__ and
                (self._label, list(self)) == (other._label, list(other)))
    def __lt__(self, other):
        if not isinstance(other, Tree):
            # raise_unorderable_types("<", self, other)
            # Sometimes children can be pure strings,
            # so we need to be able to compare with non-trees:
            return self.__class__.__name__ < other.__class__.__name__
        elif self.__class__ is other.__class__:
            return (self._label, list(self)) < (other._label, list(other))
        else:
            return self.__class__.__name__ < other.__class__.__name__
    # @total_ordering doesn't work here, since the class inherits from a builtin class
    # (the remaining comparisons are therefore spelled out explicitly).
    def __ne__(self, other):
        return not self == other
    def __gt__(self, other):
        return not (self < other or self == other)
    def __le__(self, other):
        return self < other or self == other
    def __ge__(self, other):
        return not self < other
    #////////////////////////////////////////////////////////////
    # Disabled list operations
    #////////////////////////////////////////////////////////////
    def __mul__(self, v):
        raise TypeError('Tree does not support multiplication')
    def __rmul__(self, v):
        raise TypeError('Tree does not support multiplication')
    def __add__(self, v):
        raise TypeError('Tree does not support addition')
    def __radd__(self, v):
        raise TypeError('Tree does not support addition')
    #////////////////////////////////////////////////////////////
    # Indexing (with support for tree positions)
    #////////////////////////////////////////////////////////////
    def __getitem__(self, index):
        # Plain ints/slices index the child list; lists/tuples are treated
        # as tree positions and resolved recursively.
        if isinstance(index, (int, slice)):
            return list.__getitem__(self, index)
        elif isinstance(index, (list, tuple)):
            if len(index) == 0:
                return self
            elif len(index) == 1:
                return self[index[0]]
            else:
                return self[index[0]][index[1:]]
        else:
            raise TypeError("%s indices must be integers, not %s" %
                            (type(self).__name__, type(index).__name__))
    def __setitem__(self, index, value):
        if isinstance(index, (int, slice)):
            return list.__setitem__(self, index, value)
        elif isinstance(index, (list, tuple)):
            if len(index) == 0:
                raise IndexError('The tree position () may not be '
                                 'assigned to.')
            elif len(index) == 1:
                self[index[0]] = value
            else:
                self[index[0]][index[1:]] = value
        else:
            raise TypeError("%s indices must be integers, not %s" %
                            (type(self).__name__, type(index).__name__))
    def __delitem__(self, index):
        if isinstance(index, (int, slice)):
            return list.__delitem__(self, index)
        elif isinstance(index, (list, tuple)):
            if len(index) == 0:
                raise IndexError('The tree position () may not be deleted.')
            elif len(index) == 1:
                del self[index[0]]
            else:
                del self[index[0]][index[1:]]
        else:
            raise TypeError("%s indices must be integers, not %s" %
                            (type(self).__name__, type(index).__name__))
    #////////////////////////////////////////////////////////////
    # Basic tree operations
    #////////////////////////////////////////////////////////////
    def _get_node(self):
        """Outdated method to access the node value; use the label() method instead."""
        raise NotImplementedError("Use label() to access a node label.")
    def _set_node(self, value):
        """Outdated method to set the node value; use the set_label() method instead."""
        raise NotImplementedError("Use set_label() method to set a node label.")
    node = property(_get_node, _set_node)
    def label(self):
        """
        Return the node label of the tree.
            >>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))')
            >>> t.label()
            'S'
        :return: the node label (typically a string)
        :rtype: any
        """
        return self._label
    def set_label(self, label):
        """
        Set the node label of the tree.
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> t.set_label("T")
            >>> print(t)
            (T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))
        :param label: the node label (typically a string)
        :type label: any
        """
        self._label = label
    def leaves(self):
        """
        Return the leaves of the tree.
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> t.leaves()
            ['the', 'dog', 'chased', 'the', 'cat']
        :return: a list containing this tree's leaves.
            The order reflects the order of the
            leaves in the tree's hierarchical structure.
        :rtype: list
        """
        leaves = []
        for child in self:
            if isinstance(child, Tree):
                leaves.extend(child.leaves())
            else:
                leaves.append(child)
        return leaves
    def flatten(self):
        """
        Return a flat version of the tree, with all non-root non-terminals removed.
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> print(t.flatten())
            (S the dog chased the cat)
        :return: a tree consisting of this tree's root connected directly to
            its leaves, omitting all intervening non-terminal nodes.
        :rtype: Tree
        """
        return Tree(self.label(), self.leaves())
    def height(self):
        """
        Return the height of the tree.
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> t.height()
            5
            >>> print(t[0,0])
            (D the)
            >>> t[0,0].height()
            2
        :return: The height of this tree.  The height of a tree
            containing no children is 1; the height of a tree
            containing only leaves is 2; and the height of any other
            tree is one plus the maximum of its children's
            heights.
        :rtype: int
        """
        max_child_height = 0
        for child in self:
            if isinstance(child, Tree):
                max_child_height = max(max_child_height, child.height())
            else:
                # A leaf contributes height 1 below this node.
                max_child_height = max(max_child_height, 1)
        return 1 + max_child_height
    def treepositions(self, order='preorder'):
        """
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> t.treepositions() # doctest: +ELLIPSIS
            [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...]
            >>> for pos in t.treepositions('leaves'):
            ...     t[pos] = t[pos][::-1].upper()
            >>> print(t)
            (S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC))))
        :param order: One of: ``preorder``, ``postorder``, ``bothorder``,
            ``leaves``.
        """
        positions = []
        if order in ('preorder', 'bothorder'): positions.append( () )
        for i, child in enumerate(self):
            if isinstance(child, Tree):
                childpos = child.treepositions(order)
                positions.extend((i,)+p for p in childpos)
            else:
                positions.append( (i,) )
        if order in ('postorder', 'bothorder'): positions.append( () )
        return positions
    def subtrees(self, filter=None):
        """
        Generate all the subtrees of this tree, optionally restricted
        to trees matching the filter function.
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> for s in t.subtrees(lambda t: t.height() == 2):
            ...     print(s)
            (D the)
            (N dog)
            (V chased)
            (D the)
            (N cat)
        :type filter: function
        :param filter: the function to filter all local trees
        """
        if not filter or filter(self):
            yield self
        for child in self:
            if isinstance(child, Tree):
                for subtree in child.subtrees(filter):
                    yield subtree
    def productions(self):
        """
        Generate the productions that correspond to the non-terminal nodes of the tree.
        For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the
        form P -> C1 C2 ... Cn.
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> t.productions()
            [S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased',
            NP -> D N, D -> 'the', N -> 'cat']
        :rtype: list(Production)
        """
        if not isinstance(self._label, string_types):
            raise TypeError('Productions can only be generated from trees having node labels that are strings')
        prods = [Production(Nonterminal(self._label), _child_names(self))]
        for child in self:
            if isinstance(child, Tree):
                prods += child.productions()
        return prods
    def pos(self):
        """
        Return a sequence of pos-tagged words extracted from the tree.
            >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
            >>> t.pos()
            [('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')]
        :return: a list of tuples containing leaves and pre-terminals (part-of-speech tags).
            The order reflects the order of the leaves in the tree's hierarchical structure.
        :rtype: list(tuple)
        """
        pos = []
        for child in self:
            if isinstance(child, Tree):
                pos.extend(child.pos())
            else:
                pos.append((child, self._label))
        return pos
    def leaf_treeposition(self, index):
        """
        :return: The tree position of the ``index``-th leaf in this
            tree.  I.e., if ``tp=self.leaf_treeposition(i)``, then
            ``self[tp]==self.leaves()[i]``.
        :raise IndexError: If this tree contains fewer than ``index+1``
            leaves, or if ``index<0``.
        """
        if index < 0: raise IndexError('index must be non-negative')
        # Depth-first search with an explicit stack; children are pushed in
        # reverse so leaves are visited left-to-right.
        stack = [(self, ())]
        while stack:
            value, treepos = stack.pop()
            if not isinstance(value, Tree):
                if index == 0: return treepos
                else: index -= 1
            else:
                for i in range(len(value)-1, -1, -1):
                    stack.append( (value[i], treepos+(i,)) )
        raise IndexError('index must be less than or equal to len(self)')
    def treeposition_spanning_leaves(self, start, end):
        """
        :return: The tree position of the lowest descendant of this
            tree that dominates ``self.leaves()[start:end]``.
        :raise ValueError: if ``end <= start``
        """
        if end <= start:
            raise ValueError('end must be greater than start')
        # Find the tree positions of the start & end leaves, and
        # take the longest common subsequence.
        start_treepos = self.leaf_treeposition(start)
        end_treepos = self.leaf_treeposition(end-1)
        # Find the first index where they mismatch:
        for i in range(len(start_treepos)):
            if i == len(end_treepos) or start_treepos[i] != end_treepos[i]:
                return start_treepos[:i]
        return start_treepos
    #////////////////////////////////////////////////////////////
    # Transforms
    #////////////////////////////////////////////////////////////
    def chomsky_normal_form(self, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^"):
        """
        This method can modify a tree in three ways:
          1. Convert a tree into its Chomsky Normal Form (CNF)
             equivalent -- Every subtree has either two non-terminals
             or one terminal as its children.  This process requires
             the creation of more "artificial" non-terminal nodes.
          2. Markov (vertical) smoothing of children in new artificial
             nodes
          3. Horizontal (parent) annotation of nodes
        :param factor: Right or left factoring method (default = "right")
        :type  factor: str = [left|right]
        :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings)
        :type  horzMarkov: int | None
        :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation)
        :type  vertMarkov: int | None
        :param childChar: A string used in construction of the artificial nodes, separating the head of the
                          original subtree from the child nodes that have yet to be expanded (default = "|")
        :type  childChar: str
        :param parentChar: A string used to separate the node representation from its vertical annotation
        :type  parentChar: str
        """
        from nltk.treetransforms import chomsky_normal_form
        chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar)
    def un_chomsky_normal_form(self, expandUnary = True, childChar = "|", parentChar = "^", unaryChar = "+"):
        """
        This method modifies the tree in three ways:
          1. Transforms a tree in Chomsky Normal Form back to its
             original structure (branching greater than two)
          2. Removes any parent annotation (if it exists)
          3. (optional) expands unary subtrees (if previously
             collapsed with collapseUnary(...) )
        :param expandUnary: Flag to expand unary or not (default = True)
        :type  expandUnary: bool
        :param childChar: A string separating the head node from its children in an artificial node (default = "|")
        :type  childChar: str
        :param parentChar: A string separating the node label from its parent annotation (default = "^")
        :type  parentChar: str
        :param unaryChar: A string joining two non-terminals in a unary production (default = "+")
        :type  unaryChar: str
        """
        from nltk.treetransforms import un_chomsky_normal_form
        un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar)
    def collapse_unary(self, collapsePOS = False, collapseRoot = False, joinChar = "+"):
        """
        Collapse subtrees with a single child (ie. unary productions)
        into a new non-terminal (Tree node) joined by 'joinChar'.
        This is useful when working with algorithms that do not allow
        unary productions, and completely removing the unary productions
        would require loss of useful information.  The Tree is modified
        directly (since it is passed by reference) and no value is returned.
        :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie.
                            Part-of-Speech tags) since they are always unary productions
        :type  collapsePOS: bool
        :param collapseRoot: 'False' (default) will not modify the root production
                             if it is unary.  For the Penn WSJ treebank corpus, this corresponds
                             to the TOP -> productions.
        :type collapseRoot: bool
        :param joinChar: A string used to connect collapsed node values (default = "+")
        :type  joinChar: str
        """
        from nltk.treetransforms import collapse_unary
        collapse_unary(self, collapsePOS, collapseRoot, joinChar)
    #////////////////////////////////////////////////////////////
    # Convert, copy
    #////////////////////////////////////////////////////////////
    @classmethod
    def convert(cls, tree):
        """
        Convert a tree between different subtypes of Tree.  ``cls`` determines
        which class will be used to encode the new tree.
        :type tree: Tree
        :param tree: The tree that should be converted.
        :return: The new Tree.
        """
        if isinstance(tree, Tree):
            children = [cls.convert(child) for child in tree]
            return cls(tree._label, children)
        else:
            return tree
    def copy(self, deep=False):
        """Return a copy of this tree; deep copies convert all subtrees too."""
        if not deep: return type(self)(self._label, self)
        else: return type(self).convert(self)
    def _frozen_class(self): return ImmutableTree
    def freeze(self, leaf_freezer=None):
        """Return an immutable (hashable) copy of this tree, optionally
        transforming each leaf with ``leaf_freezer`` first."""
        frozen_class = self._frozen_class()
        if leaf_freezer is None:
            newcopy = frozen_class.convert(self)
        else:
            newcopy = self.copy(deep=True)
            for pos in newcopy.treepositions('leaves'):
                newcopy[pos] = leaf_freezer(newcopy[pos])
            newcopy = frozen_class.convert(newcopy)
        hash(newcopy) # Make sure the leaves are hashable.
        return newcopy
    #////////////////////////////////////////////////////////////
    # Parsing
    #////////////////////////////////////////////////////////////
    @classmethod
    def fromstring(cls, s, brackets='()', read_node=None, read_leaf=None,
              node_pattern=None, leaf_pattern=None,
              remove_empty_top_bracketing=False):
        """
        Read a bracketed tree string and return the resulting tree.
        Trees are represented as nested brackettings, such as::
          (S (NP (NNP John)) (VP (V runs)))
        :type s: str
        :param s: The string to read
        :type brackets: str (length=2)
        :param brackets: The bracket characters used to mark the
            beginning and end of trees and subtrees.
        :type read_node: function
        :type read_leaf: function
        :param read_node, read_leaf: If specified, these functions
            are applied to the substrings of ``s`` corresponding to
            nodes and leaves (respectively) to obtain the values for
            those nodes and leaves.  They should have the following
            signature:
               read_node(str) -> value
            For example, these functions could be used to process nodes
            and leaves whose values should be some type other than
            string (such as ``FeatStruct``).
            Note that by default, node strings and leaf strings are
            delimited by whitespace and brackets; to override this
            default, use the ``node_pattern`` and ``leaf_pattern``
            arguments.
        :type node_pattern: str
        :type leaf_pattern: str
        :param node_pattern, leaf_pattern: Regular expression patterns
            used to find node and leaf substrings in ``s``.  By
            default, both nodes patterns are defined to match any
            sequence of non-whitespace non-bracket characters.
        :type remove_empty_top_bracketing: bool
        :param remove_empty_top_bracketing: If the resulting tree has
            an empty node label, and is length one, then return its
            single child instead.  This is useful for treebank trees,
            which sometimes contain an extra level of bracketing.
        :return: A tree corresponding to the string representation ``s``.
            If this class method is called using a subclass of Tree,
            then it will return a tree of that type.
        :rtype: Tree
        """
        if not isinstance(brackets, string_types) or len(brackets) != 2:
            raise TypeError('brackets must be a length-2 string')
        # NOTE: regex patterns are raw strings; non-raw '\s' etc. raise
        # DeprecationWarning on Python 3.6+ (SyntaxWarning on 3.12+).
        if re.search(r'\s', brackets):
            raise TypeError('whitespace brackets not allowed')
        # Construct a regexp that will tokenize the string.
        open_b, close_b = brackets
        open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b))
        if node_pattern is None:
            node_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
        if leaf_pattern is None:
            leaf_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
        token_re = re.compile(r'%s\s*(%s)?|%s|(%s)' % (
            open_pattern, node_pattern, close_pattern, leaf_pattern))
        # Walk through each token, updating a stack of trees.
        stack = [(None, [])] # list of (node, children) tuples
        for match in token_re.finditer(s):
            token = match.group()
            # Beginning of a tree/subtree
            if token[0] == open_b:
                if len(stack) == 1 and len(stack[0][1]) > 0:
                    cls._parse_error(s, match, 'end-of-string')
                label = token[1:].lstrip()
                if read_node is not None: label = read_node(label)
                stack.append((label, []))
            # End of a tree/subtree
            elif token == close_b:
                if len(stack) == 1:
                    if len(stack[0][1]) == 0:
                        cls._parse_error(s, match, open_b)
                    else:
                        cls._parse_error(s, match, 'end-of-string')
                label, children = stack.pop()
                stack[-1][1].append(cls(label, children))
            # Leaf node
            else:
                if len(stack) == 1:
                    cls._parse_error(s, match, open_b)
                if read_leaf is not None: token = read_leaf(token)
                stack[-1][1].append(token)
        # check that we got exactly one complete tree.
        if len(stack) > 1:
            cls._parse_error(s, 'end-of-string', close_b)
        elif len(stack[0][1]) == 0:
            cls._parse_error(s, 'end-of-string', open_b)
        else:
            assert stack[0][0] is None
            assert len(stack[0][1]) == 1
        tree = stack[0][1][0]
        # If the tree has an extra level with node='', then get rid of
        # it.  E.g.: "((S (NP ...) (VP ...)))"
        if remove_empty_top_bracketing and tree._label == '' and len(tree) == 1:
            tree = tree[0]
        # return the tree.
        return tree
    @classmethod
    def _parse_error(cls, s, match, expecting):
        """
        Display a friendly error message when parsing a tree string fails.
        :param s: The string we're parsing.
        :param match: regexp match of the problem token.
        :param expecting: what we expected to see instead.
        """
        # Construct a basic error message
        if match == 'end-of-string':
            pos, token = len(s), 'end-of-string'
        else:
            pos, token = match.start(), match.group()
        msg = '%s.read(): expected %r but got %r\n%sat index %d.' % (
            cls.__name__, expecting, token, ' '*12, pos)
        # Add a display showing the error token itself:
        s = s.replace('\n', ' ').replace('\t', ' ')
        offset = pos
        if len(s) > pos+10:
            s = s[:pos+10]+'...'
        if pos > 10:
            s = '...'+s[pos-10:]
            offset = 13
        msg += '\n%s"%s"\n%s^' % (' '*16, s, ' '*(17+offset))
        raise ValueError(msg)
    #////////////////////////////////////////////////////////////
    # Visualization & String Representation
    #////////////////////////////////////////////////////////////
    def draw(self):
        """
        Open a new window containing a graphical diagram of this tree.
        """
        from nltk.draw.tree import draw_trees
        draw_trees(self)
    def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs):
        """
        Pretty-print this tree as ASCII or Unicode art.
        For explanation of the arguments, see the documentation for
        `nltk.treeprettyprinter.TreePrettyPrinter`.
        """
        from nltk.treeprettyprinter import TreePrettyPrinter
        print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs),
              file=stream)
    def __repr__(self):
        childstr = ", ".join(unicode_repr(c) for c in self)
        return '%s(%s, [%s])' % (type(self).__name__, unicode_repr(self._label), childstr)
    def _repr_png_(self):
        """
        Draws and outputs in PNG for ipython.
        PNG is used instead of PDF, since it can be displayed in the qt console and
        has wider browser support.
        """
        import os
        import base64
        import subprocess
        import tempfile
        from nltk.draw.tree import tree_to_treesegment
        from nltk.draw.util import CanvasFrame
        from nltk.internals import find_binary
        _canvas_frame = CanvasFrame()
        widget = tree_to_treesegment(_canvas_frame.canvas(), self)
        _canvas_frame.add_widget(widget)
        x, y, w, h = widget.bbox()
        # print_to_file uses scrollregion to set the width and height of the pdf.
        _canvas_frame.canvas()['scrollregion'] = (0, 0, w, h)
        with tempfile.NamedTemporaryFile() as file:
            in_path = '{0:}.ps'.format(file.name)
            out_path = '{0:}.png'.format(file.name)
            _canvas_frame.print_to_file(in_path)
            _canvas_frame.destroy_widget(widget)
            # Requires a Ghostscript ('gs') binary on PATH to rasterize the
            # PostScript output to PNG.
            subprocess.call([find_binary('gs', binary_names=['gswin32c.exe', 'gswin64c.exe'], env_vars=['PATH'], verbose=False)] +
                            '-q -dEPSCrop -sDEVICE=png16m -r90 -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -dSAFER -dBATCH -dNOPAUSE -sOutputFile={0:} {1:}'
                            .format(out_path, in_path).split())
            with open(out_path, 'rb') as sr:
                res = sr.read()
            os.remove(in_path)
            os.remove(out_path)
            return base64.b64encode(res).decode()
    def __str__(self):
        return self.pformat()
    def pprint(self, **kwargs):
        """
        Print a string representation of this Tree to 'stream'
        """
        if "stream" in kwargs:
            stream = kwargs["stream"]
            del kwargs["stream"]
        else:
            stream = None
        print(self.pformat(**kwargs), file=stream)
    def pformat(self, margin=70, indent=0, nodesep='', parens='()', quotes=False):
        """
        :return: A pretty-printed string representation of this tree.
        :rtype: str
        :param margin: The right margin at which to do line-wrapping.
        :type margin: int
        :param indent: The indentation level at which printing
            begins.  This number is used to decide how far to indent
            subsequent lines.
        :type indent: int
        :param nodesep: A string that is used to separate the node
            from the children.  E.g., the value ``':'`` gives
            trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``.
        """
        # Try writing it on one line.
        s = self._pformat_flat(nodesep, parens, quotes)
        if len(s) + indent < margin:
            return s
        # If it doesn't fit on one line, then write it on multi-lines.
        if isinstance(self._label, string_types):
            s = '%s%s%s' % (parens[0], self._label, nodesep)
        else:
            s = '%s%s%s' % (parens[0], unicode_repr(self._label), nodesep)
        for child in self:
            if isinstance(child, Tree):
                s += '\n'+' '*(indent+2)+child.pformat(margin, indent+2,
                                                  nodesep, parens, quotes)
            elif isinstance(child, tuple):
                s += '\n'+' '*(indent+2)+ "/".join(child)
            elif isinstance(child, string_types) and not quotes:
                s += '\n'+' '*(indent+2)+ '%s' % child
            else:
                s += '\n'+' '*(indent+2)+ unicode_repr(child)
        return s+parens[1]
    def pformat_latex_qtree(self):
        r"""
        Returns a representation of the tree compatible with the
        LaTeX qtree package. This consists of the string ``\Tree``
        followed by the tree represented in bracketed notation.
        For example, the following result was generated from a parse tree of
        the sentence ``The announcement astounded us``::
          \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
              [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]
        See http://www.ling.upenn.edu/advice/latex.html for the LaTeX
        style file for the qtree package.
        :return: A latex qtree representation of this tree.
        :rtype: str
        """
        reserved_chars = re.compile(r'([#\$%&~_\{\}])')
        pformat = self.pformat(indent=6, nodesep='', parens=('[.', ' ]'))
        return r'\Tree ' + re.sub(reserved_chars, r'\\\1', pformat)
    def _pformat_flat(self, nodesep, parens, quotes):
        # Single-line rendering used by pformat() when the tree fits
        # within the margin.
        childstrs = []
        for child in self:
            if isinstance(child, Tree):
                childstrs.append(child._pformat_flat(nodesep, parens, quotes))
            elif isinstance(child, tuple):
                childstrs.append("/".join(child))
            elif isinstance(child, string_types) and not quotes:
                childstrs.append('%s' % child)
            else:
                childstrs.append(unicode_repr(child))
        if isinstance(self._label, string_types):
            return '%s%s%s %s%s' % (parens[0], self._label, nodesep,
                                    " ".join(childstrs), parens[1])
        else:
            return '%s%s%s %s%s' % (parens[0], unicode_repr(self._label), nodesep,
                                    " ".join(childstrs), parens[1])
class ImmutableTree(Tree):
    """An immutable (and therefore hashable) variant of ``Tree``.
    Every operation that would mutate the tree raises ``ValueError``;
    the hash is computed once at construction time."""
    def __init__(self, node, children=None):
        super(ImmutableTree, self).__init__(node, children)
        # Computing the hash eagerly does double duty: it verifies up
        # front that the label and every child are themselves immutable,
        # and it gives __hash__ a cached value to return.
        try:
            self._hash = hash((self._label, tuple(self)))
        except (TypeError, ValueError):
            raise ValueError("%s: node value and children "
                             "must be immutable" % type(self).__name__)
    def _mutation_error(self):
        # Single shared failure path for every mutating operation.
        raise ValueError('%s may not be modified' % type(self).__name__)
    def __setitem__(self, index, value):
        self._mutation_error()
    def __setslice__(self, i, j, value):
        self._mutation_error()
    def __delitem__(self, index):
        self._mutation_error()
    def __delslice__(self, i, j):
        self._mutation_error()
    def __iadd__(self, other):
        self._mutation_error()
    def __imul__(self, other):
        self._mutation_error()
    def append(self, v):
        self._mutation_error()
    def extend(self, v):
        self._mutation_error()
    def pop(self, v=None):
        self._mutation_error()
    def remove(self, v):
        self._mutation_error()
    def reverse(self):
        self._mutation_error()
    def sort(self):
        self._mutation_error()
    def __hash__(self):
        return self._hash
    def set_label(self, value):
        """
        Set the node label.  This will only succeed the first time the
        node label is set, which should occur in ImmutableTree.__init__().
        """
        if hasattr(self, '_label'):
            self._mutation_error()
        self._label = value
######################################################################
## Parented trees
######################################################################
class AbstractParentedTree(Tree):
"""
An abstract base class for a ``Tree`` that automatically maintains
pointers to parent nodes. These parent pointers are updated
whenever any change is made to a tree's structure. Two subclasses
are currently defined:
- ``ParentedTree`` is used for tree structures where each subtree
has at most one parent. This class should be used in cases
where there is no"sharing" of subtrees.
- ``MultiParentedTree`` is used for tree structures where a
subtree may have zero or more parents. This class should be
used in cases where subtrees may be shared.
Subclassing
===========
The ``AbstractParentedTree`` class redefines all operations that
modify a tree's structure to call two methods, which are used by
subclasses to update parent information:
- ``_setparent()`` is called whenever a new child is added.
- ``_delparent()`` is called whenever a child is removed.
"""
def __init__(self, node, children=None):
super(AbstractParentedTree, self).__init__(node, children)
# If children is None, the tree is read from node, and
# all parents will be set during parsing.
if children is not None:
# Otherwise we have to set the parent of the children.
# Iterate over self, and *not* children, because children
# might be an iterator.
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i, dry_run=True)
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i)
#////////////////////////////////////////////////////////////
# Parent management
#////////////////////////////////////////////////////////////
def _setparent(self, child, index, dry_run=False):
"""
Update the parent pointer of ``child`` to point to ``self``. This
method is only called if the type of ``child`` is ``Tree``;
i.e., it is not called when adding a leaf to a tree. This method
is always called before the child is actually added to the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
:raise TypeError: If ``child`` is a tree with an impropriate
type. Typically, if ``child`` is a tree, then its type needs
to match the type of ``self``. This prevents mixing of
different tree types (single-parented, multi-parented, and
non-parented).
:param dry_run: If true, the don't actually set the child's
parent pointer; just check for any error conditions, and
raise an exception if one is found.
"""
raise NotImplementedError()
def _delparent(self, child, index):
"""
Update the parent pointer of ``child`` to not point to self. This
method is only called if the type of ``child`` is ``Tree``; i.e., it
is not called when removing a leaf from a tree. This method
is always called before the child is actually removed from the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
"""
raise NotImplementedError()
#////////////////////////////////////////////////////////////
# Methods that add/remove children
#////////////////////////////////////////////////////////////
# Every method that adds or removes a child must make
# appropriate calls to _setparent() and _delparent().
    def __delitem__(self, index):
        """
        Delete the child (or descendant) at ``index``, clearing the
        affected children's parent pointers *before* the underlying list
        is modified.  ``index`` may be an int, a slice, or a tree
        position (a list/tuple of ints).
        """
        # del ptree[start:stop]
        if isinstance(index, slice):
            start, stop, step = slice_bounds(self, index, allow_step=True)
            # Clear all the children pointers.
            for i in range(start, stop, step):
                if isinstance(self[i], Tree):
                    self._delparent(self[i], i)
            # Delete the children from our child list.
            super(AbstractParentedTree, self).__delitem__(index)

        # del ptree[i]
        elif isinstance(index, int):
            if index < 0: index += len(self)
            if index < 0: raise IndexError('index out of range')
            # Clear the child's parent pointer.
            if isinstance(self[index], Tree):
                self._delparent(self[index], index)
            # Remove the child from our child list.
            super(AbstractParentedTree, self).__delitem__(index)

        elif isinstance(index, (list, tuple)):
            # del ptree[()]
            if len(index) == 0:
                raise IndexError('The tree position () may not be deleted.')
            # del ptree[(i,)]
            elif len(index) == 1:
                del self[index[0]]
            # del ptree[i1, i2, i3]: recurse into the child at index[0].
            else:
                del self[index[0]][index[1:]]

        else:
            raise TypeError("%s indices must be integers, not %s" %
                            (type(self).__name__, type(index).__name__))
    def __setitem__(self, index, value):
        """
        Replace the child (or descendant) at ``index`` with ``value``,
        keeping parent pointers consistent.  For slices this is done in
        three ordered passes: dry-run validation, clearing old parents,
        then setting new parents.  ``index`` may be an int, a slice, or
        a tree position (a list/tuple of ints).
        """
        # ptree[start:stop] = value
        if isinstance(index, slice):
            start, stop, step = slice_bounds(self, index, allow_step=True)
            # make a copy of value, in case it's an iterator
            if not isinstance(value, (list, tuple)):
                value = list(value)
            # Check for any error conditions, so we can avoid ending
            # up in an inconsistent state if an error does occur.
            for i, child in enumerate(value):
                if isinstance(child, Tree):
                    self._setparent(child, start + i*step, dry_run=True)
            # clear the child pointers of all parents we're removing
            for i in range(start, stop, step):
                if isinstance(self[i], Tree):
                    self._delparent(self[i], i)
            # set the child pointers of the new children.  We do this
            # after clearing *all* child pointers, in case we're e.g.
            # reversing the elements in a tree.
            for i, child in enumerate(value):
                if isinstance(child, Tree):
                    self._setparent(child, start + i*step)
            # finally, update the content of the child list itself.
            super(AbstractParentedTree, self).__setitem__(index, value)

        # ptree[i] = value
        elif isinstance(index, int):
            if index < 0: index += len(self)
            if index < 0: raise IndexError('index out of range')
            # if the value is not changing, do nothing.
            if value is self[index]:
                return
            # Set the new child's parent pointer.
            if isinstance(value, Tree):
                self._setparent(value, index)
            # Remove the old child's parent pointer
            if isinstance(self[index], Tree):
                self._delparent(self[index], index)
            # Update our child list.
            super(AbstractParentedTree, self).__setitem__(index, value)

        elif isinstance(index, (list, tuple)):
            # ptree[()] = value
            if len(index) == 0:
                raise IndexError('The tree position () may not be assigned to.')
            # ptree[(i,)] = value
            elif len(index) == 1:
                self[index[0]] = value
            # ptree[i1, i2, i3] = value: recurse into the child at index[0].
            else:
                self[index[0]][index[1:]] = value

        else:
            raise TypeError("%s indices must be integers, not %s" %
                            (type(self).__name__, type(index).__name__))
def append(self, child):
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def extend(self, children):
for child in children:
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def insert(self, index, child):
# Handle negative indexes. Note that if index < -len(self),
# we do *not* raise an IndexError, unlike __getitem__. This
# is done for consistency with list.__getitem__ and list.index.
if index < 0: index += len(self)
if index < 0: index = 0
# Set the child's parent, and update our child list.
if isinstance(child, Tree):
self._setparent(child, index)
super(AbstractParentedTree, self).insert(index, child)
def pop(self, index=-1):
if index < 0: index += len(self)
if index < 0: raise IndexError('index out of range')
if isinstance(self[index], Tree):
self._delparent(self[index], index)
return super(AbstractParentedTree, self).pop(index)
# n.b.: like `list`, this is done by equality, not identity!
# To remove a specific child, use del ptree[i].
def remove(self, child):
index = self.index(child)
if isinstance(self[index], Tree):
self._delparent(self[index], index)
super(AbstractParentedTree, self).remove(child)
    # We need to implement __getslice__ and friends, even though
    # they're deprecated, because otherwise list.__getslice__ will get
    # called (since we're subclassing from list).  Just delegate to
    # __getitem__ etc., but use max(0, start) and max(0, stop) because
    # negative indices are already handled *before*
    # __getslice__ is called; and we don't want to double-count them.
    # (These only exist on Python 2; the hasattr guard skips them on 3.)
    if hasattr(list, '__getslice__'):
        def __getslice__(self, start, stop):
            # Delegate old-style slice reads to __getitem__.
            return self.__getitem__(slice(max(0, start), max(0, stop)))
        def __delslice__(self, start, stop):
            # Delegate old-style slice deletion to __delitem__.
            return self.__delitem__(slice(max(0, start), max(0, stop)))
        def __setslice__(self, start, stop, value):
            # Delegate old-style slice assignment to __setitem__.
            return self.__setitem__(slice(max(0, start), max(0, stop)), value)
class ParentedTree(AbstractParentedTree):
    """
    A ``Tree`` that automatically maintains parent pointers for
    single-parented trees.  The following are methods for querying
    the structure of a parented tree: ``parent``, ``parent_index``,
    ``left_sibling``, ``right_sibling``, ``root``, ``treeposition``.

    Each ``ParentedTree`` may have at most one parent.  In
    particular, subtrees may not be shared.  Any attempt to reuse a
    single ``ParentedTree`` as a child of more than one parent (or
    as multiple children of the same parent) will cause a
    ``ValueError`` exception to be raised.

    ``ParentedTrees`` should never be used in the same tree as ``Trees``
    or ``MultiParentedTrees``.  Mixing tree implementations may result
    in incorrect parent pointers and in ``TypeError`` exceptions.
    """
    def __init__(self, node, children=None):
        self._parent = None
        """The parent of this Tree, or None if it has no parent."""

        super(ParentedTree, self).__init__(node, children)
        if children is None:
            # If children is None, the tree is read from node.
            # After parsing, the parent of the immediate children
            # will point to an intermediate tree, not self.
            # We fix this by brute force:
            for i, child in enumerate(self):
                if isinstance(child, Tree):
                    child._parent = None
                    self._setparent(child, i)

    # Frozen (immutable) counterpart used by freeze().
    def _frozen_class(self): return ImmutableParentedTree

    #/////////////////////////////////////////////////////////////////
    # Methods
    #/////////////////////////////////////////////////////////////////

    def parent(self):
        """The parent of this tree, or None if it has no parent."""
        return self._parent

    def parent_index(self):
        """
        The index of this tree in its parent.  I.e.,
        ``ptree.parent()[ptree.parent_index()] is ptree``.  Note that
        ``ptree.parent_index()`` is not necessarily equal to
        ``ptree.parent.index(ptree)``, since the ``index()`` method
        returns the first child that is equal to its argument.
        """
        if self._parent is None: return None
        # Search by identity, not equality (see docstring above).
        for i, child in enumerate(self._parent):
            if child is self: return i
        assert False, 'expected to find self in self._parent!'

    def left_sibling(self):
        """The left sibling of this tree, or None if it has none."""
        parent_index = self.parent_index()
        if self._parent and parent_index > 0:
            return self._parent[parent_index-1]
        return None # no left sibling

    def right_sibling(self):
        """The right sibling of this tree, or None if it has none."""
        parent_index = self.parent_index()
        if self._parent and parent_index < (len(self._parent)-1):
            return self._parent[parent_index+1]
        return None # no right sibling

    def root(self):
        """
        The root of this tree.  I.e., the unique ancestor of this tree
        whose parent is None.  If ``ptree.parent()`` is None, then
        ``ptree`` is its own root.
        """
        root = self
        while root.parent() is not None:
            root = root.parent()
        return root

    def treeposition(self):
        """
        The tree position of this tree, relative to the root of the
        tree.  I.e., ``ptree.root[ptree.treeposition] is ptree``.
        """
        if self.parent() is None:
            return ()
        else:
            # Build the position recursively from the root down.
            return self.parent().treeposition() + (self.parent_index(),)

    #/////////////////////////////////////////////////////////////////
    # Parent Management
    #/////////////////////////////////////////////////////////////////

    def _delparent(self, child, index):
        # Sanity checks
        assert isinstance(child, ParentedTree)
        assert self[index] is child
        assert child._parent is self

        # Delete child's parent pointer.
        child._parent = None

    def _setparent(self, child, index, dry_run=False):
        # If the child's type is incorrect, then complain.
        if not isinstance(child, ParentedTree):
            raise TypeError('Can not insert a non-ParentedTree '+
                            'into a ParentedTree')

        # If child already has a parent, then complain.
        if child._parent is not None:
            raise ValueError('Can not insert a subtree that already '
                             'has a parent.')

        # Set child's parent pointer & index.
        if not dry_run:
            child._parent = self
class MultiParentedTree(AbstractParentedTree):
    """
    A ``Tree`` that automatically maintains parent pointers for
    multi-parented trees.  The following are methods for querying the
    structure of a multi-parented tree: ``parents()``, ``parent_indices()``,
    ``left_siblings()``, ``right_siblings()``, ``roots``, ``treepositions``.

    Each ``MultiParentedTree`` may have zero or more parents.  In
    particular, subtrees may be shared.  If a single
    ``MultiParentedTree`` is used as multiple children of the same
    parent, then that parent will appear multiple times in its
    ``parents()`` method.

    ``MultiParentedTrees`` should never be used in the same tree as
    ``Trees`` or ``ParentedTrees``.  Mixing tree implementations may
    result in incorrect parent pointers and in ``TypeError`` exceptions.
    """
    def __init__(self, node, children=None):
        self._parents = []
        """A list of this tree's parents.  This list should not
           contain duplicates, even if a parent contains this tree
           multiple times."""

        super(MultiParentedTree, self).__init__(node, children)
        if children is None:
            # If children is None, the tree is read from node.
            # After parsing, the parent(s) of the immediate children
            # will point to an intermediate tree, not self.
            # We fix this by brute force:
            for i, child in enumerate(self):
                if isinstance(child, Tree):
                    child._parents = []
                    self._setparent(child, i)

    # Frozen (immutable) counterpart used by freeze().
    def _frozen_class(self): return ImmutableMultiParentedTree

    #/////////////////////////////////////////////////////////////////
    # Methods
    #/////////////////////////////////////////////////////////////////

    def parents(self):
        """
        The set of parents of this tree.  If this tree has no parents,
        then ``parents`` is the empty set.  To check if a tree is used
        as multiple children of the same parent, use the
        ``parent_indices()`` method.

        :type: list(MultiParentedTree)
        """
        # Return a copy so callers cannot mutate our bookkeeping list.
        return list(self._parents)

    def left_siblings(self):
        """
        A list of all left siblings of this tree, in any of its parent
        trees.  A tree may be its own left sibling if it is used as
        multiple contiguous children of the same parent.  A tree may
        appear multiple times in this list if it is the left sibling
        of this tree with respect to multiple parents.

        :type: list(MultiParentedTree)
        """
        return [parent[index-1]
                for (parent, index) in self._get_parent_indices()
                if index > 0]

    def right_siblings(self):
        """
        A list of all right siblings of this tree, in any of its parent
        trees.  A tree may be its own right sibling if it is used as
        multiple contiguous children of the same parent.  A tree may
        appear multiple times in this list if it is the right sibling
        of this tree with respect to multiple parents.

        :type: list(MultiParentedTree)
        """
        return [parent[index+1]
                for (parent, index) in self._get_parent_indices()
                if index < (len(parent)-1)]

    def _get_parent_indices(self):
        # Every (parent, index) pair at which this tree occurs as a
        # child; a parent appears once per occurrence (identity check).
        return [(parent, index)
                for parent in self._parents
                for index, child in enumerate(parent)
                if child is self]

    def roots(self):
        """
        The set of all roots of this tree.  This set is formed by
        tracing all possible parent paths until trees with no parents
        are found.

        :type: list(MultiParentedTree)
        """
        return list(self._get_roots_helper({}).values())

    def _get_roots_helper(self, result):
        # Accumulate parentless ancestors into ``result``, keyed by
        # id() so distinct-but-equal trees are kept apart.
        if self._parents:
            for parent in self._parents:
                parent._get_roots_helper(result)
        else:
            result[id(self)] = self
        return result

    def parent_indices(self, parent):
        """
        Return a list of the indices where this tree occurs as a child
        of ``parent``.  If this child does not occur as a child of
        ``parent``, then the empty list is returned.  The following is
        always true::

          for parent_index in ptree.parent_indices(parent):
              parent[parent_index] is ptree
        """
        if parent not in self._parents: return []
        else: return [index for (index, child) in enumerate(parent)
                      if child is self]

    def treepositions(self, root):
        """
        Return a list of all tree positions that can be used to reach
        this multi-parented tree starting from ``root``.  I.e., the
        following is always true::

          for treepos in ptree.treepositions(root):
              root[treepos] is ptree
        """
        if self is root:
            return [()]
        else:
            # Extend each path to a parent by each index at which this
            # tree occurs in that parent.
            return [treepos+(index,)
                    for parent in self._parents
                    for treepos in parent.treepositions(root)
                    for (index, child) in enumerate(parent) if child is self]

    #/////////////////////////////////////////////////////////////////
    # Parent Management
    #/////////////////////////////////////////////////////////////////

    def _delparent(self, child, index):
        # Sanity checks
        assert isinstance(child, MultiParentedTree)
        assert self[index] is child
        assert len([p for p in child._parents if p is self]) == 1

        # If the only copy of child in self is at index, then delete
        # self from child's parent list.
        for i, c in enumerate(self):
            if c is child and i != index: break
        else:
            child._parents.remove(self)

    def _setparent(self, child, index, dry_run=False):
        # If the child's type is incorrect, then complain.
        if not isinstance(child, MultiParentedTree):
            raise TypeError('Can not insert a non-MultiParentedTree '+
                            'into a MultiParentedTree')

        # Add self as a parent pointer if it's not already listed.
        if not dry_run:
            for parent in child._parents:
                if parent is self: break
            else:
                child._parents.append(self)
class ImmutableParentedTree(ImmutableTree, ParentedTree):
    """An immutable (hashable, non-modifiable) variant of ``ParentedTree``."""
    pass
class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree):
    """An immutable (hashable, non-modifiable) variant of ``MultiParentedTree``."""
    pass
######################################################################
## Probabilistic trees
######################################################################
@python_2_unicode_compatible
class ProbabilisticTree(Tree, ProbabilisticMixIn):
    """A ``Tree`` that additionally carries a probability, supplied via
    ``ProbabilisticMixIn`` keyword arguments (e.g. ``prob=0.5``)."""
    def __init__(self, node, children=None, **prob_kwargs):
        Tree.__init__(self, node, children)
        ProbabilisticMixIn.__init__(self, **prob_kwargs)

    # We have to patch up these methods to make them work right:
    def _frozen_class(self): return ImmutableProbabilisticTree
    def __repr__(self):
        return '%s (p=%r)' % (Tree.unicode_repr(self), self.prob())
    def __str__(self):
        return '%s (p=%.6g)' % (self.pformat(margin=60), self.prob())
    def copy(self, deep=False):
        # Shallow copy: same class, same children, same probability.
        if not deep: return type(self)(self._label, self, prob=self.prob())
        # Deep copy: recursively convert every subtree.
        else: return type(self).convert(self)
    @classmethod
    def convert(cls, val):
        """Recursively convert ``val`` into a tree of type ``cls``;
        non-probabilistic trees default to probability 1.0.  Leaves are
        returned unchanged."""
        if isinstance(val, Tree):
            children = [cls.convert(child) for child in val]
            if isinstance(val, ProbabilisticMixIn):
                return cls(val._label, children, prob=val.prob())
            else:
                return cls(val._label, children, prob=1.0)
        else:
            return val

    def __eq__(self, other):
        # Equality also requires matching probabilities.
        return (self.__class__ is other.__class__ and
                (self._label, list(self), self.prob()) ==
                (other._label, list(other), other.prob()))

    def __lt__(self, other):
        if not isinstance(other, Tree):
            raise_unorderable_types("<", self, other)
        if self.__class__ is other.__class__:
            return ((self._label, list(self), self.prob()) <
                    (other._label, list(other), other.prob()))
        else:
            # Trees of different classes order by class name.
            return self.__class__.__name__ < other.__class__.__name__
@python_2_unicode_compatible
class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn):
    """An immutable ``Tree`` carrying a probability (via
    ``ProbabilisticMixIn``)."""
    def __init__(self, node, children=None, **prob_kwargs):
        ImmutableTree.__init__(self, node, children)
        ProbabilisticMixIn.__init__(self, **prob_kwargs)
        # Cache the hash; probability is part of identity here.
        # NOTE(review): presumably consumed by an inherited __hash__ —
        # confirm against ImmutableTree's definition.
        self._hash = hash((self._label, tuple(self), self.prob()))

    # We have to patch up these methods to make them work right:
    def _frozen_class(self): return ImmutableProbabilisticTree
    def __repr__(self):
        return '%s [%s]' % (Tree.unicode_repr(self), self.prob())
    def __str__(self):
        return '%s [%s]' % (self.pformat(margin=60), self.prob())
    def copy(self, deep=False):
        # Shallow copy: same class, same children, same probability.
        if not deep: return type(self)(self._label, self, prob=self.prob())
        # Deep copy: recursively convert every subtree.
        else: return type(self).convert(self)
    @classmethod
    def convert(cls, val):
        """Recursively convert ``val`` into a tree of type ``cls``;
        non-probabilistic trees default to probability 1.0.  Leaves are
        returned unchanged."""
        if isinstance(val, Tree):
            children = [cls.convert(child) for child in val]
            if isinstance(val, ProbabilisticMixIn):
                return cls(val._label, children, prob=val.prob())
            else:
                return cls(val._label, children, prob=1.0)
        else:
            return val
def _child_names(tree):
    """Return a list with, for each child of *tree*, its label wrapped
    in a ``Nonterminal`` (for subtree children) or the child itself
    (for leaf children)."""
    return [Nonterminal(child._label) if isinstance(child, Tree) else child
            for child in tree]
######################################################################
## Parsing
######################################################################
def bracket_parse(s):
    """
    Removed; use Tree.read(s, remove_empty_top_bracketing=True) instead.

    :raise NameError: always, pointing callers at the replacement API.
    """
    raise NameError("Use Tree.read(s, remove_empty_top_bracketing=True) instead.")
def sinica_parse(s):
    """
    Parse a Sinica Treebank string and return a tree.  Trees are represented as nested brackettings,
    as shown in the following example (X represents a Chinese character):
    S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY)

    :return: A tree corresponding to the string representation.
    :rtype: Tree
    :param s: The string to be converted
    :type s: str
    """
    # Split on the structural characters, keeping them as tokens
    # (the capture group makes re.split retain the delimiters).
    tokens = re.split(r'([()| ])', s)
    for i in range(len(tokens)):
        if tokens[i] == '(':
            tokens[i-1], tokens[i] = tokens[i], tokens[i-1] # pull nonterminal inside parens
        elif ':' in tokens[i]:
            fields = tokens[i].split(':')
            if len(fields) == 2: # non-terminal
                # Drop the role prefix, keeping only the category label.
                tokens[i] = fields[1]
            else:
                # role:tag:word -> "(tag word)" preterminal bracket.
                tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")"
        elif tokens[i] == '|':
            # '|' separates sisters; spaces already do that after join.
            tokens[i] = ''

    treebank_string = " ".join(tokens)
    return Tree.fromstring(treebank_string, remove_empty_top_bracketing=True)

    # Unreachable leftovers from an earlier implementation:
    # s = re.sub(r'^#[^\s]*\s', '', s) # remove leading identifier
    # s = re.sub(r'\w+:', '', s) # remove role tags

    # return s
######################################################################
## Demonstration
######################################################################
def demo():
    """
    A demonstration showing how Trees and Trees can be
    used.  This demonstration creates a Tree, and loads a
    Tree from the Treebank corpus,
    and shows the results of calling several of their methods.
    """

    from nltk import Tree, ProbabilisticTree

    # Demonstrate tree parsing.
    s = '(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))'
    t = Tree.fromstring(s)
    print("Convert bracketed string into tree:")
    print(t)
    print(t.__repr__())

    print("Display tree properties:")
    print(t.label())         # tree's constituent type
    print(t[0])             # tree's first child
    print(t[1])             # tree's second child
    print(t.height())
    print(t.leaves())
    print(t[1])
    print(t[1,1])           # indexing by tree position
    print(t[1,1,0])

    # Demonstrate tree modification.
    the_cat = t[0]
    the_cat.insert(1, Tree.fromstring('(JJ big)'))
    print("Tree modification:")
    print(t)
    t[1,1,1] = Tree.fromstring('(NN cake)')
    print(t)
    print()

    # Tree transforms
    print("Collapse unary:")
    t.collapse_unary()
    print(t)
    print("Chomsky normal form:")
    t.chomsky_normal_form()
    print(t)
    print()

    # Demonstrate probabilistic trees.
    pt = ProbabilisticTree('x', ['y', 'z'], prob=0.5)
    print("Probabilistic Tree:")
    print(pt)
    print()

    # Demonstrate parsing of treebank output format.
    t = Tree.fromstring(t.pformat())
    print("Convert tree to bracketed string and back again:")
    print(t)
    print()

    # Demonstrate LaTeX output
    print("LaTeX output:")
    print(t.pformat_latex_qtree())
    print()

    # Demonstrate Productions
    print("Production output:")
    print(t.productions())
    print()

    # Demonstrate tree nodes containing objects other than strings
    t.set_label(('test', 3))
    print(t)
# Explicit public API of this module.
__all__ = ['ImmutableProbabilisticTree', 'ImmutableTree', 'ProbabilisticMixIn',
           'ProbabilisticTree', 'Tree', 'bracket_parse',
           'sinica_parse', 'ParentedTree', 'MultiParentedTree',
           'ImmutableParentedTree', 'ImmutableMultiParentedTree']
| {
"content_hash": "b1025efa82b039926e779474ff2d70b9",
"timestamp": "",
"source": "github",
"line_count": 1592,
"max_line_length": 152,
"avg_line_length": 40.196608040201006,
"alnum_prop": 0.5447939618395762,
"repo_name": "neerajvashistha/pa-dude",
"id": "e697d6fef57442f752dcacfebdbaf6dca7abbdb7",
"size": "64375",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/nltk/tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "359307"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "114504"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "216904"
},
{
"name": "JavaScript",
"bytes": "1323680"
},
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "31341230"
},
{
"name": "Self",
"bytes": "40307"
},
{
"name": "Shell",
"bytes": "5427"
},
{
"name": "TeX",
"bytes": "96790"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
def get_first_object_or_none(queryset):
    """
    A shortcut to obtain the first object of a queryset if it exists or None
    otherwise.
    """
    # Slice first so only one element is fetched, then iterate: the loop
    # body runs at most once, and an empty queryset falls through.
    for obj in queryset[:1]:
        return obj
    return None
| {
"content_hash": "5a11793a265f99a1f09aad0c335c729e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.6190476190476191,
"repo_name": "Chilledheart/seahub",
"id": "942f5f91ba97f2b83f8f801577ceedf729a6c747",
"size": "272",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "seahub/shortcuts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "319935"
},
{
"name": "HTML",
"bytes": "816154"
},
{
"name": "Java",
"bytes": "2137623"
},
{
"name": "JavaScript",
"bytes": "2884153"
},
{
"name": "Makefile",
"bytes": "1004"
},
{
"name": "PLSQL",
"bytes": "17176"
},
{
"name": "Python",
"bytes": "1625951"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``content`` and ``template``
    columns to the ``core_category`` table (the ``Category`` model)."""

    def forwards(self, orm):
        # Adding field 'Category.content'
        db.add_column('core_category', 'content', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)

        # Adding field 'Category.template'
        db.add_column('core_category', 'template', self.gf('django.db.models.fields.CharField')(default='category.html', max_length=100), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Category.content'
        db.delete_column('core_category', 'content')

        # Deleting field 'Category.template'
        db.delete_column('core_category', 'template')

    # Frozen ORM snapshot generated by South; describes model state at the
    # time of this migration and must not be edited by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.author': {
            'Meta': {'object_name': 'Author'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'core.category': {
            'Meta': {'unique_together': "(('site', 'tree_path'),)", 'object_name': 'Category'},
            'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'", 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Category']", 'null': 'True', 'blank': 'True'}),
            'tree_path': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'core.dependency': {
            'Meta': {'object_name': 'Dependency'},
            'dependent_ct': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'depends_on_set'", 'to': "orm['contenttypes.ContentType']"}),
            'dependent_id': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'target_ct': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependency_for_set'", 'to': "orm['contenttypes.ContentType']"}),
            'target_id': ('django.db.models.fields.IntegerField', [], {})
        },
        'core.listing': {
            'Meta': {'object_name': 'Listing'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Category']"}),
            'commercial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'publish_from': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'publish_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'publishable': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Publishable']"})
        },
        'core.publishable': {
            'Meta': {'object_name': 'Publishable'},
            'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'", 'blank': 'True'}),
            'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Author']", 'symmetrical': 'False'}),
            'category': ('ella.core.cache.fields.CachedForeignKey', [], {'to': "orm['core.Category']"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'photo': ('ella.core.cache.fields.CachedForeignKey', [], {'to': "orm['photos.Photo']", 'null': 'True', 'blank': 'True'}),
            'publish_from': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(3000, 1, 1, 0, 0, 0, 2)', 'db_index': 'True'}),
            'publish_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Source']", 'null': 'True', 'blank': 'True'}),
            'static': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'core.related': {
            'Meta': {'object_name': 'Related'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'publishable': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Publishable']"}),
            'related_ct': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'related_id': ('django.db.models.fields.IntegerField', [], {})
        },
        'core.source': {
            'Meta': {'object_name': 'Source'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'photos.photo': {
            'Meta': {'object_name': 'Photo'},
            'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'", 'blank': 'True'}),
            'authors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'photo_set'", 'symmetrical': 'False', 'to': "orm['core.Author']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'height': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'important_bottom': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'important_left': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'important_right': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'important_top': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Source']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'width': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['core']
| {
"content_hash": "3050a3748c44fade1ba2d1220df5f218",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 182,
"avg_line_length": 74.0375,
"alnum_prop": 0.5490460915076819,
"repo_name": "petrlosa/ella",
"id": "441aeceb39766f3b6cb49c7b29c53314473b0c84",
"size": "11864",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ella/core/south_migrations/0005_auto__add_field_category_content__add_field_category_template.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10506"
},
{
"name": "Python",
"bytes": "688162"
}
],
"symlink_target": ""
} |
"""A command line parsing module that lets modules define their own options.
Each module defines its own options, e.g.,
from tornado.options import define, options
define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
define("memcache_hosts", default="127.0.0.1:11011", multiple=True,
help="Main user memcache servers")
def connect():
db = database.Connection(options.mysql_host)
...
The main() method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded
when the modules are loaded. Your main() method can parse the command line
or parse a config file with:
import tornado.options
tornado.options.parse_config_file("/etc/server.conf")
tornado.options.parse_command_line()
Command line formats are what you would expect ("--myoption=myvalue").
Config files are just Python files. Global names become options, e.g.,
myoption = "myvalue"
myotheroption = "myothervalue"
We support datetimes, timedeltas, ints, and floats (just pass a 'type'
kwarg to define). We also accept multi-value options. See the documentation
for define() below.
"""
import datetime
import logging
import re
import sys
import time
# For pretty log messages, if available
try:
import curses
except:
curses = None
def define(name, default=None, type=str, help=None, metavar=None,
           multiple=False):
    """Defines a new command line option.
    If type is given (one of str, float, int, datetime, or timedelta),
    we parse the command line arguments based on the given type. If
    multiple is True, we accept comma-separated values, and the option
    value is always a list.
    For multi-value integers, we also accept the syntax x:y, which
    turns into range(x, y) - very useful for long integer ranges.
    help and metavar are used to construct the automatically generated
    command line help string. The help message is formatted like:
       --name=METAVAR                      help string
    Command line option names must be unique globally. They can be parsed
    from the command line with parse_command_line() or parsed from a
    config file with parse_config_file.
    Raises Error if an option with the same name was already defined.
    """
    if name in options:
        # Bug fix: the arguments were previously passed directly to Error(),
        # which stores them as Exception args without interpolating them
        # into the message; format the message explicitly instead.
        raise Error("Option %r already defined in %s" %
                    (name, options[name].file_name))
    # Record which file defined this option (used to group the --help
    # output); options defined by this module itself get an empty name.
    frame = sys._getframe(0)
    options_file = frame.f_code.co_filename
    file_name = frame.f_back.f_code.co_filename
    if file_name == options_file: file_name = ""
    options[name] = _Option(name, file_name=file_name, default=default,
                            type=type, help=help, metavar=metavar,
                            multiple=multiple)
def parse_command_line(args=None):
    """Parses all options given on the command line.
    We return all command line arguments that are not options as a list.
    """
    if args is None: args = sys.argv
    for i in xrange(1, len(args)):  # xrange: this module targets Python 2
        # All things after the last option are command line arguments
        if not args[i].startswith("-"):
            return args[i:]
        if args[i] == "--":
            # NOTE(review): "--" is skipped rather than treated as an
            # end-of-options terminator; confirm this is intended.
            continue
        arg = args[i].lstrip("-")
        name, equals, value = arg.partition("=")
        name = name.replace('-', '_')
        if not name in options:
            print_help()
            raise Error('Unrecognized command line option: %r' % name)
        option = options[name]
        if not equals:
            # Boolean flags may be given bare ("--flag") and default to true.
            if option.type == bool:
                value = "true"
            else:
                raise Error('Option %r requires a value' % name)
        option.parse(value)
    # Only reached when every argument was an option.
    if options.help:
        print_help()
        sys.exit(0)
    # Set up log level and pretty console logging by default
    logging.getLogger().setLevel(getattr(logging, options.logging.upper()))
    enable_pretty_logging()
    return []
def parse_config_file(path, overwrite=True):
    """Parses and loads the Python config file at the given path."""
    # NOTE(review): the `overwrite` parameter is accepted but never used;
    # config values always replace the current option values.
    config = {}
    # Python 2 execfile: the config file is executed as Python code and its
    # global names become candidate option values.
    execfile(path, config, config)
    for name in config:
        # Names that were never define()d are silently ignored.
        if name in options:
            options[name].set(config[name])
def print_help(file=sys.stdout):
    """Prints all the command line options to stdout."""
    # Python 2 print-statement syntax ("print >> file") throughout.
    print >> file, "Usage: %s [OPTIONS]" % sys.argv[0]
    print >> file, ""
    print >> file, "Options:"
    # Group options by the file that defined them so related options are
    # listed together under a filename heading.
    by_file = {}
    for option in options.itervalues():
        by_file.setdefault(option.file_name, []).append(option)
    for filename, o in sorted(by_file.items()):
        if filename: print >> file, filename
        o.sort(key=lambda option: option.name)
        for option in o:
            prefix = option.name
            if option.metavar:
                prefix += "=" + option.metavar
            print >> file, " --%-30s %s" % (prefix, option.help or "")
    print >> file
class _Options(dict):
"""Our global program options, an dictionary with object-like access."""
@classmethod
def instance(cls):
if not hasattr(cls, "_instance"):
cls._instance = cls()
return cls._instance
def __getattr__(self, name):
if isinstance(self.get(name), _Option):
return self[name].value()
raise Error("Unrecognized option %r" % name)
class _Option(object):
    """A single defined option: metadata plus parsing/validation logic."""
    def __init__(self, name, default=None, type=str, help=None, metavar=None,
                 multiple=False, file_name=None):
        # Multi-value options default to an empty list, not None.
        if default is None and multiple:
            default = []
        self.name = name
        self.type = type
        self.help = help
        self.metavar = metavar
        self.multiple = multiple
        self.file_name = file_name
        self.default = default
        self._value = None  # unset until parse()/set(); value() falls back to default
    def value(self):
        """Returns the parsed/assigned value, or the default if unset."""
        return self.default if self._value is None else self._value
    def parse(self, value):
        """Parses the raw command line string `value` into this option's type
        and stores it; returns the resulting value."""
        # Special-cased types get dedicated parsers; any other type (int,
        # float, or a user-supplied callable) is applied to the string directly.
        _parse = {
            datetime.datetime: self._parse_datetime,
            datetime.timedelta: self._parse_timedelta,
            bool: self._parse_bool,
            str: self._parse_string,
        }.get(self.type, self.type)
        if self.multiple:
            if self._value is None:
                self._value = []
            for part in value.split(","):
                if self.type in (int, long):  # `long`: Python 2 only
                    # allow ranges of the form X:Y (inclusive at both ends)
                    lo, _, hi = part.partition(":")
                    lo = _parse(lo)
                    hi = _parse(hi) if hi else lo
                    self._value.extend(range(lo, hi+1))
                else:
                    self._value.append(_parse(part))
        else:
            self._value = _parse(value)
        return self.value()
    def set(self, value):
        """Assigns an already-typed value, validating it against self.type."""
        if self.multiple:
            if not isinstance(value, list):
                raise Error("Option %r is required to be a list of %s" %
                            (self.name, self.type.__name__))
            for item in value:
                if item != None and not isinstance(item, self.type):
                    raise Error("Option %r is required to be a list of %s" %
                                (self.name, self.type.__name__))
        else:
            if value != None and not isinstance(value, self.type):
                raise Error("Option %r is required to be a %s" %
                            (self.name, self.type.__name__))
        self._value = value
    # Supported date/time formats in our options
    _DATETIME_FORMATS = [
        "%a %b %d %H:%M:%S %Y",
        "%Y-%m-%d %H:%M:%S",
        "%Y-%m-%d %H:%M",
        "%Y-%m-%dT%H:%M",
        "%Y%m%d %H:%M:%S",
        "%Y%m%d %H:%M",
        "%Y-%m-%d",
        "%Y%m%d",
        "%H:%M:%S",
        "%H:%M",
    ]
    def _parse_datetime(self, value):
        """Tries each supported format in turn; raises Error if none match."""
        for format in self._DATETIME_FORMATS:
            try:
                return datetime.datetime.strptime(value, format)
            except ValueError:
                pass
        raise Error('Unrecognized date/time format: %r' % value)
    # Abbreviation -> timedelta keyword table, e.g. "h" -> "hours".
    _TIMEDELTA_ABBREVS = [
        ('hours', ['h']),
        ('minutes', ['m', 'min']),
        ('seconds', ['s', 'sec']),
        ('milliseconds', ['ms']),
        ('microseconds', ['us']),
        ('days', ['d']),
        ('weeks', ['w']),
    ]
    _TIMEDELTA_ABBREV_DICT = dict(
        (abbrev, full) for full, abbrevs in _TIMEDELTA_ABBREVS
        for abbrev in abbrevs)
    _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
    _TIMEDELTA_PATTERN = re.compile(
        r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE)
    def _parse_timedelta(self, value):
        """Parses strings like "1h30m" or "45 sec" into a timedelta.
        Units default to seconds when omitted."""
        try:
            sum = datetime.timedelta()
            start = 0
            while start < len(value):
                m = self._TIMEDELTA_PATTERN.match(value, start)
                if not m:
                    raise Exception()
                num = float(m.group(1))
                units = m.group(2) or 'seconds'
                units = self._TIMEDELTA_ABBREV_DICT.get(units, units)
                sum += datetime.timedelta(**{units: num})
                start = m.end()
            return sum
        except:
            # NOTE(review): this handler re-raises unchanged, so it has no
            # effect; presumably it was meant to wrap failures in Error.
            raise
    def _parse_bool(self, value):
        # Anything other than "false"/"0"/"f" (case-insensitive) is true.
        return value.lower() not in ("false", "0", "f")
    def _parse_string(self, value):
        # Python 2: command line bytes are decoded to unicode here.
        return value.decode("utf-8")
class Error(Exception):
    """Exception raised by errors in the options module."""
    pass
def enable_pretty_logging():
    """Turns on colored logging output for stderr if we are in a tty.

    Best-effort: silently does nothing when curses is unavailable, stderr
    is not a terminal, or terminfo cannot be initialized.
    """
    if not curses: return
    try:
        # Only colorize interactive terminals whose $TERM curses recognizes.
        if not sys.stderr.isatty(): return
        curses.setupterm()
    except Exception:
        # Bug fix: the previous bare "except:" also swallowed SystemExit
        # and KeyboardInterrupt; keep the best-effort fallback but only
        # for ordinary errors.
        return
    channel = logging.StreamHandler()
    channel.setFormatter(_ColorLogFormatter())
    logging.getLogger().addHandler(channel)
class _ColorLogFormatter(logging.Formatter):
    """Log formatter that prefixes records with a colored
    "[LEVEL time module:lineno]" header (requires curses terminfo)."""
    def __init__(self, *args, **kwargs):
        logging.Formatter.__init__(self, *args, **kwargs)
        # setaf/setf are the terminfo capabilities for foreground color.
        fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or ""
        self._colors = {
            logging.DEBUG: curses.tparm(fg_color, 4), # Blue
            logging.INFO: curses.tparm(fg_color, 2), # Green
            logging.WARNING: curses.tparm(fg_color, 3), # Yellow
            logging.ERROR: curses.tparm(fg_color, 1), # Red
        }
        self._normal = curses.tigetstr("sgr0")  # reset-attributes sequence
    def format(self, record):
        try:
            record.message = record.getMessage()
        except Exception, e:  # Python 2 except syntax
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        record.asctime = time.strftime(
            "%y%m%d %H:%M:%S", self.converter(record.created))
        prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
            record.__dict__
        # Color only the prefix; the message body stays in normal attributes.
        color = self._colors.get(record.levelno, self._normal)
        formatted = color + prefix + self._normal + " " + record.message
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            formatted = formatted.rstrip() + "\n" + record.exc_text
        # Indent continuation lines so multi-line records stay readable.
        return formatted.replace("\n", "\n    ")
# The process-wide option registry; modules populate it via define().
options = _Options.instance()
# Default options
define("help", type=bool, help="show this help information")
define("logging", default="info", help="set the Python log level",
       metavar="info|warning|error")
| {
"content_hash": "61de6e4ce5bc3318b61c61adf5545958",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 77,
"avg_line_length": 33.76047904191617,
"alnum_prop": 0.5693508336289465,
"repo_name": "aljoscha/shot-o-matic",
"id": "1f676ff01479373122341ae1b5ba66f15663a8b5",
"size": "11873",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "vendor/tornado-0.2/demos/auth/tornado/options.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12756"
},
{
"name": "Shell",
"bytes": "1866"
}
],
"symlink_target": ""
} |
import datetime
import hashlib
import itertools
import logging
import random
import signal
import sys
import time
from lockfile import LockTimeout
from hlsclient import helpers
from hlsclient.balancer import Balancer
from hlsclient.combine import get_actions
from hlsclient.consumer import consume_from_balancer
from hlsclient.discover import discover_playlists, get_servers
from hlsclient.lock import ExpiringLinkLockFile
MAX_TTL_IN_SECONDS = 600
class PlaylistWorker(object):
    """Worker that mirrors a single HLS playlist to a destination.

    Holds an expiring lock file so at most one worker per playlist runs at
    a time, and voluntarily exits after a random TTL so workers recycle.
    """
    def __init__(self, playlist, is_variant=False):
        # playlist: identifier of the playlist this worker is responsible for.
        # is_variant: True when `playlist` is an output produced by combining
        # several input streams (see filter_playlists_for_worker).
        self.playlist = playlist
        self.is_variant = is_variant
        self.config = helpers.load_config()
        self.setup_lock()
    def setup(self):
        """Initializes logging, destination/encrypt settings, the balancer,
        and this worker's randomized death time."""
        helpers.setup_logging(self.config, "worker for {}".format(self.playlist))
        logging.debug('HLS CLIENT Started for {}'.format(self.playlist))
        self.destination = self.config.get('hlsclient', 'destination')
        self.encrypt = self.config.getboolean('hlsclient', 'encrypt')
        not_modified_tolerance = self.config.getint('hlsclient', 'not_modified_tolerance')
        self.balancer = Balancer(not_modified_tolerance)
        # Random TTL in [1, MAX_TTL_IN_SECONDS] staggers worker restarts.
        ttl = datetime.timedelta(seconds=random.randint(1, MAX_TTL_IN_SECONDS))
        self.death_time = datetime.datetime.now() + ttl
    def run_forever(self):
        """Main loop: runs consume cycles while locking, until the TTL
        expires, the process is interrupted, or someone else takes the lock."""
        self.setup()
        signal.signal(signal.SIGTERM, self.interrupted)
        while self.should_run():
            try:
                self.run_if_locking()
                time.sleep(0.1)
            except LockTimeout:
                logging.debug("Unable to acquire lock")
            except KeyboardInterrupt:
                logging.debug('Quitting...')
                break
            except Exception:
                # Keep the worker alive on unexpected errors; just log them.
                logging.exception('An unknown error happened')
        self.stop()
    def run(self):
        """Performs one discovery/consume cycle for this worker's playlist."""
        playlists = discover_playlists(self.config)
        worker_playlists = self.filter_playlists_for_worker(playlists)
        if not worker_playlists['streams']:
            logging.warning("Playlist is not available anymore")
            self.stop()  # exits the process via sys.exit
        paths = get_servers(worker_playlists)
        self.balancer.update(paths)
        consume_from_balancer(self.balancer,
                              worker_playlists,
                              self.destination,
                              self.encrypt)
    def filter_playlists_for_worker(self, playlists):
        """Returns a copy of `playlists` whose 'streams' entry is restricted
        to the stream(s) this worker handles."""
        if self.is_variant:
            # For a variant output, collect the input streams of every
            # "combine" action that produces this playlist.
            combine_actions = get_actions(playlists, "combine")
            my_combine_actions = [action for action in combine_actions if action['output'] == self.playlist]
            my_inputs = [action['input'] for action in my_combine_actions]
            streams = itertools.chain(*my_inputs)
            streams = [s for s in streams if s in playlists['streams']] # transcoded playlists are ignored
        elif self.playlist in playlists['streams']:
            streams = [self.playlist]
        else:
            streams = []
        result = playlists.copy()
        result["streams"] = {stream: playlists['streams'][stream] for stream in streams}
        return result
    def should_run(self):
        """Returns True while this worker's TTL has not expired."""
        should_live = datetime.datetime.now() < self.death_time
        if not should_live:
            logging.info("Worker {} should die now!".format(self.worker_id()))
        return should_live
    def interrupted(self, *args):
        """SIGTERM handler: releases the lock and exits."""
        logging.info('Interrupted. Releasing lock.')
        self.stop()
    def setup_lock(self):
        """Creates the expiring lock object from 'lock' config settings."""
        lock_path = self.lock_path()
        self.lock_timeout = self.config.getint('lock', 'timeout')
        self.lock_expiration = self.config.getint('lock', 'expiration')
        self.lock = ExpiringLinkLockFile(lock_path)
    def lock_path(self):
        """Returns the lock file path, made unique per playlist."""
        return '{0}.{1}'.format(self.config.get('lock', 'path'), self.worker_id())
    def worker_id(self):
        """Returns a stable hex id derived from the playlist name."""
        # NOTE(review): hashlib.md5 is fed self.playlist directly, which
        # requires a byte string -- looks like Python 2 code; confirm.
        return hashlib.md5(self.playlist).hexdigest()
    def run_if_locking(self):
        """Runs one cycle if (and only if) this process holds the lock."""
        if self.other_is_running():
            logging.warning("Someone else acquired the lock")
            self.stop()
            return
        if not self.lock.is_locked():
            self.lock.acquire(timeout=self.lock_timeout)
        if self.lock.i_am_locking():
            self.lock.update_lock()
            self.run()
    def other_is_running(self):
        """Returns True if another live process holds the lock.
        Expired foreign locks are broken and reported as not running."""
        other = self.lock.is_locked() and not self.lock.i_am_locking()
        if other and self.lock.expired(tolerance=self.lock_expiration):
            logging.warning("Lock expired. Breaking it")
            self.lock.break_lock()
            return False
        return other
    def stop(self):
        """Releases the lock (if held) and terminates the process."""
        try:
            self.lock.release_if_locking()
        finally:
            sys.exit(0)
| {
"content_hash": "fa64f05209d3f65c733dcfaf92684e28",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 108,
"avg_line_length": 35.778625954198475,
"alnum_prop": 0.6168124599957329,
"repo_name": "globocom/hlsclient",
"id": "9b73279fbf7d46267e6bd8291f0a30acd1feb7af",
"size": "4687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hlsclient/workers/playlist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77815"
},
{
"name": "Shell",
"bytes": "475"
},
{
"name": "TypeScript",
"bytes": "1123112"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
# pylint: enable=protected-access
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _on_device(fn, device):
"""Build the subgraph defined by lambda `fn` on `device` if it's not None."""
if device:
with ops.device(device):
return fn()
else:
return fn()
# pylint: disable=unused-argument
def _rnn_step(
    time, sequence_length, min_sequence_length, max_sequence_length,
    zero_output, state, call_cell, state_size, skip_conditionals=False):
  """Calculate one step of a dynamic RNN minibatch.
  Returns an (output, state) pair conditioned on the sequence_lengths.
  When skip_conditionals=False, the pseudocode is something like:
  if t >= max_sequence_length:
    return (zero_output, state)
  if t < min_sequence_length:
    return call_cell()
  # Selectively output zeros or output, old state or new state depending
  # on if we've finished calculating each row.
  new_output, new_state = call_cell()
  final_output = np.vstack([
    zero_output if time >= sequence_lengths[r] else new_output_r
    for r, new_output_r in enumerate(new_output)
  ])
  final_state = np.vstack([
    state[r] if time >= sequence_lengths[r] else new_state_r
    for r, new_state_r in enumerate(new_state)
  ])
  return (final_output, final_state)
  Args:
    time: Python int, the current time step
    sequence_length: int32 `Tensor` vector of size [batch_size]
    min_sequence_length: int32 `Tensor` scalar, min of sequence_length
    max_sequence_length: int32 `Tensor` scalar, max of sequence_length
    zero_output: `Tensor` vector of shape [output_size]
    state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
      or a list/tuple of such tensors.
    call_cell: lambda returning tuple of (new_output, new_state) where
      new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
      new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
    state_size: The `cell.state_size` associated with the state.
    skip_conditionals: Python bool, whether to skip using the conditional
      calculations. This is useful for `dynamic_rnn`, where the input tensor
      matches `max_sequence_length`, and using conditionals just slows
      everything down.
  Returns:
    A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
      final_output is a `Tensor` matrix of shape [batch_size, output_size]
      final_state is either a single `Tensor` matrix, or a tuple of such
        matrices (matching length and shapes of input `state`).
  Raises:
    ValueError: If the cell returns a state tuple whose length does not match
      that returned by `state_size`.
  """
  # Convert state to a list for ease of use
  flat_state = nest.flatten(state)
  flat_zero_output = nest.flatten(zero_output)
  # Selects `output` (the pass-through value) for batch rows whose sequence
  # has already ended at `time`, and `new_output` for rows still in sequence.
  def _copy_one_through(output, new_output):
    copy_cond = (time >= sequence_length)
    return _on_device(
        lambda: array_ops.where(copy_cond, output, new_output),
        device=new_output.op.device)
  def _copy_some_through(flat_new_output, flat_new_state):
    # Use broadcasting select to determine which values should get
    # the previous state & zero output, and which values should get
    # a calculated state & output.
    flat_new_output = [
        _copy_one_through(zero_output, new_output)
        for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
    flat_new_state = [
        _copy_one_through(state, new_state)
        for state, new_state in zip(flat_state, flat_new_state)]
    return flat_new_output + flat_new_state
  def _maybe_copy_some_through():
    """Run RNN step. Pass through either no or some past state."""
    new_output, new_state = call_cell()
    nest.assert_same_structure(state, new_state)
    flat_new_state = nest.flatten(new_state)
    flat_new_output = nest.flatten(new_output)
    return control_flow_ops.cond(
        # if t < min_seq_len: calculate and return everything
        time < min_sequence_length, lambda: flat_new_output + flat_new_state,
        # else copy some of it through
        lambda: _copy_some_through(flat_new_output, flat_new_state))
  # TODO(ebrevdo): skipping these conditionals may cause a slowdown,
  # but benefits from removing cond() and its gradient. We should
  # profile with and without this switch here.
  if skip_conditionals:
    # Instead of using conditionals, perform the selective copy at all time
    # steps. This is faster when max_seq_len is equal to the number of unrolls
    # (which is typical for dynamic_rnn).
    new_output, new_state = call_cell()
    nest.assert_same_structure(state, new_state)
    new_state = nest.flatten(new_state)
    new_output = nest.flatten(new_output)
    final_output_and_state = _copy_some_through(new_output, new_state)
  else:
    empty_update = lambda: flat_zero_output + flat_state
    final_output_and_state = control_flow_ops.cond(
        # if t >= max_seq_len: copy all state through, output zeros
        time >= max_sequence_length, empty_update,
        # otherwise calculation is required: copy some or all of it through
        _maybe_copy_some_through)
  # Both cond branches return outputs and states concatenated in one flat
  # list; split and re-validate the lengths here.
  if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
    raise ValueError("Internal error: state and output were not concatenated "
                     "correctly.")
  final_output = final_output_and_state[:len(flat_zero_output)]
  final_state = final_output_and_state[len(flat_zero_output):]
  # Restore static shape information lost through the where/cond ops.
  for output, flat_output in zip(final_output, flat_zero_output):
    output.set_shape(flat_output.get_shape())
  for substate, flat_substate in zip(final_state, flat_state):
    substate.set_shape(flat_substate.get_shape())
  final_output = nest.pack_sequence_as(
      structure=zero_output, flat_sequence=final_output)
  final_state = nest.pack_sequence_as(
      structure=state, flat_sequence=final_state)
  return final_output, final_state
def dynamic_rnn(cell, inputs, inerval, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
  """Creates a recurrent neural network specified by RNNCell `cell`.
  This function is functionally identical to the function `rnn` above, but
  performs fully dynamic unrolling of `inputs`.
  NOTE(review): the extra positional argument `inerval` (sic -- presumably
  "interval") is forwarded to `_dynamic_rnn_loop`, where every `interval`
  steps the loop averages the current and previous LSTM states. The
  misspelled name is kept because callers may pass it by keyword.
  Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
  each frame. Instead, `inputs` may be a single `Tensor` where
  the maximum time is either the first or second dimension (see the parameter
  `time_major`). Alternatively, it may be a (possibly nested) tuple of
  Tensors, each of them having matching batch and time dimensions.
  The corresponding output is either a single `Tensor` having the same number
  of time steps and batch size, or a (possibly nested) tuple of such tensors,
  matching the nested structure of `cell.output_size`.
  The parameter `sequence_length` is optional and is used to copy-through state
  and zero-out outputs when past a batch element's sequence length. So it's more
  for correctness than performance, unlike in rnn().
  Args:
    cell: An instance of RNNCell.
    inputs: The RNN inputs.
      If `time_major == False` (default), this must be a `Tensor` of shape:
        `[batch_size, max_time, ...]`, or a nested tuple of such
        elements.
      If `time_major == True`, this must be a `Tensor` of shape:
        `[max_time, batch_size, ...]`, or a nested tuple of such
        elements.
      This may also be a (possibly nested) tuple of Tensors satisfying
      this property. The first two dimensions must match across all the inputs,
      but otherwise the ranks and other shape components may differ.
      In this case, input to `cell` at each time-step will replicate the
      structure of these tuples, except for the time dimension (from which the
      time is taken).
      The input to `cell` at each time step will be a `Tensor` or (possibly
      nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
    sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
    initial_state: (optional) An initial state for the RNN.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a heterogeneous
      dtype.
    parallel_iterations: (Default: 32). The number of iterations to run in
      parallel. Those operations which do not have any temporal dependency
      and can be run in parallel, will be. This parameter trades off
      time for space. Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU. This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors.
      If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
      If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
      Using `time_major = True` is a bit more efficient because it avoids
      transposes at the beginning and end of the RNN calculation. However,
      most TensorFlow data is batch-major, so by default this function
      accepts input and emits output in batch-major form.
    scope: VariableScope for the created subgraph; defaults to "rnn".
  Returns:
    A pair (outputs, state) where:
      outputs: The RNN output `Tensor`.
        If time_major == False (default), this will be a `Tensor` shaped:
          `[batch_size, max_time, cell.output_size]`.
        If time_major == True, this will be a `Tensor` shaped:
          `[max_time, batch_size, cell.output_size]`.
        Note, if `cell.output_size` is a (possibly nested) tuple of integers
        or `TensorShape` objects, then `outputs` will be a tuple having the
        same structure as `cell.output_size`, containing Tensors having shapes
        corresponding to the shape data in `cell.output_size`.
      state: The final state. If `cell.state_size` is an int, this
        will be shaped `[batch_size, cell.state_size]`. If it is a
        `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
        If it is a (possibly nested) tuple of ints or `TensorShape`, this will
        be a tuple having the corresponding shapes.
  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.
  """
  # pylint: disable=protected-access
  if not isinstance(cell, rnn_cell_impl._RNNCell):
    raise TypeError("cell must be an instance of RNNCell")
  # pylint: enable=protected-access
  # By default, time_major==False and inputs are batch-major: shaped
  #   [batch, time, depth]
  # For internal calculations, we transpose to [time, batch, depth]
  flat_input = nest.flatten(inputs)
  if not time_major:
    # (B,T,D) => (T,B,D)
    flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
                       for input_ in flat_input)
  parallel_iterations = parallel_iterations or 32
  if sequence_length is not None:
    sequence_length = math_ops.to_int32(sequence_length)
    if sequence_length.get_shape().ndims not in (None, 1):
      raise ValueError(
          "sequence_length must be a vector of length batch_size, "
          "but saw shape: %s" % sequence_length.get_shape())
    sequence_length = array_ops.identity(  # Just to find it in the graph.
        sequence_length, name="sequence_length")
  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "rnn") as varscope:
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)
    input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
    batch_size = input_shape[0][1]  # inputs are time-major at this point
    for input_ in input_shape:
      if input_[1].get_shape() != batch_size.get_shape():
        raise ValueError("All inputs should have the same batch size")
    if initial_state is not None:
      state = initial_state
    else:
      if not dtype:
        raise ValueError("If no initial_state is provided, dtype must be.")
      state = cell.zero_state(batch_size, dtype)
    # Graph-time assertion that `x` has the dynamic shape `shape`.
    def _assert_has_shape(x, shape):
      x_shape = array_ops.shape(x)
      packed_shape = array_ops.stack(shape)
      return control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
          ["Expected shape for Tensor %s is " % x.name,
           packed_shape, " but saw shape: ", x_shape])
    if sequence_length is not None:
      # Perform some shape validation
      with ops.control_dependencies(
          [_assert_has_shape(sequence_length, [batch_size])]):
        sequence_length = array_ops.identity(
            sequence_length, name="CheckSeqLen")
    inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
    (outputs, final_state) = _dynamic_rnn_loop(
        cell,
        inputs,
        state,
        inerval,
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory,
        sequence_length=sequence_length,
        dtype=dtype)
    # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
    # If we are performing batch-major calculations, transpose output back
    # to shape [batch, time, depth]
    if not time_major:
      # (T,B,D) => (B,T,D)
      flat_output = nest.flatten(outputs)
      flat_output = [array_ops.transpose(output, [1, 0, 2])
                     for output in flat_output]
      outputs = nest.pack_sequence_as(
          structure=outputs, flat_sequence=flat_output)
    return (outputs, final_state)
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
interval,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nsted) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
pre_state = initial_state
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state, pre_state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
new_pre_state = []
input_state = []
for cstate, pstate in zip(list(pre_state),list(state)):
c1 = tf.cond(tf.logical_and(tf.equal(tf.mod(time,interval),tf.constant(0)),
tf.greater(time, tf.constant(0))), lambda: (cstate.c+pstate.c)/2, lambda: pstate.c)
h1 = tf.cond(tf.logical_and(tf.equal(tf.mod(time,interval),tf.constant(0)),
tf.greater(time, tf.constant(0))), lambda: (cstate.h+pstate.h)/2, lambda: pstate.h)
c2 = tf.cond(tf.logical_and(tf.equal(tf.mod(time,interval),tf.constant(0)),
tf.greater(time, tf.constant(0))), lambda: pstate.c, lambda: cstate.c)
h2 = tf.cond(tf.logical_and(tf.equal(tf.mod(time,interval),tf.constant(0)),
tf.greater(time, tf.constant(0))), lambda: pstate.h, lambda: cstate.h)
new_pre_state.append(tf.contrib.rnn.LSTMStateTuple(c2,h2))
input_state.append(tf.contrib.rnn.LSTMStateTuple(c1,h1))
new_pre_state = tuple(new_pre_state)
input_state = tuple(input_state)
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, input_state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=input_state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state, new_pre_state)
_, output_final_ta, final_state, final_pre_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state, pre_state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state) | {
"content_hash": "482bafe8a923c2db970a4818a952549a",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 107,
"avg_line_length": 44.23646209386282,
"alnum_prop": 0.6326355735096095,
"repo_name": "wangheda/youtube-8m",
"id": "64d16f10c5f72b770df0cec986fa0175f952e265",
"size": "24507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "youtube-8m-zhangteng/rnn_residual.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1360297"
},
{
"name": "Python",
"bytes": "1614517"
},
{
"name": "Shell",
"bytes": "372502"
},
{
"name": "Vim script",
"bytes": "443"
}
],
"symlink_target": ""
} |
from rsqueakvm.plugins.foreign_language.process import W_ForeignLanguageProcess
from rsqueakvm.plugins.python.model import W_PythonObject
from rsqueakvm.plugins.python.objspace import py_space
from rsqueakvm.plugins.python import utils
from pypy.interpreter.argument import Arguments
from pypy.interpreter.error import OperationError
class W_PythonProcess(W_ForeignLanguageProcess):
    """Foreign-language process that evaluates Python code inside RSqueak.

    NOTE(review): this is RPython code (see the Python 2 ``print``
    statement in ``send``); keep any changes RPython-compatible.
    """

    # RPython attribute declaration: fixes the set of instance attributes.
    _attrs_ = ['source', 'filename', 'cmd', 'ec']
    repr_classname = 'W_PythonProcess'

    def __init__(self, space, w_rcvr=None, method_name='', args_w=None,
                 source='', filename='', cmd='',
                 is_send=False, break_on_exceptions=False):
        """Create a Python process.

        source/filename/cmd describe code for eval(); w_rcvr/method_name/
        args_w describe a send().  A fresh PyPy execution context is
        created so this process has its own frame stack.
        """
        W_ForeignLanguageProcess.__init__(
            self, space, w_rcvr, method_name, args_w,
            is_send, break_on_exceptions)
        self.source = source
        self.filename = filename
        self.cmd = cmd
        self.ec = py_space.createexecutioncontext()
        self.init_runner()

    def eval(self):
        """Run self.source via utils._run_eval_string and store the result.

        All three of source/filename/cmd must be non-empty; otherwise the
        process is marked failed.
        """
        if self.source == '' or self.filename == '' or self.cmd == '':
            return self.fail('Invalid Python eval')
        try:
            retval = utils._run_eval_string(
                self.source, self.filename, self.cmd)
            self.set_result(W_PythonObject(retval))
        except OperationError as operr:
            # operr was not handled by users, because they pressed proceed.
            # save Python error as result instead.
            self.set_result(utils.operr_to_w_object(operr))

    def send(self):
        """Perform a Smalltalk-style send on a Python receiver.

        Dispatch rules:
        * method_name '__call__' -> call the attribute with args_w
        * exactly one argument   -> setattr(receiver, method_name, arg)
        * otherwise              -> getattr(receiver, method_name)
        """
        st_to_py = utils.smalltalk_to_python
        wp_rcvr = st_to_py(self.space(), self.w_rcvr)
        wp_attrname = py_space.newtext(self.method_name)
        try:
            if self.method_name == '__call__':  # special __call__ case
                w_meth = py_space.getattr(wp_rcvr, wp_attrname)
                args_wp = [st_to_py(self.space(), a) for a in self.args_w]
                # use call_args() to allow variable number of args_w
                # (but this disables speed hacks in Pypy)
                args = Arguments(py_space, args_wp)
                self.set_result(W_PythonObject(
                    py_space.call_args(w_meth, args)))
            elif len(self.args_w) == 1:  # setattr when one argument
                wp_value = st_to_py(self.space(), self.args_w[0])
                py_space.setattr(wp_rcvr, wp_attrname, wp_value)
                self.set_result(W_PythonObject(py_space.w_None))
            else:  # otherwise getattr
                self.set_result(W_PythonObject(
                    py_space.getattr(wp_rcvr, wp_attrname)))
        except OperationError as operr:
            # Python-level error: record it as both error and result so
            # the Squeak side can inspect it.
            print 'Python error caught: %s' % operr
            error = utils.operr_to_w_object(operr)
            self.set_error(error)
            self.set_result(error)
        except Exception as e:
            # Interpreter-level failure (not a wrapped Python exception).
            self.fail('Unable to call %s on %s: %s' % (
                self.method_name, wp_rcvr, e))

    def pre_resume(self):
        # Make this process the "current" one for the duration of its run.
        py_space.current_python_process.set(self)

    def post_resume(self):
        # unset `current_python_process` to restore original behavior
        py_space.current_python_process.set(None)

    def w_top_frame(self):
        """Return the topmost Python frame wrapped for Squeak, or None."""
        if self.ec is None:
            return None
        topframe = self.ec.gettopframe()
        if topframe is None:
            return None
        return W_PythonObject(topframe)

    def guess_classname(self):
        return self.repr_classname

    def str_content(self):
        return '%s: "%s"' % (self.cmd, self.source)
| {
"content_hash": "b80ba576d1abe78f3696d81c66451be4",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 40.96511627906977,
"alnum_prop": 0.5915413000283849,
"repo_name": "HPI-SWA-Lab/RSqueak",
"id": "8804d77181ee02946f2311ae8a2a050a12d4103a",
"size": "3523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rsqueakvm/plugins/python/process.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1638"
},
{
"name": "C",
"bytes": "115644"
},
{
"name": "HTML",
"bytes": "4754"
},
{
"name": "PowerShell",
"bytes": "1691"
},
{
"name": "Python",
"bytes": "1140634"
},
{
"name": "Shell",
"bytes": "18715"
},
{
"name": "Smalltalk",
"bytes": "71208"
}
],
"symlink_target": ""
} |
"""
bma2x2 is a micropython module for the Bosch BMA2X2 sensor.
It measures acceleration three axis.
The MIT License (MIT)
Copyright (c) 2016 Sebastian Plamauer oeplse@gmail.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from time import sleep
# from stackoverflow J.F. Sebastian
def _twos_comp(val, bits=8):
"""
compute the 2's complement of int val with bits
"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set
val = val - (1 << bits) # compute negative value
return val # return positive value as is
class BMA2X2():
    """
    Class for BMA2X2 accelerometer
    """

    def __init__(self, i2c, addr):
        """
        Initializes with an I2C object and address as arguments.
        """
        self.i2c = i2c
        self.acc_addr = addr
        # Register 0x00 holds the fixed chip id (useful as a sanity check).
        self.chip_id = self.i2c.readfrom_mem(self.acc_addr, 0x00, 1)[0]
        self.set_range(2)  # default range 2g
        self.set_filter_bw(128)  # default filter bandwidth 128Hz
        self.compensation()

    def _read_accel(self, addr: int) -> float:
        """
        Returns accelerometer data from address, in g.
        """
        # NOTE(review): datasheet-style 12-bit reads usually combine as
        # (MSB << 4) | (LSB >> 4) with two's complement applied to the
        # combined 12-bit value; here each byte is sign-extended separately
        # before combining -- verify against the BMA2x2 datasheet before
        # relying on absolute values.
        LSB, MSB = self.i2c.readfrom_mem(self.acc_addr, addr, 2)
        LSB = _twos_comp(LSB)
        MSB = _twos_comp(MSB)
        # _resolution is mg per LSB (set in set_range); /1000 converts to g.
        return (LSB + (MSB<<4))*self._resolution/1000

    def temperature(self) -> float:
        """
        Returns temperature in degrees C.
        """
        # Register 0x08: 0.5 degC per LSB, offset 23 degC per the formula.
        return self.i2c.readfrom_mem(self.acc_addr, 0x08, 1)[0]/2 + 23

    def set_range(self, accel_range: int):
        """
        Sets the accelerometer range to 2, 4, 8 or 16g.
        """
        ranges = {2:b'\x03', 4:b'\x05', 8:b'\x08', 16:b'\x0C'}
        try:
            range_byte = ranges[accel_range]
        except KeyError:
            raise ValueError('invalid range, use 2, 4, 8 or 16')
        self.i2c.writeto_mem(self.acc_addr, 0x0F, range_byte)
        # Resolution (mg per LSB) corresponding to each range setting.
        self._resolution = {2:0.98, 4:1.95, 8:3.91, 16:7.81}[accel_range]

    def get_range(self) -> int:
        """
        Returns the accelerometer range.
        """
        # Map the raw register value (0x0F) back to the range in g.
        return {3:2,5:4,8:8,12:16}[self.i2c.readfrom_mem(self.acc_addr, 0x0F, 1)[0]]

    def set_filter_bw(self, freq: int):
        """
        Sets the filter bandwith to 8, 16, 32, 64, 128, 256, 512 or 1024Hz.
        """
        freqs = {8:b'\x08', 16:b'\x09', 32:b'\x0A', 64:b'\x0B', 128:b'\x0C', 256:b'\x0D', 512:b'\x0E', 1024:b'\x0F'}
        try:
            freq_byte = freqs[freq]
        except:
            # NOTE(review): bare except also masks non-KeyError faults;
            # KeyError would be the precise exception here.
            raise ValueError('invalid filter bandwith, use 8, 16, 32, 64, 128, 256, 512 or 1024')
        self.i2c.writeto_mem(self.acc_addr, 0x10, freq_byte)

    def get_filter_bw(self) -> int:
        """
        Returns the filter bandwith.
        """
        # Register 0x10 encodes bandwidth as a power of two: bw = 2**(reg-5).
        return 2**(self.i2c.readfrom_mem(self.acc_addr, 0x10, 1)[0]-5)

    def compensation(self, active=None) -> bool:
        """
        With no arguments passed, runs fast compensation.
        With boolean argument passe, activates or deactivates slow compensation.
        """
        # Compensation must run in the 2g range; restore the caller's
        # range afterwards.
        accel_range = self.get_range()
        self.set_range(2)
        self.i2c.writeto_mem(self.acc_addr, 0x37, b'\x21')  # settings x0y0z1 10Hz
        self.i2c.writeto_mem(self.acc_addr, 0x36, b'\x80')  # reset
        if active is None:  # trigger fast comp
            self.i2c.writeto_mem(self.acc_addr, 0x36, b'\x00')  # deactivate slow comp
            active = False
            # Trigger fast compensation for each axis in turn, waiting for
            # each to complete before starting the next.
            #print(self.i2c.readfrom_mem(self.acc_addr, 0x36, 1))
            self.i2c.writeto_mem(self.acc_addr, 0x36, b'\x20')  # x
            sleep(0.1)
            #print(self.i2c.readfrom_mem(self.acc_addr, 0x36, 1))
            self.i2c.writeto_mem(self.acc_addr, 0x36, b'\x40')  # y
            sleep(0.1)
            #print(self.i2c.readfrom_mem(self.acc_addr, 0x36, 1))
            self.i2c.writeto_mem(self.acc_addr, 0x36, b'\x60')  # z
            sleep(0.1)
            #print(self.i2c.readfrom_mem(self.acc_addr, 0x36, 1))
        elif active:  # activate slow comp
            self.i2c.writeto_mem(self.acc_addr, 0x36, b'\x07')
        elif not active:  # deactivate slow comp
            self.i2c.writeto_mem(self.acc_addr, 0x36, b'\x00')
        else:
            # NOTE(review): unreachable -- the branches above cover every
            # truthy/falsy value of `active`.
            raise TypeError('pass a boolean or no argument')
        self.set_range(accel_range)
        return active

    def x(self) -> float:
        """
        Returns x acceleration in g.
        """
        return self._read_accel(0x02)

    def y(self) -> float:
        """
        Returns y acceleration in g.
        """
        return self._read_accel(0x04)

    def z(self) -> float:
        """
        Returns z acceleration in g.
        """
        return self._read_accel(0x06)

    def xyz(self) -> tuple:
        """
        Returns x,y and z accelerations in g as tuple.
        """
        return (self.x(), self.y(), self.z())
| {
"content_hash": "207356afdf7806188892780d139d3696",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 116,
"avg_line_length": 36.34375,
"alnum_prop": 0.5969045571797077,
"repo_name": "micropython-IMU/micropython-bmx055",
"id": "c582d7555c5f58583c19785a0e8d077006818673",
"size": "5815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bma2x2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15958"
}
],
"symlink_target": ""
} |
"""
_models.py : modeling routines for determining variable significance
"""
import numpy as np
def logit(x):
return np.log(x/(1-x))
def _get_conditional_scores(dataframe=None, feature=None, target=None,
positive_class=None, print_output=True,
master_dict=None):
"""
Creates a single-variable Bayesian model from the provided feature. The
feature is an indicator created from a particular level of the original
categorical feature.
"""
# fillna so we can use all category levels
# dataframe.loc[:, feature
dataframe.loc[:, feature] = dataframe.loc[:, feature].fillna(value="NaN")
unique_levels = dataframe.loc[:, feature].unique()
unique_count = len(unique_levels) - 1
# extra_degrees_of_freedom = np.max([0, unique_count])
# epsilon = 1.0e-6
smoothing_factor = 1.0e-4
# conditions used to derive modeling values
# level_and_true = dataframe.loc[:, feature] == positive_class
overall_true = dataframe.loc[:, target] == positive_class
overall_false = dataframe.loc[:, target] != positive_class
# number of True samples
n_true = dataframe[overall_true].loc[:, target].count()
n_false = dataframe[overall_false].loc[:, target].count()
# unconditioned mean
probability_of_true = dataframe.loc[:, target].mean()
# container for conditional scores for each level
conditional_scores = dict()
master_dict[feature] = dict()
# find the conditional scores for each unique category level
for level in dataframe.loc[:, feature].unique():
# Original version: caused boolean index warning
# condition = dataframe.loc[:, feature] == level
# count of True for the current category level
# n_conditional_and_true = dataframe[condition][overall_true].loc[:, feature].count()
# count of False for the current category level
# n_conditional_and_false = dataframe[condition][overall_false].loc[:, feature].count()
true_filter = "{0} == '{1}' & {2} == {3}".format(feature,
level,
target,
positive_class)
false_filter = "{0} == '{1}' & {2} != {3}".format(feature,
level,
target,
positive_class)
n_conditional_and_true = dataframe.query(true_filter).loc[:, feature].count()
n_conditional_and_false = dataframe.query(false_filter).loc[:, feature].count()
# probability of category level given the outcome is the positive_class
prob_of_level_given_true = (n_conditional_and_true + smoothing_factor)/\
(n_true + smoothing_factor)
# probability of category level given the outcome is NOT the positive_class
prob_of_level_given_false = (n_conditional_and_false + smoothing_factor) /\
(n_false + smoothing_factor)
# un-normalized probability the target is the positive class given the level
prob_true_given_level_unnormalized = prob_of_level_given_true * probability_of_true
# un-normalized probability the target is NOT the positive class given the level
prob_false_given_level_unnormalized = prob_of_level_given_false * (1 -
probability_of_true)
# normalized probability the target is the positive class given the level
prob_of_true_given_level = prob_true_given_level_unnormalized / \
(prob_true_given_level_unnormalized + prob_false_given_level_unnormalized)
# find the conditional score for this level
conditional_score = logit(prob_of_true_given_level) - logit(probability_of_true)
# add the score to the container
conditional_scores[level] = conditional_score
if print_output is True:
print("n_true: {0}".format(n_true))
print("n_false: {0}".format(n_false))
print("conditional_n_true: {0}".format(n_conditional_and_true))
print("conditional_n_false: {0}".format(n_conditional_and_false))
print("probability_of_true: {0}".format(probability_of_true))
print("prob_of_level_given_true: {0}".format(prob_of_level_given_true))
print("prob_of_level_given_false: {0}".format(prob_of_level_given_false))
print("prob_of_true_given_level_unnormalized: {0}".format(prob_true_given_level_unnormalized))
print("prob_of_false_given_level_unnormalized: {0}".format(prob_false_given_level_unnormalized))
print("prob_of_true_given_level: {0}".format(prob_of_true_given_level))
print("conditional_score: {0}".format(conditional_score))
master_dict[feature] = conditional_scores
return master_dict
| {
"content_hash": "68de83b1e11dea485d5efca96b32049c",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 109,
"avg_line_length": 50.067961165048544,
"alnum_prop": 0.589102191196432,
"repo_name": "jmwoloso/avalearn",
"id": "a3e706fbee326c0e1b284eff1d9481470a839005",
"size": "5157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avalearn/utils/categorical/_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "50191"
}
],
"symlink_target": ""
} |
import sublime
import sublime_plugin
class AmalgamateCommand(sublime_plugin.WindowCommand):
    """Sublime Text window command that concatenates ("amalgamates")
    project files into single output files.

    Configuration lives in the project data under the "amalgamate" key:
    a mapping of output path -> list of input paths, all relative to the
    directory containing the .sublime-project file.
    """

    settings = {}

    def is_enabled(self):
        # Only enabled when the project defines an "amalgamate" section.
        return self.load_settings()

    def is_visible(self):
        return True

    def description(self):
        return None

    def load_settings(self):
        """Load the "amalgamate" section from the project data.

        Returns True when the section exists (self.settings is then the
        section itself), False otherwise (self.settings is the whole
        project data).
        """
        self.settings = self.window.project_data()
        if 'amalgamate' in self.settings:
            self.settings = self.settings['amalgamate']
            return True
        else:
            return False

    def get(self, value, default=None):
        """Dict-style lookup in the loaded settings with a default."""
        if value in self.settings:
            return self.settings[value]
        else:
            return default

    def run(self):
        if not self.load_settings():
            print('Can\'t load settings')
            return
        # Directory containing the .sublime-project file.
        path = '/'.join(self.window.project_file_name().split('/')[0:-1]) + '/'
        print(self.settings)
        for target in self.settings:
            outfile = path + target
            files = self.settings[target]
            if files is None:
                print('No files specified')
                continue
            print('Outfile:', str(outfile))
            # "with" guarantees the handles are closed even if a read or
            # write raises (the original leaked them on error).
            with open(outfile, 'w') as amalgamation:
                for filename in files:
                    with open(path + filename, 'r') as infile:
                        amalgamation.write(infile.read())
                    # Separator guards against a missing trailing
                    # semicolon/newline in a concatenated source file.
                    amalgamation.write(';')
            self.window.open_file(outfile)
| {
"content_hash": "98da0272647ce4779ec0e0a2634deb71",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 74,
"avg_line_length": 19.741935483870968,
"alnum_prop": 0.6584967320261438,
"repo_name": "donteatyellowsnow/amalgamate",
"id": "ddea89587dfc371772f5e8cbb3668e55dd61a1fb",
"size": "1234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Amalgamate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1234"
}
],
"symlink_target": ""
} |
from django import forms
from django.template import loader
from django.utils.encoding import force_unicode
from django.utils import formats
class CodeEditorWidget(forms.Widget):
    '''A widget for using the ACE editor (ace.ajax.org) on the admin page'''

    @property
    def media(self):
        """JS/CSS assets for the widget, including the mode script."""
        scripts = ["admin/ace/ace.js", "admin/ace/ace_editor.js"]
        if self.mode:
            scripts.append("admin/ace/mode-{0}.js".format(self.mode))
        stylesheets = {'all': ('admin/ace/ace_editor.css',)}
        return forms.Media(js=scripts, css=stylesheets)

    template_name = 'ace_editor.html'

    def __init__(self, mode=None, *args, **kwargs):
        # ACE syntax-highlighting mode name, e.g. "python" (optional).
        self.mode = mode
        super(CodeEditorWidget, self).__init__(*args, **kwargs)

    def _format_value(self, value):
        """Localize (when enabled) and coerce *value* to unicode."""
        if self.is_localized:
            value = formats.localize_input(value)
        return force_unicode(value)

    def get_context(self, name, value, attrs=None):
        """Build the template context: name, mode, normalized value and
        stringified attrs.

        NOTE(review): a non-string, non-None *value* will raise here on
        the ``'\\n' in value`` test -- presumably callers always pass a
        string or None.
        """
        context = {
            'name': name,
            'mode': self.mode,
            'required': self.is_required,
            'True': True,
        }
        # True is injected in the context to allow stricter comparisons
        # for widget attrs. See:
        # https://github.com/brutasse/django-floppyforms/issues/25
        if self.is_hidden:
            context['hidden'] = True
        if value is None:
            value = ''
        if '\n' not in value:
            # The code editor messes up if there's only a single line.
            value += '\n'
        context['value'] = self._format_value(value)
        context['attrs'] = self.build_attrs(attrs)
        for key in context['attrs']:
            attr = context['attrs'][key]
            if attr == 1:
                # 1 == True so 'key="1"' will show up only as 'key'
                # Casting to a string so that it doesn't equal to True
                # See:
                # https://github.com/brutasse/django-floppyforms/issues/25
                if not isinstance(attr, bool):
                    context['attrs'][key] = str(attr)
        return context

    def render(self, name, value, attrs=None, **kwargs):
        """Render the widget template.

        The original also re-normalized *value* here after the context
        was built; that work was dead (the context had already captured
        the formatted value) and has been removed.
        """
        context = self.get_context(name, value, attrs=attrs or {}, **kwargs)
        return loader.render_to_string(self.template_name, context)
| {
"content_hash": "f85411bf7baf59dbdcd5c911fbd9a714",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 73,
"avg_line_length": 29.82608695652174,
"alnum_prop": 0.6710398445092323,
"repo_name": "RedwoodAdmin/RedwoodFramework",
"id": "af24da230ca3a5c77dc2689757c8b44242bbf046",
"size": "2058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expecon/widgets.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "7923"
},
{
"name": "Go",
"bytes": "109387"
},
{
"name": "HTML",
"bytes": "64184"
},
{
"name": "Java",
"bytes": "1306375"
},
{
"name": "JavaScript",
"bytes": "3751414"
},
{
"name": "Python",
"bytes": "32267"
},
{
"name": "Shell",
"bytes": "1756"
}
],
"symlink_target": ""
} |
from twisted.trial import unittest
from buildslave.test.fake.runprocess import Expect
from buildslave.test.util.sourcecommand import SourceCommandTestMixin
from buildslave.commands import darcs
class TestDarcs(SourceCommandTestMixin, unittest.TestCase):
    """Unit test for the buildslave darcs source-step command."""

    def setUp(self):
        self.setUpCommand()

    def tearDown(self):
        self.tearDownCommand()

    def test_simple(self):
        """mode='copy' checkout: clobber workdir and source, `darcs get`
        into source/, record the change context, then copy source ->
        workdir."""
        self.patch_getCommand('darcs', 'path/to/darcs')
        self.clean_environ()
        self.make_command(darcs.Darcs, dict(
            workdir='workdir',
            mode='copy',
            revision=None,
            repourl='http://darcs.net',
        ))

        exp_environ = dict(PWD='.', LC_MESSAGES='C')
        # Expected subprocess invocations, in order; each Expect is
        # followed by the exit code (and optional canned output) it
        # should produce.
        expects = [
            Expect([ 'clobber', 'workdir' ],
                self.basedir)
                + 0,
            Expect([ 'clobber', 'source' ],
                self.basedir)
                + 0,
            Expect([ 'path/to/darcs', 'get', '--verbose', '--lazy',
                     '--repo-name', 'source', 'http://darcs.net'],
                self.basedir,
                sendRC=False, timeout=120, usePTY=False)
                + 0,
            Expect([ 'path/to/darcs', 'changes', '--context' ],
                self.basedir_source,
                sendRC=False, timeout=120, usePTY=False, keepStdout=True,
                environ=exp_environ, sendStderr=False, sendStdout=False)
                + { 'stdout' : example_changes }
                + 0,
            Expect([ 'copy', 'source', 'workdir'],
                self.basedir)
                + 0,
        ]
        self.patch_runprocess(*expects)

        d = self.run_command()
        # The recorded sourcedata is just the repourl.
        d.addCallback(self.check_sourcedata, "http://darcs.net\n")
        return d
# Sample `darcs changes --context` output, used as the canned stdout of
# the 'darcs changes' invocation in TestDarcs.test_simple above.
example_changes = """\
Context:
[Resolve issue1874: recognise network tests on cabal test command line.
Eric Kow <kowey@darcs.net>**20100611102251
Ignore-this: 59a455ef26b5df9a3bdd356e1e37854e
]
[haddocks for SelectChanges
Florent Becker <florent.becker@ens-lyon.org>**20100610140023
Ignore-this: c4203f746fc6278dc5290332e3625283
]
[better message when skipping already decided patches
Florent Becker <florent.becker@ens-lyon.org>**20100531065630
Ignore-this: 426675973555e75086781f0c54fbf925
]
[Accept issue1871: darcs record . failure for changes in subdir.
Eric Kow <kowey@darcs.net>**20100609145047
Ignore-this: dd942b980dd3006bfa5d176ec5cfdf99
]
[Extend the issue1014 test to check that named patches are not duplicated.
Petr Rockai <me@mornfall.net>**20100607185041
Ignore-this: 383ff17461076a798193b6c0c2427bba
]
[Haddock merge2FL and fastRemoveFL in Patch.Depends.
Petr Rockai <me@mornfall.net>**20100607184849
Ignore-this: cd6e79c4e404820d4f0ae94a53aed8c1
]
[Limit index updates to relevant subtree in a few cases.
Petr Rockai <me@mornfall.net>**20100509102248
Ignore-this: fea041133d039cecead73935f0cd6762
]
[Fix a bunch of "unused" warnings.
Petr Rockai <me@mornfall.net>**20100607194111
Ignore-this: 1fec82080eca9c3f10b690ee0ef81e34
]
[Shorten issue1210 test name.
Eric Kow <kowey@darcs.net>**20100608090708
Ignore-this: 57ff2a1cbb9795f80ae3d81e19717a9e
]
[Add test for issue1210: global cache gets recorded in _darcs/prefs/sources
builes.adolfo@googlemail.com**20100608010902
Ignore-this: bc02ada910927be93dd4a5cc9826d20d
]
[Fix typo in the BSD version of date arithmetic (testsuite).
Petr Rockai <me@mornfall.net>**20100608062802
Ignore-this: fdfb7aef46966a18edc2f7e93c0118f0
]
[Let's try to work with BSD date as well.
Petr Rockai <me@mornfall.net>**20100608061631
Ignore-this: 628e6f15e8f8d6801a3f1dd6c8605e17
]
[Fix a race condition in the match-date test.
Petr Rockai <me@mornfall.net>**20100607223257
Ignore-this: 4c6452bfdee6c03eb95abcd646add90f
]
"""
| {
"content_hash": "c1d805c830cf134b2f4532cc2ed333f4",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 75,
"avg_line_length": 30.78688524590164,
"alnum_prop": 0.6765175718849841,
"repo_name": "denny820909/builder",
"id": "e6953e31fb740f2e9cedded3a4bcc5d1a9315902",
"size": "4462",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot_slave-0.8.8-py2.7.egg/buildslave/test/unit/test_commands_darcs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
"""Fichier contenant le type viande."""
from .nourriture import Nourriture
class Viande(Nourriture):

    """Object type: meat ("viande")."""

    nom_type = "viande"

    def __init__(self, cle=""):
        """Object constructor."""
        Nourriture.__init__(self, cle)
        # How filling this item is when eaten -- presumably higher means
        # more nourishing; confirm against the Nourriture base type.
        self.nourrissant = 3
| {
"content_hash": "f9227708d76e62ee082b278fccafe605",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 21.133333333333333,
"alnum_prop": 0.5646687697160884,
"repo_name": "stormi/tsunami",
"id": "557feade322947b1b1efcd1e09a7dcd027f92b62",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/objet/types/viande.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.db import models
from socialregistration.signals import connect
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class GoogleProfile(models.Model):
    """Links a Django user account to a Google account id, per site."""
    user = models.ForeignKey(AUTH_USER_MODEL, unique=True)
    site = models.ForeignKey(Site, default=Site.objects.get_current)
    google_id = models.CharField(max_length = 255)

    def __unicode__(self):
        try:
            return u'%s: %s' % (self.user, self.google_id)
        except User.DoesNotExist:
            # The related user row may have been deleted.
            return u'None'

    def authenticate(self):
        """Authenticate through the google_id-aware auth backend."""
        return authenticate(google_id=self.google_id)
class GoogleAccessToken(models.Model):
    """Stores the OAuth access token for a profile (one token per profile)."""
    profile = models.OneToOneField(GoogleProfile, related_name='access_token')
    access_token = models.CharField(max_length=255)
def save_google_token(sender, user, profile, client, **kwargs):
    """Signal handler: persist the latest Google access token.

    Any previously stored token for this profile is deleted first so
    exactly one GoogleAccessToken row exists per profile.
    """
    try:
        GoogleAccessToken.objects.get(profile=profile).delete()
    except GoogleAccessToken.DoesNotExist:
        pass

    GoogleAccessToken.objects.create(access_token = client.get_access_token(),
        profile = profile)

# Refresh the stored token every time a Google account is (re)connected.
connect.connect(save_google_token, sender=GoogleProfile,
    dispatch_uid='socialregistration_google_token')
| {
"content_hash": "a9e096410a1e232e2283ff7e7d1a67c4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 33.975,
"alnum_prop": 0.7233259749816041,
"repo_name": "kapt/django-socialregistration",
"id": "8506098c8038b24758aab910415b226542cdb125",
"size": "1359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socialregistration/contrib/google/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6158"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "124265"
}
],
"symlink_target": ""
} |
import sys
import re
from twisted.python import log
from twisted.internet import defer
from twisted.enterprise import adbapi
from buildbot.process.buildstep import LogLineObserver
from buildbot.steps.shell import Test
class EqConnectionPool(adbapi.ConnectionPool):
    """This class works the same way as
    twisted.enterprise.adbapi.ConnectionPool. But it adds the ability to
    compare connection pools for equality (by comparing the arguments
    passed to the constructor).

    This is useful when passing the ConnectionPool to a BuildStep, as
    otherwise Buildbot will consider the buildstep (and hence the
    containing buildfactory) to have changed every time the configuration
    is reloaded.

    It also sets some defaults differently from adbapi.ConnectionPool that
    are more suitable for use in MTR.
    """
    def __init__(self, *args, **kwargs):
        # Remember the construction arguments for __eq__ below.
        self._eqKey = (args, kwargs)
        # cp_reconnect/cp_min/cp_max defaults tuned for MTR usage.
        return adbapi.ConnectionPool.__init__(self,
                cp_reconnect=True, cp_min=1, cp_max=3,
                *args, **kwargs)

    def __eq__(self, other):
        # Pools are equal iff constructed with identical arguments.
        if isinstance(other, EqConnectionPool):
            return self._eqKey == other._eqKey
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
class MtrTestFailData:
    """Accumulates the log output of a single failed MTR test case.

    The collected text is delivered to *callback* via fireCallback()
    once the failure block in the log is complete.
    """

    def __init__(self, testname, variant, result, info, text, callback):
        self.callback = callback
        self.testname = testname
        self.variant = variant
        self.result = result
        self.info = info
        self.text = text

    def add(self, line):
        # Append one more raw log line to the collected failure text.
        self.text = self.text + line

    def fireCallback(self):
        """Deliver the accumulated failure data to the callback."""
        return self.callback(self.testname, self.variant, self.result,
                             self.info, self.text)
class MtrLogObserver(LogLineObserver):
"""
Class implementing a log observer (can be passed to
BuildStep.addLogObserver().
It parses the output of mysql-test-run.pl as used in MySQL,
MariaDB, Drizzle, etc.
It counts number of tests run and uses it to provide more accurate
completion estimates.
It parses out test failures from the output and summarises the results on
the Waterfall page. It also passes the information to methods that can be
overridden in a subclass to do further processing on the information."""
_line_re = re.compile(r"^([-._0-9a-zA-z]+)( '[-_ a-zA-Z]+')?\s+(w[0-9]+\s+)?\[ (fail|pass) \]\s*(.*)$")
_line_re2 = re.compile(r"^[-._0-9a-zA-z]+( '[-_ a-zA-Z]+')?\s+(w[0-9]+\s+)?\[ [-a-z]+ \]")
_line_re3 = re.compile(r"^\*\*\*Warnings generated in error logs during shutdown after running tests: (.*)")
_line_re4 = re.compile(r"^The servers were restarted [0-9]+ times$")
_line_re5 = re.compile(r"^Only\s+[0-9]+\s+of\s+[0-9]+\s+completed.$")
def __init__(self, textLimit=5, testNameLimit=16, testType=None):
self.textLimit = textLimit
self.testNameLimit = testNameLimit
self.testType = testType
self.numTests = 0
self.testFail = None
self.failList = []
self.warnList = []
LogLineObserver.__init__(self)
def setLog(self, loog):
LogLineObserver.setLog(self, loog)
d= loog.waitUntilFinished()
d.addCallback(lambda l: self.closeTestFail())
def outLineReceived(self, line):
stripLine = line.strip("\r\n")
m = self._line_re.search(stripLine)
if m:
testname, variant, worker, result, info = m.groups()
self.closeTestFail()
self.numTests += 1
self.step.setProgress('tests', self.numTests)
if result == "fail":
if variant == None:
variant = ""
else:
variant = variant[2:-1]
self.openTestFail(testname, variant, result, info, stripLine + "\n")
else:
m = self._line_re3.search(stripLine)
if m:
stuff = m.group(1)
self.closeTestFail()
testList = stuff.split(" ")
self.doCollectWarningTests(testList)
elif (self._line_re2.search(stripLine) or
self._line_re4.search(stripLine) or
self._line_re5.search(stripLine) or
stripLine == "Test suite timeout! Terminating..." or
stripLine.startswith("mysql-test-run: *** ERROR: Not all tests completed") or
(stripLine.startswith("------------------------------------------------------------")
and self.testFail != None)):
self.closeTestFail()
else:
self.addTestFailOutput(stripLine + "\n")
    def openTestFail(self, testname, variant, result, info, line):
        # Start collecting output for a failed test; doCollectTestFail is
        # invoked when the record is closed (see closeTestFail).
        self.testFail = MtrTestFailData(testname, variant, result, info, line, self.doCollectTestFail)
def addTestFailOutput(self, line):
if self.testFail != None:
self.testFail.add(line)
def closeTestFail(self):
if self.testFail != None:
self.testFail.fireCallback()
self.testFail = None
def addToText(self, src, dst):
lastOne = None
count = 0
for t in src:
if t != lastOne:
dst.append(t)
count += 1
if count >= self.textLimit:
break
def makeText(self, done):
if done:
text = ["test"]
else:
text = ["testing"]
if self.testType:
text.append(self.testType)
fails = self.failList[:]
fails.sort()
self.addToText(fails, text)
warns = self.warnList[:]
warns.sort()
self.addToText(warns, text)
return text
    # Update waterfall status.
    def updateText(self):
        # Push the current (in-progress) summary to the build step's status.
        self.step.step_status.setText(self.makeText(False))
    # Strips a leading lowercase suite prefix ("main.", "innodb.", ...)
    # from a test name.
    strip_re = re.compile(r"^[a-z]+\.")

    def displayTestName(self, testname):
        """Return testname shortened for display: suite prefix removed and
        long names abbreviated with a trailing "..."."""
        displayTestName = self.strip_re.sub("", testname)
        if len(displayTestName) > self.testNameLimit:
            # NOTE(review): truncating to (limit-2) chars and appending "..."
            # yields a string of length limit+1 -- presumably intentional
            # upstream behaviour, left unchanged.
            displayTestName = displayTestName[:(self.testNameLimit-2)] + "..."
        return displayTestName
    def doCollectTestFail(self, testname, variant, result, info, text):
        # Record the failure for the status text, refresh the waterfall
        # display, then hand the full details to the overridable hook.
        self.failList.append("F:" + self.displayTestName(testname))
        self.updateText()
        self.collectTestFail(testname, variant, result, info, text)
    def doCollectWarningTests(self, testList):
        # Record each warning test for the status text, refresh the display,
        # then hand the list to the overridable hook.
        for t in testList:
            self.warnList.append("W:" + self.displayTestName(t))
        self.updateText()
        self.collectWarningTests(testList)
    # These two methods are overridden to actually do something with the data.
    def collectTestFail(self, testname, variant, result, info, text):
        """Hook invoked once per failed test; the default does nothing."""
        pass

    def collectWarningTests(self, testList):
        """Hook invoked with the list of tests that produced shutdown
        warnings; the default does nothing."""
        pass
class MTR(Test):
    """
    Build step that runs mysql-test-run.pl, as used in MySQL, Drizzle,
    MariaDB, etc.

    It uses class MtrLogObserver to parse test results out from the
    output of mysql-test-run.pl, providing better completion time
    estimates and summarising test failures on the waterfall page.

    It also provides access to mysqld server error logs from the test
    run to help debugging any problems.

    Optionally, it can insert into a database data about the test run,
    including details of any test failures.

    Parameters:

    textLimit
        Maximum number of test failures to show on the waterfall page
        (to not flood the page in case of a large number of test
        failures). Defaults to 5.

    testNameLimit
        Maximum length of test names to show unabbreviated in the
        waterfall page, to avoid excessive column width. Defaults to 16.

    parallel
        Value of --parallel option used for mysql-test-run.pl (number
        of processes used to run the test suite in parallel). Defaults
        to 4. This is used to determine the number of server error log
        files to download from the slave. Specifying a too high value
        does not hurt (as nonexisting error logs will be ignored),
        however if using --parallel value greater than the default it
        needs to be specified, or some server error logs will be
        missing.

    dbpool
        An instance of twisted.enterprise.adbapi.ConnectionPool, or None.
        Defaults to None. If specified, results are inserted into the database
        using the ConnectionPool.

        The class process.mtrlogobserver.EqConnectionPool subclass of
        ConnectionPool can be useful to pass as value for dbpool, to
        avoid having config reloads think the Buildstep is changed
        just because it gets a new ConnectionPool instance (even
        though connection parameters are unchanged).

    autoCreateTables
        Boolean, defaults to False. If True (and dbpool is specified), the
        necessary database tables will be created automatically if they do
        not exist already. Alternatively, the tables can be created manually
        from the SQL statements found in the mtrlogobserver.py source file.

    test_type
    test_info
        Two descriptive strings that will be inserted in the database tables if
        dbpool is specified. The test_type string, if specified, will also
        appear on the waterfall page."""

    renderables = [ 'mtr_subdir' ]

    def __init__(self, dbpool=None, test_type=None, test_info="",
                 description=None, descriptionDone=None,
                 autoCreateTables=False, textLimit=5, testNameLimit=16,
                 parallel=4, logfiles=None, lazylogfiles=True,
                 warningPattern="MTR's internal check of the test case '.*' failed",
                 mtr_subdir="mysql-test", **kwargs):
        # Fix: the original default was the mutable ``logfiles={}``, which
        # is shared between every MTR instance; ``None`` + fill-in avoids
        # that while remaining backward compatible.
        if logfiles is None:
            logfiles = {}
        # Fix: copy caller-supplied lists before appending test_type so the
        # caller's objects are never mutated.
        if description is None:
            description = ["testing"]
        else:
            description = list(description)
        if test_type:
            description.append(test_type)
        if descriptionDone is None:
            descriptionDone = ["test"]
        else:
            descriptionDone = list(descriptionDone)
        if test_type:
            descriptionDone.append(test_type)
        Test.__init__(self, logfiles=logfiles, lazylogfiles=lazylogfiles,
                      description=description, descriptionDone=descriptionDone,
                      warningPattern=warningPattern, **kwargs)
        self.dbpool = dbpool
        self.test_type = test_type
        self.test_info = test_info
        self.autoCreateTables = autoCreateTables
        self.textLimit = textLimit
        self.testNameLimit = testNameLimit
        self.parallel = parallel
        self.mtr_subdir = mtr_subdir
        self.progressMetrics += ('tests',)

    def start(self):
        # Add mysql server logfiles.  With --parallel=N, mtr numbers its
        # working directories var/1 .. var/N; the unnumbered var/log is
        # used when running without parallelism.
        for mtr in range(0, self.parallel+1):
            for mysqld in range(1, 4+1):
                if mtr == 0:
                    logname = "mysqld.%d.err" % mysqld
                    filename = "var/log/mysqld.%d.err" % mysqld
                else:
                    logname = "mysqld.%d.err.%d" % (mysqld, mtr)
                    filename = "var/%d/log/mysqld.%d.err" % (mtr, mysqld)
                self.addLogFile(logname, self.mtr_subdir + "/" + filename)

        self.myMtr = self.MyMtrLogObserver(textLimit=self.textLimit,
                                           testNameLimit=self.testNameLimit,
                                           testType=self.test_type)
        self.addLogObserver("stdio", self.myMtr)
        # Insert a row for this test run into the database and set up
        # build properties, then start the command proper.
        d = self.registerInDB()
        d.addCallback(self.afterRegisterInDB)
        d.addErrback(self.failed)

    def getText(self, command, results):
        # The status text comes from the parsed test results.
        return self.myMtr.makeText(True)

    def runInteractionWithRetry(self, actionFn, *args, **kw):
        """
        Run a database transaction with dbpool.runInteraction, but retry the
        transaction in case of a temporary error (like connection lost).

        This is needed to be robust against things like database connection
        idle timeouts.

        The passed callable that implements the transaction must be retryable,
        ie. it must not have any destructive side effects in the case where
        an exception is thrown and/or rollback occurs that would prevent it
        from functioning correctly when called again."""
        def runWithRetry(txn, *args, **kw):
            retryCount = 0
            while(True):
                try:
                    return actionFn(txn, *args, **kw)
                except txn.OperationalError:
                    # NOTE(review): relies on the transaction object exposing
                    # the driver's OperationalError class -- confirm for the
                    # adbapi backend in use.
                    retryCount += 1
                    if retryCount >= 5:
                        raise
                    excType, excValue, excTraceback = sys.exc_info()
                    log.msg("Database transaction failed (caught exception %s(%s)), retrying ..." % (excType, excValue))
                    txn.close()
                    txn.reconnect()
                    txn.reopen()
        return self.dbpool.runInteraction(runWithRetry, *args, **kw)

    def runQueryWithRetry(self, *args, **kw):
        """
        Run a database query, like with dbpool.runQuery, but retry the query in
        case of a temporary error (like connection lost).

        This is needed to be robust against things like database connection
        idle timeouts."""
        def runQuery(txn, *args, **kw):
            txn.execute(*args, **kw)
            return txn.fetchall()
        return self.runInteractionWithRetry(runQuery, *args, **kw)

    def registerInDB(self):
        if self.dbpool:
            return self.runInteractionWithRetry(self.doRegisterInDB)
        else:
            # No database configured; use 0 as a dummy test-run id.
            return defer.succeed(0)

    # The real database work is done in a thread in a synchronous way.
    def doRegisterInDB(self, txn):
        # Auto create tables.
        # This is off by default, as it gives warnings in log file
        # about tables already existing (and I did not find the issue
        # important enough to find a better fix).
        if self.autoCreateTables:
            txn.execute("""
CREATE TABLE IF NOT EXISTS test_run(
    id INT PRIMARY KEY AUTO_INCREMENT,
    branch VARCHAR(100),
    revision VARCHAR(32) NOT NULL,
    platform VARCHAR(100) NOT NULL,
    dt TIMESTAMP NOT NULL,
    bbnum INT NOT NULL,
    typ VARCHAR(32) NOT NULL,
    info VARCHAR(255),
    KEY (branch, revision),
    KEY (dt),
    KEY (platform, bbnum)
) ENGINE=innodb
""")
            txn.execute("""
CREATE TABLE IF NOT EXISTS test_failure(
    test_run_id INT NOT NULL,
    test_name VARCHAR(100) NOT NULL,
    test_variant VARCHAR(16) NOT NULL,
    info_text VARCHAR(255),
    failure_text TEXT,
    PRIMARY KEY (test_run_id, test_name, test_variant)
) ENGINE=innodb
""")
            txn.execute("""
CREATE TABLE IF NOT EXISTS test_warnings(
    test_run_id INT NOT NULL,
    list_id INT NOT NULL,
    list_idx INT NOT NULL,
    test_name VARCHAR(100) NOT NULL,
    PRIMARY KEY (test_run_id, list_id, list_idx)
) ENGINE=innodb
""")

        # Prefer the exact revision that was built; fall back to the
        # requested revision if it is not available.
        revision = self.getProperty("got_revision")
        if revision is None:
            revision = self.getProperty("revision")
        typ = "mtr"
        if self.test_type:
            typ = self.test_type
        txn.execute("""
INSERT INTO test_run(branch, revision, platform, dt, bbnum, typ, info)
VALUES (%s, %s, %s, CURRENT_TIMESTAMP(), %s, %s, %s)
""", (self.getProperty("branch"), revision,
      self.getProperty("buildername"), self.getProperty("buildnumber"),
      typ, self.test_info))

        return txn.lastrowid

    def afterRegisterInDB(self, insert_id):
        # Expose the database row id and the warning-list counter as build
        # properties, then start the actual command.
        self.setProperty("mtr_id", insert_id)
        self.setProperty("mtr_warn_id", 0)

        Test.start(self)

    def reportError(self, err):
        log.msg("Error in async insert into database: %s" % err)

    class MyMtrLogObserver(MtrLogObserver):
        def collectTestFail(self, testname, variant, result, info, text):
            # Insert asynchronously into database.
            dbpool = self.step.dbpool
            run_id = self.step.getProperty("mtr_id")
            if dbpool is None:
                return defer.succeed(None)
            if variant is None:
                variant = ""
            d = self.step.runQueryWithRetry("""
INSERT INTO test_failure(test_run_id, test_name, test_variant, info_text, failure_text)
VALUES (%s, %s, %s, %s, %s)
""", (run_id, testname, variant, info, text))
            d.addErrback(self.step.reportError)
            return d

        def collectWarningTests(self, testList):
            # Insert asynchronously into database.
            dbpool = self.step.dbpool
            if dbpool is None:
                return defer.succeed(None)
            run_id = self.step.getProperty("mtr_id")
            warn_id = self.step.getProperty("mtr_warn_id")
            self.step.setProperty("mtr_warn_id", warn_id + 1)
            # One multi-row INSERT with a (run, list, idx, name) tuple per test.
            q = ("INSERT INTO test_warnings(test_run_id, list_id, list_idx, test_name) " +
                 "VALUES " + ", ".join(map(lambda x: "(%s, %s, %s, %s)", testList)))
            v = []
            idx = 0
            for t in testList:
                v.extend([run_id, warn_id, idx, t])
                idx = idx + 1
            d = self.step.runQueryWithRetry(q, tuple(v))
            d.addErrback(self.step.reportError)
            return d
| {
"content_hash": "7657f5d15028c7f676b683b3267ee357",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 120,
"avg_line_length": 37.70152505446623,
"alnum_prop": 0.6037561398439757,
"repo_name": "denny820909/builder",
"id": "a49b90e2129c5555a2d6fb6205181e5203bc1529",
"size": "18011",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/process/mtrlogobserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
import asyncio
import os
import threading
import unittest
from datetime import datetime, timedelta
import pytest
from reactivex.scheduler.eventloop import AsyncIOThreadSafeScheduler
# True when running under a CI service (GitHub Actions sets the CI env var);
# used to skip timing-sensitive tests that are flaky there.
CI = os.getenv("CI") is not None
class TestAsyncIOThreadSafeScheduler(unittest.TestCase):
    """Tests for AsyncIOThreadSafeScheduler: clock behaviour and scheduling
    (including from foreign threads) onto an asyncio event loop."""

    @pytest.mark.skipif(CI, reason="Flaky test in GitHub Actions")
    def test_asyncio_threadsafe_schedule_now(self):
        loop = asyncio.get_event_loop()
        scheduler = AsyncIOThreadSafeScheduler(loop)
        diff = scheduler.now - datetime.utcfromtimestamp(loop.time())
        # The scheduler's clock should closely track the loop's clock.
        assert abs(diff) < timedelta(milliseconds=2)

    @pytest.mark.skipif(CI, reason="Flaky test in GitHub Actions")
    def test_asyncio_threadsafe_schedule_now_units(self):
        loop = asyncio.get_event_loop()
        scheduler = AsyncIOThreadSafeScheduler(loop)
        diff = scheduler.now
        # Fix: the original used "yield from asyncio.sleep(0.1)" inside a
        # plain method, which turned the test into a generator function that
        # pytest never actually executed.  Drive the sleep through the loop.
        loop.run_until_complete(asyncio.sleep(0.1))
        diff = scheduler.now - diff
        assert timedelta(milliseconds=80) < diff < timedelta(milliseconds=180)

    def test_asyncio_threadsafe_schedule_action(self):
        loop = asyncio.get_event_loop()

        async def go():
            scheduler = AsyncIOThreadSafeScheduler(loop)
            ran = False

            def action(scheduler, state):
                nonlocal ran
                ran = True

            def schedule():
                # Schedule from a different thread than the loop's.
                scheduler.schedule(action)

            threading.Thread(target=schedule).start()
            await asyncio.sleep(0.1)
            assert ran is True

        loop.run_until_complete(go())

    def test_asyncio_threadsafe_schedule_action_due(self):
        loop = asyncio.get_event_loop()

        async def go():
            scheduler = AsyncIOThreadSafeScheduler(loop)
            starttime = loop.time()
            endtime = None

            def action(scheduler, state):
                nonlocal endtime
                endtime = loop.time()

            def schedule():
                scheduler.schedule_relative(0.2, action)

            threading.Thread(target=schedule).start()
            await asyncio.sleep(0.3)
            assert endtime is not None
            diff = endtime - starttime
            # Allow a little clock slack below the requested 0.2s delay.
            assert diff > 0.18

        loop.run_until_complete(go())

    def test_asyncio_threadsafe_schedule_action_cancel(self):
        loop = asyncio.get_event_loop()

        async def go():
            ran = False
            scheduler = AsyncIOThreadSafeScheduler(loop)

            def action(scheduler, state):
                nonlocal ran
                ran = True

            def schedule():
                d = scheduler.schedule_relative(0.05, action)
                d.dispose()

            threading.Thread(target=schedule).start()
            await asyncio.sleep(0.3)
            # Disposed before its due time, so the action must not run.
            assert ran is False

        loop.run_until_complete(go())

    def cancel_same_thread_common(self, test_body):
        """Shared driver for the dispose-on-scheduler-thread tests below."""
        update_state = {"ran": False, "dispose_completed": False}

        def action(scheduler, state):
            update_state["ran"] = True

        # Make the actual test body run in a daemon thread, so that in case of
        # failure it doesn't hang indefinitely.
        def thread_target():
            loop = asyncio.new_event_loop()
            scheduler = AsyncIOThreadSafeScheduler(loop)

            test_body(scheduler, action, update_state)

            async def go():
                await asyncio.sleep(0.2)

            loop.run_until_complete(go())

        thread = threading.Thread(target=thread_target)
        thread.daemon = True
        thread.start()
        thread.join(0.3)
        assert update_state["dispose_completed"] is True
        assert update_state["ran"] is False

    def test_asyncio_threadsafe_cancel_non_relative_same_thread(self):
        def test_body(scheduler, action, update_state):
            d = scheduler.schedule(action)
            # Test case when dispose is called on thread on which loop is not
            # yet running, and non-relative schedule is used.
            d.dispose()
            update_state["dispose_completed"] = True

        self.cancel_same_thread_common(test_body)

    def test_asyncio_threadsafe_schedule_action_cancel_same_thread(self):
        def test_body(scheduler, action, update_state):
            d = scheduler.schedule_relative(0.05, action)
            # Test case when dispose is called on thread on which loop is not
            # yet running, and relative schedule is used.
            d.dispose()
            update_state["dispose_completed"] = True

        self.cancel_same_thread_common(test_body)

    def test_asyncio_threadsafe_schedule_action_cancel_same_loop(self):
        def test_body(scheduler, action, update_state):
            d = scheduler.schedule_relative(0.1, action)

            def do_dispose():
                d.dispose()
                update_state["dispose_completed"] = True

            # Test case when dispose is called in loop's callback.
            scheduler._loop.call_soon(do_dispose)

        self.cancel_same_thread_common(test_body)
| {
"content_hash": "b244a8f5e7a6daba49c51bb4d8270db0",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 78,
"avg_line_length": 32.152866242038215,
"alnum_prop": 0.6071711568938193,
"repo_name": "ReactiveX/RxPY",
"id": "18e30d4453d9bbcfeed62dbd078af8341b69cae9",
"size": "5048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_scheduler/test_eventloop/test_asynciothreadsafescheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1503"
},
{
"name": "Jupyter Notebook",
"bytes": "347338"
},
{
"name": "Python",
"bytes": "1726895"
}
],
"symlink_target": ""
} |
import os
import pytest
# Live tests drive a real Chrome browser; allow skipping them by setting
# the SKIP_SLOW_TESTS environment variable to any non-empty value.
pytestmark = pytest.mark.skipif(
    # bool() keeps the exact truthiness of the original (unset or "" means
    # "run the tests") while passing skipif a proper boolean condition.
    bool(os.environ.get("SKIP_SLOW_TESTS")),
    reason="Live tests are slow as they need to run chrome",
)
| {
"content_hash": "7575e6d6e000ac435c5756916bd52fe6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 59,
"avg_line_length": 20,
"alnum_prop": 0.70625,
"repo_name": "meine-stadt-transparent/meine-stadt-transparent",
"id": "1ac29915f7aefb1b95a4fadde0153b6381a9dbc9",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mainapp/tests/live/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2397"
},
{
"name": "HTML",
"bytes": "158632"
},
{
"name": "JavaScript",
"bytes": "62206"
},
{
"name": "Python",
"bytes": "601144"
},
{
"name": "SCSS",
"bytes": "40214"
},
{
"name": "Shell",
"bytes": "1363"
}
],
"symlink_target": ""
} |
"""
A version of first order predicate logic, built on
top of the typed lambda calculus.
"""
from __future__ import print_function, unicode_literals
import re
import operator
from collections import defaultdict
from functools import reduce
from nltk.util import Trie
from nltk.internals import Counter
from nltk.compat import (total_ordering, string_types,
python_2_unicode_compatible)
# Pseudo-operator name for function application; used as a key in
# LogicParser.operator_precedence and as a parsing context marker.
APP = 'APP'

# Module-wide counter; presumably used to generate fresh variable names
# elsewhere in this module -- usage not visible in this chunk.
_counter = Counter()
class Tokens(object):
    """Token constants for the logic grammar, with the lists of accepted
    spellings for each operator (e.g. 'and', '&' and '^' all mean AND)."""
    LAMBDA = '\\'; LAMBDA_LIST = ['\\']

    #Quantifiers
    EXISTS = 'exists'; EXISTS_LIST = ['some', 'exists', 'exist']
    ALL = 'all'; ALL_LIST = ['all', 'forall']

    #Punctuation
    DOT = '.'
    OPEN = '('
    CLOSE = ')'
    COMMA = ','

    #Operations
    NOT = '-'; NOT_LIST = ['not', '-', '!']
    AND = '&'; AND_LIST = ['and', '&', '^']
    OR = '|'; OR_LIST = ['or', '|']
    IMP = '->'; IMP_LIST = ['implies', '->', '=>']
    IFF = '<->'; IFF_LIST = ['iff', '<->', '<=>']
    EQ = '='; EQ_LIST = ['=', '==']
    NEQ = '!='; NEQ_LIST = ['!=']

    #Collections of tokens
    BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST
    QUANTS = EXISTS_LIST + ALL_LIST
    PUNCT = [DOT, OPEN, CLOSE, COMMA]

    TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST

    #Special
    # Tokens made up purely of special characters; these feed the
    # tokenizer's symbol trie (see LogicParser.get_all_symbols).
    SYMBOLS = [x for x in TOKENS if re.match(r'^[-\\.(),!&^|>=<]*$', x)]
def boolean_ops():
    """
    Boolean operators
    """
    ops = [
        ("negation", Tokens.NOT),
        ("conjunction", Tokens.AND),
        ("disjunction", Tokens.OR),
        ("implication", Tokens.IMP),
        ("equivalence", Tokens.IFF),
    ]
    for pair in ops:
        print("%-15s\t%s" % pair)
def equality_preds():
    """
    Equality predicates
    """
    preds = [
        ("equality", Tokens.EQ),
        ("inequality", Tokens.NEQ),
    ]
    for pair in preds:
        print("%-15s\t%s" % pair)
def binding_ops():
    """
    Binding operators
    """
    binders = [
        ("existential", Tokens.EXISTS),
        ("universal", Tokens.ALL),
        ("lambda", Tokens.LAMBDA),
    ]
    for pair in binders:
        print("%-15s\t%s" % pair)
@python_2_unicode_compatible
class LogicParser(object):
"""A lambda calculus expression parser."""
def __init__(self, type_check=False):
"""
:param type_check: bool should type checking be performed?
to their types.
"""
assert isinstance(type_check, bool)
self._currentIndex = 0
self._buffer = []
self.type_check = type_check
"""A list of tuples of quote characters. The 4-tuple is comprised
of the start character, the end character, the escape character, and
a boolean indicating whether the quotes should be included in the
result. Quotes are used to signify that a token should be treated as
atomic, ignoring any special characters within the token. The escape
character allows the quote end character to be used within the quote.
If True, the boolean indicates that the final token should contain the
quote and escape characters.
This method exists to be overridden"""
self.quote_chars = []
self.operator_precedence = dict(
[(x,1) for x in Tokens.LAMBDA_LIST] + \
[(x,2) for x in Tokens.NOT_LIST] + \
[(APP,3)] + \
[(x,4) for x in Tokens.EQ_LIST+Tokens.NEQ_LIST] + \
[(x,5) for x in Tokens.QUANTS] + \
[(x,6) for x in Tokens.AND_LIST] + \
[(x,7) for x in Tokens.OR_LIST] + \
[(x,8) for x in Tokens.IMP_LIST] + \
[(x,9) for x in Tokens.IFF_LIST] + \
[(None,10)])
self.right_associated_operations = [APP]
def parse(self, data, signature=None):
"""
Parse the expression.
:param data: str for the input to be parsed
:param signature: ``dict<str, str>`` that maps variable names to type
strings
:returns: a parsed Expression
"""
data = data.rstrip()
self._currentIndex = 0
self._buffer, mapping = self.process(data)
try:
result = self.process_next_expression(None)
if self.inRange(0):
raise UnexpectedTokenException(self._currentIndex+1, self.token(0))
except LogicalExpressionException as e:
msg = '%s\n%s\n%s^' % (e, data, ' '*mapping[e.index-1])
raise LogicalExpressionException(None, msg)
if self.type_check:
result.typecheck(signature)
return result
def process(self, data):
"""Split the data into tokens"""
out = []
mapping = {}
tokenTrie = Trie(self.get_all_symbols())
token = ''
data_idx = 0
token_start_idx = data_idx
while data_idx < len(data):
cur_data_idx = data_idx
quoted_token, data_idx = self.process_quoted_token(data_idx, data)
if quoted_token:
if not token:
token_start_idx = cur_data_idx
token += quoted_token
continue
st = tokenTrie
c = data[data_idx]
symbol = ''
while c in st:
symbol += c
st = st[c]
if len(data)-data_idx > len(symbol):
c = data[data_idx+len(symbol)]
else:
break
if Trie.LEAF in st:
#token is a complete symbol
if token:
mapping[len(out)] = token_start_idx
out.append(token)
token = ''
mapping[len(out)] = data_idx
out.append(symbol)
data_idx += len(symbol)
else:
if data[data_idx] in ' \t\n': #any whitespace
if token:
mapping[len(out)] = token_start_idx
out.append(token)
token = ''
else:
if not token:
token_start_idx = data_idx
token += data[data_idx]
data_idx += 1
if token:
mapping[len(out)] = token_start_idx
out.append(token)
mapping[len(out)] = len(data)
mapping[len(out)+1] = len(data)+1
return out, mapping
def process_quoted_token(self, data_idx, data):
token = ''
c = data[data_idx]
i = data_idx
for start, end, escape, incl_quotes in self.quote_chars:
if c == start:
if incl_quotes:
token += c
i += 1
while data[i] != end:
if data[i] == escape:
if incl_quotes:
token += data[i]
i += 1
if len(data) == i: #if there are no more chars
raise LogicalExpressionException(None, "End of input reached. "
"Escape character [%s] found at end."
% escape)
token += data[i]
else:
token += data[i]
i += 1
if len(data) == i:
raise LogicalExpressionException(None, "End of input reached. "
"Expected: [%s]" % end)
if incl_quotes:
token += data[i]
i += 1
if not token:
raise LogicalExpressionException(None, 'Empty quoted token found')
break
return token, i
def get_all_symbols(self):
"""This method exists to be overridden"""
return Tokens.SYMBOLS
def inRange(self, location):
"""Return TRUE if the given location is within the buffer"""
return self._currentIndex+location < len(self._buffer)
def token(self, location=None):
"""Get the next waiting token. If a location is given, then
return the token at currentIndex+location without advancing
currentIndex; setting it gives lookahead/lookback capability."""
try:
if location is None:
tok = self._buffer[self._currentIndex]
self._currentIndex += 1
else:
tok = self._buffer[self._currentIndex+location]
return tok
except IndexError:
raise ExpectedMoreTokensException(self._currentIndex+1)
def isvariable(self, tok):
return tok not in Tokens.TOKENS
def process_next_expression(self, context):
"""Parse the next complete expression from the stream and return it."""
try:
tok = self.token()
except ExpectedMoreTokensException:
raise ExpectedMoreTokensException(self._currentIndex+1, message='Expression expected.')
accum = self.handle(tok, context)
if not accum:
raise UnexpectedTokenException(self._currentIndex, tok, message='Expression expected.')
return self.attempt_adjuncts(accum, context)
def handle(self, tok, context):
"""This method is intended to be overridden for logics that
use different operators or expressions"""
if self.isvariable(tok):
return self.handle_variable(tok, context)
elif tok in Tokens.NOT_LIST:
return self.handle_negation(tok, context)
elif tok in Tokens.LAMBDA_LIST:
return self.handle_lambda(tok, context)
elif tok in Tokens.QUANTS:
return self.handle_quant(tok, context)
elif tok == Tokens.OPEN:
return self.handle_open(tok, context)
def attempt_adjuncts(self, expression, context):
cur_idx = None
while cur_idx != self._currentIndex: #while adjuncts are added
cur_idx = self._currentIndex
expression = self.attempt_EqualityExpression(expression, context)
expression = self.attempt_ApplicationExpression(expression, context)
expression = self.attempt_BooleanExpression(expression, context)
return expression
def handle_negation(self, tok, context):
return self.make_NegatedExpression(self.process_next_expression(Tokens.NOT))
def make_NegatedExpression(self, expression):
return NegatedExpression(expression)
def handle_variable(self, tok, context):
#It's either: 1) a predicate expression: sees(x,y)
# 2) an application expression: P(x)
# 3) a solo variable: john OR x
accum = self.make_VariableExpression(tok)
if self.inRange(0) and self.token(0) == Tokens.OPEN:
#The predicate has arguments
if not isinstance(accum, FunctionVariableExpression) and \
not isinstance(accum, ConstantExpression):
raise LogicalExpressionException(self._currentIndex,
"'%s' is an illegal predicate name. "
"Individual variables may not be used as "
"predicates." % tok)
self.token() #swallow the Open Paren
#curry the arguments
accum = self.make_ApplicationExpression(accum, self.process_next_expression(APP))
while self.inRange(0) and self.token(0) == Tokens.COMMA:
self.token() #swallow the comma
accum = self.make_ApplicationExpression(accum, self.process_next_expression(APP))
self.assertNextToken(Tokens.CLOSE)
return accum
def get_next_token_variable(self, description):
try:
tok = self.token()
except ExpectedMoreTokensException as e:
raise ExpectedMoreTokensException(e.index, 'Variable expected.')
if isinstance(self.make_VariableExpression(tok), ConstantExpression):
raise LogicalExpressionException(self._currentIndex,
"'%s' is an illegal variable name. "
"Constants may not be %s." % (tok, description))
return Variable(tok)
def handle_lambda(self, tok, context):
# Expression is a lambda expression
if not self.inRange(0):
raise ExpectedMoreTokensException(self._currentIndex+2,
message="Variable and Expression expected following lambda operator.")
vars = [self.get_next_token_variable('abstracted')]
while True:
if not self.inRange(0) or (self.token(0) == Tokens.DOT and not self.inRange(1)):
raise ExpectedMoreTokensException(self._currentIndex+2, message="Expression expected.")
if not self.isvariable(self.token(0)):
break
# Support expressions like: \x y.M == \x.\y.M
vars.append(self.get_next_token_variable('abstracted'))
if self.inRange(0) and self.token(0) == Tokens.DOT:
self.token() #swallow the dot
accum = self.process_next_expression(tok)
while vars:
accum = self.make_LambdaExpression(vars.pop(), accum)
return accum
def handle_quant(self, tok, context):
# Expression is a quantified expression: some x.M
factory = self.get_QuantifiedExpression_factory(tok)
if not self.inRange(0):
raise ExpectedMoreTokensException(self._currentIndex+2,
message="Variable and Expression expected following quantifier '%s'." % tok)
vars = [self.get_next_token_variable('quantified')]
while True:
if not self.inRange(0) or (self.token(0) == Tokens.DOT and not self.inRange(1)):
raise ExpectedMoreTokensException(self._currentIndex+2, message="Expression expected.")
if not self.isvariable(self.token(0)):
break
# Support expressions like: some x y.M == some x.some y.M
vars.append(self.get_next_token_variable('quantified'))
if self.inRange(0) and self.token(0) == Tokens.DOT:
self.token() #swallow the dot
accum = self.process_next_expression(tok)
while vars:
accum = self.make_QuanifiedExpression(factory, vars.pop(), accum)
return accum
def get_QuantifiedExpression_factory(self, tok):
"""This method serves as a hook for other logic parsers that
have different quantifiers"""
if tok in Tokens.EXISTS_LIST:
return ExistsExpression
elif tok in Tokens.ALL_LIST:
return AllExpression
else:
self.assertToken(tok, Tokens.QUANTS)
def make_QuanifiedExpression(self, factory, variable, term):
return factory(variable, term)
def handle_open(self, tok, context):
#Expression is in parens
accum = self.process_next_expression(None)
self.assertNextToken(Tokens.CLOSE)
return accum
def attempt_EqualityExpression(self, expression, context):
"""Attempt to make an equality expression. If the next token is an
equality operator, then an EqualityExpression will be returned.
Otherwise, the parameter will be returned."""
if self.inRange(0):
tok = self.token(0)
if tok in Tokens.EQ_LIST + Tokens.NEQ_LIST and self.has_priority(tok, context):
self.token() #swallow the "=" or "!="
expression = self.make_EqualityExpression(expression, self.process_next_expression(tok))
if tok in Tokens.NEQ_LIST:
expression = self.make_NegatedExpression(expression)
return expression
def make_EqualityExpression(self, first, second):
"""This method serves as a hook for other logic parsers that
have different equality expression classes"""
return EqualityExpression(first, second)
def attempt_BooleanExpression(self, expression, context):
"""Attempt to make a boolean expression. If the next token is a boolean
operator, then a BooleanExpression will be returned. Otherwise, the
parameter will be returned."""
while self.inRange(0):
tok = self.token(0)
factory = self.get_BooleanExpression_factory(tok)
if factory and self.has_priority(tok, context):
self.token() #swallow the operator
expression = self.make_BooleanExpression(factory, expression,
self.process_next_expression(tok))
else:
break
return expression
def get_BooleanExpression_factory(self, tok):
"""This method serves as a hook for other logic parsers that
have different boolean operators"""
if tok in Tokens.AND_LIST:
return AndExpression
elif tok in Tokens.OR_LIST:
return OrExpression
elif tok in Tokens.IMP_LIST:
return ImpExpression
elif tok in Tokens.IFF_LIST:
return IffExpression
else:
return None
def make_BooleanExpression(self, factory, first, second):
return factory(first, second)
def attempt_ApplicationExpression(self, expression, context):
"""Attempt to make an application expression. The next tokens are
a list of arguments in parens, then the argument expression is a
function being applied to the arguments. Otherwise, return the
argument expression."""
if self.has_priority(APP, context):
if self.inRange(0) and self.token(0) == Tokens.OPEN:
if not isinstance(expression, LambdaExpression) and \
not isinstance(expression, ApplicationExpression) and \
not isinstance(expression, FunctionVariableExpression) and \
not isinstance(expression, ConstantExpression):
raise LogicalExpressionException(self._currentIndex,
("The function '%s" % expression) +
"' is not a Lambda Expression, an "
"Application Expression, or a "
"functional predicate, so it may "
"not take arguments.")
self.token() #swallow then open paren
#curry the arguments
accum = self.make_ApplicationExpression(expression, self.process_next_expression(APP))
while self.inRange(0) and self.token(0) == Tokens.COMMA:
self.token() #swallow the comma
accum = self.make_ApplicationExpression(accum, self.process_next_expression(APP))
self.assertNextToken(Tokens.CLOSE)
return accum
return expression
def make_ApplicationExpression(self, function, argument):
return ApplicationExpression(function, argument)
def make_VariableExpression(self, name):
return VariableExpression(Variable(name))
def make_LambdaExpression(self, variable, term):
return LambdaExpression(variable, term)
def has_priority(self, operation, context):
return self.operator_precedence[operation] < self.operator_precedence[context] or \
(operation in self.right_associated_operations and \
self.operator_precedence[operation] == self.operator_precedence[context])
def assertNextToken(self, expected):
try:
tok = self.token()
except ExpectedMoreTokensException as e:
raise ExpectedMoreTokensException(e.index, message="Expected token '%s'." % expected)
if isinstance(expected, list):
if tok not in expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
else:
if tok != expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
def assertToken(self, tok, expected):
if isinstance(expected, list):
if tok not in expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
else:
if tok != expected:
raise UnexpectedTokenException(self._currentIndex, tok, expected)
    def __repr__(self):
        """Show the parser's class name and a peek at the next unread token."""
        if self.inRange(0):
            msg = 'Next token: ' + self.token(0)
        else:
            msg = 'No more tokens'
        return '<' + self.__class__.__name__ + ': ' + msg + '>'
def read_logic(s, logic_parser=None, encoding=None):
    """
    Convert a file of First Order Formulas into a list of {Expression}s.
    :param s: the contents of the file
    :type s: str
    :param logic_parser: The parser to be used to parse the logical expression
    :type logic_parser: LogicParser
    :param encoding: the encoding of the input string, if it is binary
    :type encoding: str
    :return: a list of parsed formulas.
    :rtype: list(Expression)
    """
    if encoding is not None:
        s = s.decode(encoding)
    if logic_parser is None:
        logic_parser = LogicParser()
    statements = []
    for linenum, line in enumerate(s.splitlines()):
        stripped = line.strip()
        # Skip blank lines and comment lines.
        if not stripped or stripped.startswith('#'):
            continue
        try:
            statements.append(logic_parser.parse(stripped))
        except LogicalExpressionException:
            raise ValueError('Unable to parse line %s: %s' % (linenum, stripped))
    return statements
@total_ordering
@python_2_unicode_compatible
class Variable(object):
    """A logic variable, identified purely by its name.  Equality, ordering
    and hashing are all delegated to the name string; the remaining rich
    comparisons are supplied by ``@total_ordering``."""
    def __init__(self, name):
        """
        :param name: the name of the variable
        """
        assert isinstance(name, string_types), "%s is not a string" % name
        self.name = name
    def __eq__(self, other):
        return isinstance(other, Variable) and self.name == other.name
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        if not isinstance(other, Variable):
            raise TypeError
        return self.name < other.name
    def substitute_bindings(self, bindings):
        """Return the value bound to this variable in ``bindings``, or the
        variable itself if it is unbound."""
        return bindings.get(self, self)
    def __hash__(self):
        return hash(self.name)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "Variable('%s')" % self.name
def unique_variable(pattern=None, ignore=None):
    """
    Return a new, unique variable.
    :param pattern: ``Variable`` that is being replaced.  The new variable must
        be the same type.
    :param ignore: a set of ``Variable`` objects that should not be returned
        from this function.
    :rtype: Variable
    """
    if pattern is not None:
        # Pick a prefix that keeps the fresh variable in the same syntactic
        # class (individual / function / event) as the one being replaced.
        if is_indvar(pattern.name):
            prefix = 'z'
        elif is_funcvar(pattern.name):
            prefix = 'F'
        elif is_eventvar(pattern.name):
            prefix = 'e0'
        else:
            assert False, "Cannot generate a unique constant"
    else:
        prefix = 'z'
    v = Variable("%s%s" % (prefix, _counter.get()))
    # Keep drawing fresh counter values until the name avoids ``ignore``.
    while ignore is not None and v in ignore:
        v = Variable("%s%s" % (prefix, _counter.get()))
    return v
def skolem_function(univ_scope=None):
    """
    Return a skolem function over the variables in univ_scope.
    :param univ_scope: a collection of ``Variable`` objects (universally
        quantified variables in scope); the skolem function is applied to
        each of them in turn.
    """
    skolem = VariableExpression(Variable('F%s' % _counter.get()))
    if univ_scope:
        for v in list(univ_scope):
            skolem = skolem(VariableExpression(v))
    return skolem
@python_2_unicode_compatible
class Type(object):
    """Abstract base class for semantic types.  Subclasses must provide
    ``__str__``; both ``repr`` and ``hash`` are derived from it."""
    def __repr__(self):
        return "%s" % self
    def __hash__(self):
        return hash("%s" % self)
    @classmethod
    def fromstring(cls, s):
        """Parse a type from its string representation, e.g. ``'<e,t>'``."""
        return read_type(s)
@python_2_unicode_compatible
class ComplexType(Type):
    """A function type ``<first,second>`` mapping arguments of type
    ``first`` to results of type ``second``."""
    def __init__(self, first, second):
        assert(isinstance(first, Type)), "%s is not a Type" % first
        assert(isinstance(second, Type)), "%s is not a Type" % second
        self.first = first
        self.second = second
    def __eq__(self, other):
        return isinstance(other, ComplexType) and \
               self.first == other.first and \
               self.second == other.second
    def __ne__(self, other):
        return not self == other
    __hash__ = Type.__hash__
    def matches(self, other):
        """Return True if this type is consistent with ``other``,
        component-wise.  A non-complex ``other`` is only matched when this
        type is ANY_TYPE."""
        if isinstance(other, ComplexType):
            return self.first.matches(other.first) and \
                   self.second.matches(other.second)
        else:
            return self == ANY_TYPE
    def resolve(self, other):
        """Unify this type with ``other`` and return the most specific
        combined type, or None if they are incompatible."""
        if other == ANY_TYPE:
            return self
        elif isinstance(other, ComplexType):
            f = self.first.resolve(other.first)
            s = self.second.resolve(other.second)
            if f and s:
                return ComplexType(f,s)
            else:
                return None
        elif self == ANY_TYPE:
            return other
        else:
            return None
    def __str__(self):
        if self == ANY_TYPE:
            return "%s" % ANY_TYPE
        else:
            return '<%s,%s>' % (self.first, self.second)
    def str(self):
        """Verbose rendering, e.g. ``(IND -> BOOL)``."""
        if self == ANY_TYPE:
            return ANY_TYPE.str()
        else:
            return '(%s -> %s)' % (self.first.str(), self.second.str())
class BasicType(Type):
    """An atomic (non-function) type.  Two basic types are equal iff they
    render to the same string."""
    def __eq__(self, other):
        if not isinstance(other, BasicType):
            return False
        return ("%s" % self) == ("%s" % other)
    def __ne__(self, other):
        return not self == other
    __hash__ = Type.__hash__
    def matches(self, other):
        """A basic type matches itself, and every type matches ANY_TYPE."""
        return self == other or other == ANY_TYPE
    def resolve(self, other):
        """Unification: this type when compatible with ``other``, else None."""
        return self if self.matches(other) else None
@python_2_unicode_compatible
class EntityType(BasicType):
    """The type of individuals (entities); rendered as 'e' / 'IND'."""
    def __str__(self):
        return 'e'
    def str(self):
        return 'IND'
@python_2_unicode_compatible
class TruthValueType(BasicType):
    """The type of truth values; rendered as 't' / 'BOOL'."""
    def __str__(self):
        return 't'
    def str(self):
        return 'BOOL'
@python_2_unicode_compatible
class EventType(BasicType):
    """The type of events; rendered as 'v' / 'EVENT'."""
    def __str__(self):
        return 'v'
    def str(self):
        return 'EVENT'
@python_2_unicode_compatible
class AnyType(BasicType, ComplexType):
    """The wildcard type '?', which matches and resolves with every other
    type.  It also acts as a complex type whose argument and result types
    are itself, so it can stand in wherever a function type is expected."""
    def __init__(self):
        pass
    @property
    def first(self):
        # Viewed as a function type, ANY's argument type is ANY itself.
        return self
    @property
    def second(self):
        # Viewed as a function type, ANY's result type is ANY itself.
        return self
    def __eq__(self, other):
        """Equal to any other ``AnyType``; otherwise defer to the other
        operand's notion of equality."""
        if isinstance(other, AnyType):
            return True
        # BUGFIX: ``other.__eq__(self)`` may return NotImplemented (e.g. for
        # a non-Type operand), and NotImplemented is truthy — the previous
        # ``isinstance(...) or other.__eq__(self)`` therefore reported
        # equality with arbitrary objects.  Treat NotImplemented as
        # "not equal" and coerce the result to a plain bool.
        result = other.__eq__(self)
        return result is not NotImplemented and bool(result)
    def __ne__(self, other):
        return not self == other
    __hash__ = Type.__hash__
    def matches(self, other):
        """ANY matches every type."""
        return True
    def resolve(self, other):
        """Unifying with ANY yields the other type unchanged."""
        return other
    def __str__(self):
        return '?'
    def str(self):
        return 'ANY'
# Singleton instances of the basic types; the rest of the module compares
# against these rather than constructing fresh instances.
TRUTH_TYPE = TruthValueType()
ENTITY_TYPE = EntityType()
EVENT_TYPE = EventType()
ANY_TYPE = AnyType()
def read_type(type_string):
    """Parse a ``Type`` from a string such as ``'e'``, ``'t'``, ``'?'`` or
    ``'<e,t>'`` (angle brackets may nest).
    :raises LogicalExpressionException: on an unrecognized leading character
    """
    assert isinstance(type_string, string_types)
    type_string = type_string.replace(' ', '') #remove spaces
    if type_string[0] == '<':
        assert type_string[-1] == '>'
        paren_count = 0
        for i,char in enumerate(type_string):
            if char == '<':
                paren_count += 1
            elif char == '>':
                paren_count -= 1
                assert paren_count > 0
            elif char == ',':
                # The comma at nesting depth 1 separates argument from result.
                if paren_count == 1:
                    break
        return ComplexType(read_type(type_string[1 :i ]),
                           read_type(type_string[i+1:-1]))
    elif type_string[0] == "%s" % ENTITY_TYPE:
        return ENTITY_TYPE
    elif type_string[0] == "%s" % TRUTH_TYPE:
        return TRUTH_TYPE
    elif type_string[0] == "%s" % ANY_TYPE:
        return ANY_TYPE
    else:
        raise LogicalExpressionException("Unexpected character: '%s'." % type_string[0])
class TypeException(Exception):
    """Base class for all type-checking errors raised by this module."""
    def __init__(self, msg):
        Exception.__init__(self, msg)
class InconsistentTypeHierarchyException(TypeException):
    """Raised when a single variable occurs with incompatible types."""
    def __init__(self, variable, expression=None):
        if expression:
            msg = "The variable '%s' was found in multiple places with different"\
                  " types in '%s'." % (variable, expression)
        else:
            msg = "The variable '%s' was found in multiple places with different"\
                  " types." % (variable)
        Exception.__init__(self, msg)
class TypeResolutionException(TypeException):
    """Raised when an expression's type cannot be unified with a required
    type."""
    def __init__(self, expression, other_type):
        Exception.__init__(self, "The type of '%s', '%s', cannot be "
                           "resolved with type '%s'" % \
                           (expression, expression.type, other_type))
class IllegalTypeException(TypeException):
    """Raised when an expression is assigned a type it cannot take."""
    def __init__(self, expression, other_type, allowed_type):
        Exception.__init__(self, "Cannot set type of %s '%s' to '%s'; "
                           "must match type '%s'." %
                           (expression.__class__.__name__, expression,
                            other_type, allowed_type))
def typecheck(expressions, signature=None):
    """
    Ensure correct typing across a collection of ``Expression`` objects.
    :param expressions: a collection of expressions
    :param signature: dict that maps variable names to types (or string
        representations of types)
    """
    # First pass: thread the signature through every expression so that it
    # accumulates the type constraints of the whole collection.
    master = signature
    for expression in expressions:
        master = expression.typecheck(master)
    # Second pass: re-check the earlier expressions against the completed
    # signature.  The final expression already saw it in the first pass, so
    # it is skipped here.
    for expression in expressions[:-1]:
        expression.typecheck(master)
    return master
class SubstituteBindingsI(object):
    """
    An interface for classes that can perform substitutions for
    variables.  Implementors must override both methods below.
    """
    def substitute_bindings(self, bindings):
        """
        :return: The object that is obtained by replacing
            each variable bound by ``bindings`` with its values.
            Aliases are already resolved. (maybe?)
        :rtype: (any)
        """
        raise NotImplementedError()
    def variables(self):
        """
        :return: A list of all variables in this object.
        """
        raise NotImplementedError()
@python_2_unicode_compatible
class Expression(SubstituteBindingsI):
    """This is the base abstract object for all logical expressions"""
    # Shared parser instances used by fromstring(); created once at class
    # definition time.
    _logic_parser = LogicParser()
    _type_checking_logic_parser = LogicParser(type_check=True)
    @classmethod
    def fromstring(cls, s, type_check=False, signature=None):
        """Parse an expression from a string.
        :param s: str, the expression to parse
        :param type_check: bool, perform type-checking while parsing?
        :param signature: dict mapping variable names to types (or string
            representations of types)
        """
        if type_check:
            return cls._type_checking_logic_parser.parse(s, signature)
        else:
            return cls._logic_parser.parse(s, signature)
    def __call__(self, other, *additional):
        # Application syntax: expr(a, b) curries to ((expr a) b).
        accum = self.applyto(other)
        for a in additional:
            accum = accum(a)
        return accum
    def applyto(self, other):
        """Return the application of this expression to ``other``."""
        assert isinstance(other, Expression), "%s is not an Expression" % other
        return ApplicationExpression(self, other)
    def __neg__(self):
        # -expr builds a negation.
        return NegatedExpression(self)
    def negate(self):
        """If this is a negated expression, remove the negation.
        Otherwise add a negation."""
        # NegatedExpression overrides this to strip its own negation.
        return -self
    def __and__(self, other):
        # expr & other builds a conjunction.
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return AndExpression(self, other)
    def __or__(self, other):
        # expr | other builds a disjunction.
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return OrExpression(self, other)
    def __gt__(self, other):
        # expr > other builds an implication (NOT a comparison).
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return ImpExpression(self, other)
    def __lt__(self, other):
        # expr < other builds a biconditional (NOT a comparison).
        if not isinstance(other, Expression):
            raise TypeError("%s is not an Expression" % other)
        return IffExpression(self, other)
    def __eq__(self, other):
        # Subclasses must supply structural equality.
        raise NotImplementedError()
    def __ne__(self, other):
        return not self == other
    def equiv(self, other, prover=None):
        """
        Check for logical equivalence.
        Pass the expression (self <-> other) to the theorem prover.
        If the prover says it is valid, then the self and other are equal.
        :param other: an ``Expression`` to check equality against
        :param prover: a ``nltk.inference.api.Prover``
        """
        assert isinstance(other, Expression), "%s is not an Expression" % other
        if prover is None:
            from nltk.inference import Prover9
            prover = Prover9()
        bicond = IffExpression(self.simplify(), other.simplify())
        return prover.prove(bicond)
    def __hash__(self):
        # Hash on the repr string; subclasses that define __eq__ re-use this
        # via ``__hash__ = Expression.__hash__``.
        return hash(repr(self))
    def substitute_bindings(self, bindings):
        """:see: SubstituteBindingsI.substitute_bindings()"""
        expr = self
        for var in expr.variables():
            if var in bindings:
                val = bindings[var]
                if isinstance(val, Variable):
                    val = self.make_VariableExpression(val)
                elif not isinstance(val, Expression):
                    raise ValueError('Can not substitute a non-expression '
                                     'value into an expression: %r' % (val,))
                # Substitute bindings in the target value.
                val = val.substitute_bindings(bindings)
                # Replace var w/ the target value.
                expr = expr.replace(var, val)
        return expr.simplify()
    def typecheck(self, signature=None):
        """
        Infer and check types. Raise exceptions if necessary.
        :param signature: dict that maps variable names to types (or string
            representations of types)
        :return: the signature, plus any additional type mappings
        """
        # Convert the incoming name->type mapping into the internal
        # name->list-of-occurrences form used by _set_type().
        sig = defaultdict(list)
        if signature:
            for key in signature:
                val = signature[key]
                varEx = VariableExpression(Variable(key))
                if isinstance(val, Type):
                    varEx.type = val
                else:
                    varEx.type = read_type(val)
                sig[key].append(varEx)
        self._set_type(signature=sig)
        return dict((key, sig[key][0].type) for key in sig)
    def findtype(self, variable):
        """
        Find the type of the given variable as it is used in this expression.
        For example, finding the type of "P" in "P(x) & Q(x,y)" yields "<e,t>"
        :param variable: Variable
        """
        raise NotImplementedError()
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """
        Set the type of this expression to be the given type. Raise type
        exceptions where applicable.
        :param other_type: Type
        :param signature: dict(str -> list(AbstractVariableExpression))
        """
        raise NotImplementedError()
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """
        Replace every instance of 'variable' with 'expression'
        :param variable: ``Variable`` The variable to replace
        :param expression: ``Expression`` The expression with which to replace it
        :param replace_bound: bool Should bound variables be replaced?
        :param alpha_convert: bool Alpha convert automatically to avoid name clashes?
        """
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        assert isinstance(expression, Expression), "%s is not an Expression" % expression
        return self.visit_structured(lambda e: e.replace(variable, expression,
                                                         replace_bound, alpha_convert),
                                     self.__class__)
    def normalize(self, newvars=None):
        """Rename auto-generated unique variables"""
        # NOTE(review): ``newvars`` is accepted but never used in this body —
        # confirm whether callers rely on it before removing.
        def get_indiv_vars(e):
            # Collect all individual-variable subexpressions of e.
            if isinstance(e, IndividualVariableExpression):
                return set([e])
            elif isinstance(e, AbstractVariableExpression):
                return set()
            else:
                return e.visit(get_indiv_vars,
                               lambda parts: reduce(operator.or_, parts, set()))
        result = self
        for i,e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)):
            if isinstance(e,EventVariableExpression):
                newVar = e.__class__(Variable('e0%s' % (i+1)))
            elif isinstance(e,IndividualVariableExpression):
                newVar = e.__class__(Variable('z%s' % (i+1)))
            else:
                newVar = e
            result = result.replace(e.variable, newVar, True)
        return result
    def visit(self, function, combinator):
        """
        Recursively visit subexpressions.  Apply 'function' to each
        subexpression and pass the result of each function application
        to the 'combinator' for aggregation:
            return combinator(map(function, self.subexpressions))
        Bound variables are neither applied upon by the function nor given to
        the combinator.
        :param function: ``Function<Expression,T>`` to call on each subexpression
        :param combinator: ``Function<list<T>,R>`` to combine the results of the
            function calls
        :return: result of combination ``R``
        """
        raise NotImplementedError()
    def visit_structured(self, function, combinator):
        """
        Recursively visit subexpressions.  Apply 'function' to each
        subexpression and pass the result of each function application
        to the 'combinator' for aggregation.  The combinator must have
        the same signature as the constructor.  The function is not
        applied to bound variables, but they are passed to the
        combinator.
        :param function: ``Function`` to call on each subexpression
        :param combinator: ``Function`` with the same signature as the
            constructor, to combine the results of the function calls
        :return: result of combination
        """
        return self.visit(function, lambda parts: combinator(*parts))
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self)
    def __str__(self):
        return self.str()
    def variables(self):
        """
        Return a set of all the variables for binding substitution.
        The variables returned include all free (non-bound) individual
        variables and any variable starting with '?' or '@'.
        :return: set of ``Variable`` objects
        """
        return self.free() | set(p for p in self.predicates()|self.constants()
                                 if re.match('^[?@]', p.name))
    def free(self):
        """
        Return a set of all the free (non-bound) variables.  This includes
        both individual and predicate variables, but not constants.
        :return: set of ``Variable`` objects
        """
        return self.visit(lambda e: e.free(),
                          lambda parts: reduce(operator.or_, parts, set()))
    def constants(self):
        """
        Return a set of individual constants (non-predicates).
        :return: set of ``Variable`` objects
        """
        return self.visit(lambda e: e.constants(),
                          lambda parts: reduce(operator.or_, parts, set()))
    def predicates(self):
        """
        Return a set of predicates (constants, not variables).
        :return: set of ``Variable`` objects
        """
        return self.visit(lambda e: e.predicates(),
                          lambda parts: reduce(operator.or_, parts, set()))
    def simplify(self):
        """
        :return: beta-converted version of this expression
        """
        return self.visit_structured(lambda e: e.simplify(), self.__class__)
    def make_VariableExpression(self, variable):
        """Factory hook for building variable expressions."""
        return VariableExpression(variable)
@python_2_unicode_compatible
class ApplicationExpression(Expression):
    r"""
    This class is used to represent two related types of logical expressions.
    The first is a Predicate Expression, such as "P(x,y)".  A predicate
    expression is comprised of a ``FunctionVariableExpression`` or
    ``ConstantExpression`` as the predicate and a list of Expressions as the
    arguments.
    The second is an application of one expression to another, such as
    "(\x.dog(x))(fido)".
    The reason Predicate Expressions are treated as Application Expressions is
    that the Variable Expression predicate of the expression may be replaced
    with another Expression, such as a LambdaExpression, which would mean that
    the Predicate should be thought of as being applied to the arguments.
    The logical expression reader will always curry arguments in a application expression.
    So, "\x y.see(x,y)(john,mary)" will be represented internally as
    "((\x y.(see(x))(y))(john))(mary)".  This simplifies the internals since
    there will always be exactly one argument in an application.
    The str() method will usually print the curried forms of application
    expressions.  The one exception is when the application expression is
    really a predicate expression (ie, underlying function is an
    ``AbstractVariableExpression``).  This means that the example from above
    will be returned as "(\x y.see(x,y)(john))(mary)".
    """
    def __init__(self, function, argument):
        """
        :param function: ``Expression``, for the function expression
        :param argument: ``Expression``, for the argument
        """
        assert isinstance(function, Expression), "%s is not an Expression" % function
        assert isinstance(argument, Expression), "%s is not an Expression" % argument
        self.function = function
        self.argument = argument
    def simplify(self):
        """Beta-reduce: if the function simplifies to a lambda, substitute
        the argument into its body."""
        function = self.function.simplify()
        argument = self.argument.simplify()
        if isinstance(function, LambdaExpression):
            return function.term.replace(function.variable, argument).simplify()
        else:
            return self.__class__(function, argument)
    @property
    def type(self):
        # The type of an application is the result type of its function.
        if isinstance(self.function.type, ComplexType):
            return self.function.type.second
        else:
            return ANY_TYPE
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        self.argument._set_type(ANY_TYPE, signature)
        try:
            self.function._set_type(ComplexType(self.argument.type, other_type), signature)
        except TypeResolutionException:
            raise TypeException(
                "The function '%s' is of type '%s' and cannot be applied "
                "to '%s' of type '%s'. Its argument must match type '%s'."
                % (self.function, self.function.type, self.argument,
                   self.argument.type, self.function.type.first))
    def findtype(self, variable):
        """:see Expression.findtype()"""
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        if self.is_atom():
            function, args = self.uncurry()
        else:
            #It's not a predicate expression ("P(x,y)"), so leave args curried
            function = self.function
            args = [self.argument]
        found = [arg.findtype(variable) for arg in [function]+args]
        unique = []
        for f in found:
            if f != ANY_TYPE:
                # Record the first concrete type; afterwards only scan for a
                # compatible match (the ``else`` belongs to ``if unique``).
                if unique:
                    for u in unique:
                        if f.matches(u):
                            break
                else:
                    unique.append(f)
        if len(unique) == 1:
            return list(unique)[0]
        else:
            return ANY_TYPE
    def constants(self):
        """:see: Expression.constants()"""
        if isinstance(self.function, AbstractVariableExpression):
            # The function position is a predicate, not an individual constant.
            function_constants = set()
        else:
            function_constants = self.function.constants()
        return function_constants | self.argument.constants()
    def predicates(self):
        """:see: Expression.predicates()"""
        if isinstance(self.function, ConstantExpression):
            function_preds = set([self.function.variable])
        else:
            function_preds = self.function.predicates()
        return function_preds | self.argument.predicates()
    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        return combinator([function(self.function), function(self.argument)])
    def __eq__(self, other):
        return isinstance(other, ApplicationExpression) and \
               self.function == other.function and \
               self.argument == other.argument
    def __ne__(self, other):
        return not self == other
    __hash__ = Expression.__hash__
    def __str__(self):
        # uncurry the arguments and find the base function
        if self.is_atom():
            function, args = self.uncurry()
            arg_str = ','.join("%s" % arg for arg in args)
        else:
            #Leave arguments curried
            function = self.function
            arg_str = "%s" % self.argument
        function_str = "%s" % function
        # Parenthesize the function part when ambiguity could result.
        parenthesize_function = False
        if isinstance(function, LambdaExpression):
            if isinstance(function.term, ApplicationExpression):
                if not isinstance(function.term.function,
                                  AbstractVariableExpression):
                    parenthesize_function = True
            elif not isinstance(function.term, BooleanExpression):
                parenthesize_function = True
        elif isinstance(function, ApplicationExpression):
            parenthesize_function = True
        if parenthesize_function:
            function_str = Tokens.OPEN + function_str + Tokens.CLOSE
        return function_str + Tokens.OPEN + arg_str + Tokens.CLOSE
    def uncurry(self):
        """
        Uncurry this application expression
        return: A tuple (base-function, arg-list)
        """
        function = self.function
        args = [self.argument]
        while isinstance(function, ApplicationExpression):
            #(\x.\y.sees(x,y)(john))(mary)
            args.insert(0, function.argument)
            function = function.function
        return (function, args)
    @property
    def pred(self):
        """
        Return uncurried base-function.
        If this is an atom, then the result will be a variable expression.
        Otherwise, it will be a lambda expression.
        """
        return self.uncurry()[0]
    @property
    def args(self):
        """
        Return uncurried arg-list
        """
        return self.uncurry()[1]
    def is_atom(self):
        """
        Is this expression an atom (as opposed to a lambda expression applied
        to a term)?
        """
        return isinstance(self.pred, AbstractVariableExpression)
@total_ordering
@python_2_unicode_compatible
class AbstractVariableExpression(Expression):
    """This class represents a variable to be used as a predicate or entity"""
    def __init__(self, variable):
        """
        :param variable: ``Variable``, for the variable
        """
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        self.variable = variable
    def simplify(self):
        """A variable expression is already in beta-normal form."""
        return self
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """:see: Expression.replace()"""
        assert isinstance(variable, Variable), "%s is not an Variable" % variable
        assert isinstance(expression, Expression), "%s is not an Expression" % expression
        if self.variable == variable:
            return expression
        else:
            return self
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        # Unify the requested type with every previous occurrence of this
        # variable, then propagate the combined type to all occurrences.
        resolution = other_type
        for varEx in signature[self.variable.name]:
            resolution = varEx.type.resolve(resolution)
            if not resolution:
                raise InconsistentTypeHierarchyException(self)
        signature[self.variable.name].append(self)
        for varEx in signature[self.variable.name]:
            varEx.type = resolution
    def findtype(self, variable):
        """:see Expression.findtype()"""
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        if self.variable == variable:
            return self.type
        else:
            return ANY_TYPE
    def predicates(self):
        """:see: Expression.predicates()"""
        return set()
    def __eq__(self, other):
        """Allow equality between instances of ``AbstractVariableExpression``
        subtypes."""
        return isinstance(other, AbstractVariableExpression) and \
               self.variable == other.variable
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        # Ordering follows the underlying Variable; @total_ordering supplies
        # the remaining comparison operators.
        if not isinstance(other, AbstractVariableExpression):
            raise TypeError
        return self.variable < other.variable
    __hash__ = Expression.__hash__
    def __str__(self):
        return "%s" % self.variable
class IndividualVariableExpression(AbstractVariableExpression):
    """This class represents variables that take the form of a single lowercase
    character (other than 'e') followed by zero or more digits."""
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        if not other_type.matches(ENTITY_TYPE):
            raise IllegalTypeException(self, other_type, ENTITY_TYPE)
        signature[self.variable.name].append(self)
    def _get_type(self): return ENTITY_TYPE
    # Individual variables are always entity-typed; setting the type only
    # validates it and records the occurrence in the signature.
    type = property(_get_type, _set_type)
    def free(self):
        """:see: Expression.free()"""
        return set([self.variable])
    def constants(self):
        """:see: Expression.constants()"""
        return set()
class FunctionVariableExpression(AbstractVariableExpression):
    """This class represents variables that take the form of a single uppercase
    character followed by zero or more digits."""
    # Function variables may take on any type.
    type = ANY_TYPE
    def free(self):
        """:see: Expression.free()"""
        return set([self.variable])
    def constants(self):
        """:see: Expression.constants()"""
        return set()
class EventVariableExpression(IndividualVariableExpression):
    """This class represents variables that take the form of a single lowercase
    'e' character followed by zero or more digits."""
    type = EVENT_TYPE
class ConstantExpression(AbstractVariableExpression):
    """This class represents variables that do not take the form of a single
    character followed by zero or more digits."""
    type = ENTITY_TYPE
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        if other_type == ANY_TYPE:
            #entity type by default, for individuals
            resolution = ENTITY_TYPE
        else:
            resolution = other_type
            if self.type != ENTITY_TYPE:
                # A more specific type was already assigned; unify with it.
                resolution = resolution.resolve(self.type)
        # Unify with every previous occurrence, then propagate the result.
        for varEx in signature[self.variable.name]:
            resolution = varEx.type.resolve(resolution)
            if not resolution:
                raise InconsistentTypeHierarchyException(self)
        signature[self.variable.name].append(self)
        for varEx in signature[self.variable.name]:
            varEx.type = resolution
    def free(self):
        """:see: Expression.free()"""
        return set()
    def constants(self):
        """:see: Expression.constants()"""
        return set([self.variable])
def VariableExpression(variable):
    """
    This is a factory method that instantiates and returns a subtype of
    ``AbstractVariableExpression`` appropriate for the given variable.
    """
    assert isinstance(variable, Variable), "%s is not a Variable" % variable
    # Dispatch on the shape of the variable name (see is_indvar / is_funcvar
    # / is_eventvar); anything that matches none of them is a constant.
    if is_indvar(variable.name):
        return IndividualVariableExpression(variable)
    elif is_funcvar(variable.name):
        return FunctionVariableExpression(variable)
    elif is_eventvar(variable.name):
        return EventVariableExpression(variable)
    else:
        return ConstantExpression(variable)
class VariableBinderExpression(Expression):
    """This an abstract class for any Expression that binds a variable in an
    Expression.  This includes LambdaExpressions and Quantified Expressions"""
    def __init__(self, variable, term):
        """
        :param variable: ``Variable``, for the variable
        :param term: ``Expression``, for the term
        """
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        assert isinstance(term, Expression), "%s is not an Expression" % term
        self.variable = variable
        self.term = term
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """:see: Expression.replace()"""
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        assert isinstance(expression, Expression), "%s is not an Expression" % expression
        #if the bound variable is the thing being replaced
        if self.variable == variable:
            if replace_bound:
                assert isinstance(expression, AbstractVariableExpression),\
                       "%s is not a AbstractVariableExpression" % expression
                return self.__class__(expression.variable,
                                      self.term.replace(variable, expression, True, alpha_convert))
            else:
                # The binder shadows the variable, so nothing to replace.
                return self
        else:
            # if the bound variable appears in the expression, then it must
            # be alpha converted to avoid a conflict
            if alpha_convert and self.variable in expression.free():
                self = self.alpha_convert(unique_variable(pattern=self.variable))
            #replace in the term
            return self.__class__(self.variable,
                                  self.term.replace(variable, expression, replace_bound, alpha_convert))
    def alpha_convert(self, newvar):
        """Rename all occurrences of the variable introduced by this variable
        binder in the expression to ``newvar``.
        :param newvar: ``Variable``, for the new variable
        """
        assert isinstance(newvar, Variable), "%s is not a Variable" % newvar
        return self.__class__(newvar,
                              self.term.replace(self.variable,
                                                VariableExpression(newvar),
                                                True))
    def free(self):
        """:see: Expression.free()"""
        return self.term.free() - set([self.variable])
    def findtype(self, variable):
        """:see Expression.findtype()"""
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        if variable == self.variable:
            # Bound occurrences tell us nothing about the free variable.
            return ANY_TYPE
        else:
            return self.term.findtype(variable)
    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        return combinator([function(self.term)])
    def visit_structured(self, function, combinator):
        """:see: Expression.visit_structured()"""
        return combinator(self.variable, function(self.term))
    def __eq__(self, other):
        r"""Defines equality modulo alphabetic variance.  If we are comparing
        \x.M and \y.N, then check equality of M and N[x/y]."""
        if isinstance(self, other.__class__) or \
           isinstance(other, self.__class__):
            if self.variable == other.variable:
                return self.term == other.term
            else:
                # Comparing \x.M and \y.N.  Relabel y in N with x and continue.
                varex = VariableExpression(self.variable)
                return self.term == other.term.replace(other.variable, varex)
        else:
            return False
    def __ne__(self, other):
        return not self == other
    __hash__ = Expression.__hash__
@python_2_unicode_compatible
class LambdaExpression(VariableBinderExpression):
    r"""A lambda abstraction, e.g. ``\x.P(x)``."""
    @property
    def type(self):
        # <type-of-bound-variable, type-of-body>
        return ComplexType(self.term.findtype(self.variable),
                           self.term.type)
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        self.term._set_type(other_type.second, signature)
        if not self.type.resolve(other_type):
            raise TypeResolutionException(self, other_type)
    def __str__(self):
        # Collapse nested lambdas into a single binder: "\x y z.term".
        variables = [self.variable]
        term = self.term
        while term.__class__ == self.__class__:
            variables.append(term.variable)
            term = term.term
        return Tokens.LAMBDA + ' '.join("%s" % v for v in variables) + \
               Tokens.DOT + "%s" % term
@python_2_unicode_compatible
class QuantifiedExpression(VariableBinderExpression):
    """Abstract base for quantified formulas; subclasses supply the
    quantifier token via ``getQuantifier()``."""
    @property
    def type(self): return TRUTH_TYPE
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        if not other_type.matches(TRUTH_TYPE):
            raise IllegalTypeException(self, other_type, TRUTH_TYPE)
        self.term._set_type(TRUTH_TYPE, signature)
    def __str__(self):
        # Collapse nested same-quantifier binders: "all x y.term".
        variables = [self.variable]
        term = self.term
        while term.__class__ == self.__class__:
            variables.append(term.variable)
            term = term.term
        return self.getQuantifier() + ' ' + ' '.join("%s" % v for v in variables) + \
               Tokens.DOT + "%s" % term
class ExistsExpression(QuantifiedExpression):
    """An existentially quantified expression."""
    def getQuantifier(self):
        return Tokens.EXISTS
class AllExpression(QuantifiedExpression):
    """A universally quantified expression."""
    def getQuantifier(self):
        return Tokens.ALL
@python_2_unicode_compatible
class NegatedExpression(Expression):
    """The negation of a truth-valued expression."""
    def __init__(self, term):
        assert isinstance(term, Expression), "%s is not an Expression" % term
        self.term = term
    @property
    def type(self): return TRUTH_TYPE
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        if not other_type.matches(TRUTH_TYPE):
            raise IllegalTypeException(self, other_type, TRUTH_TYPE)
        self.term._set_type(TRUTH_TYPE, signature)
    def findtype(self, variable):
        """:see Expression.findtype()"""
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        return self.term.findtype(variable)
    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        return combinator([function(self.term)])
    def negate(self):
        """:see: Expression.negate()"""
        # Double negation: stripping the negation yields the inner term.
        return self.term
    def __eq__(self, other):
        return isinstance(other, NegatedExpression) and self.term == other.term
    def __ne__(self, other):
        return not self == other
    __hash__ = Expression.__hash__
    def __str__(self):
        return Tokens.NOT + "%s" % self.term
@python_2_unicode_compatible
class BinaryExpression(Expression):
    """Abstract base for expressions with two sub-expressions joined by an
    operator; subclasses supply the operator token via ``getOp()``."""
    def __init__(self, first, second):
        assert isinstance(first, Expression), "%s is not an Expression" % first
        assert isinstance(second, Expression), "%s is not an Expression" % second
        self.first = first
        self.second = second
    @property
    def type(self): return TRUTH_TYPE
    def findtype(self, variable):
        """:see Expression.findtype()"""
        assert isinstance(variable, Variable), "%s is not a Variable" % variable
        f = self.first.findtype(variable)
        s = self.second.findtype(variable)
        if f == s or s == ANY_TYPE:
            return f
        elif f == ANY_TYPE:
            return s
        else:
            # The two sides disagree; report ANY rather than guessing.
            return ANY_TYPE
    def visit(self, function, combinator):
        """:see: Expression.visit()"""
        return combinator([function(self.first), function(self.second)])
    def __eq__(self, other):
        return (isinstance(self, other.__class__) or \
                isinstance(other, self.__class__)) and \
               self.first == other.first and self.second == other.second
    def __ne__(self, other):
        return not self == other
    __hash__ = Expression.__hash__
    def __str__(self):
        first = self._str_subex(self.first)
        second = self._str_subex(self.second)
        return Tokens.OPEN + first + ' ' + self.getOp() \
               + ' ' + second + Tokens.CLOSE
    def _str_subex(self, subex):
        """Render a sub-expression; subclasses may strip redundant parens."""
        return "%s" % subex
class BooleanExpression(BinaryExpression):
    """A binary connective whose operands and result are truth-valued."""
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        if not other_type.matches(TRUTH_TYPE):
            raise IllegalTypeException(self, other_type, TRUTH_TYPE)
        # Both operands of a boolean connective must be truth-valued.
        self.first._set_type(TRUTH_TYPE, signature)
        self.second._set_type(TRUTH_TYPE, signature)
class AndExpression(BooleanExpression):
    """A conjunction of two truth-valued expressions."""
    def getOp(self):
        # Operator token used when printing.
        return Tokens.AND
    def _str_subex(self, subex):
        # Nested conjunctions drop their surrounding parentheses so that
        # chains render flat, e.g. (P & Q & R).
        text = "%s" % subex
        if not isinstance(subex, AndExpression):
            return text
        return text[1:-1]
class OrExpression(BooleanExpression):
    """A disjunction of two truth-valued expressions."""
    def getOp(self):
        # Operator token used when printing.
        return Tokens.OR
    def _str_subex(self, subex):
        # Nested disjunctions drop their surrounding parentheses so that
        # chains render flat, e.g. (P | Q | R).
        text = "%s" % subex
        if not isinstance(subex, OrExpression):
            return text
        return text[1:-1]
class ImpExpression(BooleanExpression):
    """This class represents implications"""
    def getOp(self):
        # Operator token used when printing.
        return Tokens.IMP
class IffExpression(BooleanExpression):
    """This class represents biconditionals"""
    def getOp(self):
        # Operator token used when printing.
        return Tokens.IFF
class EqualityExpression(BinaryExpression):
    """This class represents equality expressions like "(x = y)"."""
    def _set_type(self, other_type=ANY_TYPE, signature=None):
        """:see Expression._set_type()"""
        assert isinstance(other_type, Type)
        if signature is None:
            signature = defaultdict(list)
        # The equality itself is truth-valued ...
        if not other_type.matches(TRUTH_TYPE):
            raise IllegalTypeException(self, other_type, TRUTH_TYPE)
        # ... while both operands must denote entities.
        self.first._set_type(ENTITY_TYPE, signature)
        self.second._set_type(ENTITY_TYPE, signature)
    def getOp(self):
        return Tokens.EQ
### Utilities
class LogicalExpressionException(Exception):
    """Base class for errors raised while reading a logical expression.
    ``index`` is the character position in the input where the problem
    was detected."""
    def __init__(self, index, message):
        self.index = index
        Exception.__init__(self, message)
class UnexpectedTokenException(LogicalExpressionException):
    """Raised when the reader meets a token it cannot use at ``index``.
    Any combination of ``unexpected``, ``expected`` and ``message`` may be
    supplied to build the error text."""
    def __init__(self, index, unexpected=None, expected=None, message=None):
        if unexpected and expected:
            msg = "Unexpected token: '%s'. " \
                "Expected token '%s'." % (unexpected, expected)
        elif unexpected:
            msg = "Unexpected token: '%s'." % unexpected
            if message:
                msg += ' '+message
        else:
            # NOTE(review): this branch is also reached when only `message`
            # is given, producing "Expected token 'None'." — confirm intended.
            msg = "Expected token '%s'." % expected
        LogicalExpressionException.__init__(self, index, msg)
class ExpectedMoreTokensException(LogicalExpressionException):
    """Raised when the input ends while the reader still expects tokens."""
    def __init__(self, index, message=None):
        if not message:
            message = 'More tokens expected.'
        LogicalExpressionException.__init__(self, index, 'End of input found. ' + message)
def is_indvar(expr):
    """
    Check whether ``expr`` names an individual variable: a single lowercase
    letter other than 'e', followed by zero or more digits.
    :param expr: str
    :return: bool True if expr is of the correct form
    """
    assert isinstance(expr, string_types), "%s is not a string" % expr
    return bool(re.match(r'^[a-df-z]\d*$', expr))
def is_funcvar(expr):
    """
    Check whether ``expr`` names a function variable: a single uppercase
    letter followed by zero or more digits.
    :param expr: str
    :return: bool True if expr is of the correct form
    """
    assert isinstance(expr, string_types), "%s is not a string" % expr
    return bool(re.match(r'^[A-Z]\d*$', expr))
def is_eventvar(expr):
    """
    Check whether ``expr`` names an event variable: the single lowercase
    letter 'e' followed by zero or more digits.
    :param expr: str
    :return: bool True if expr is of the correct form
    """
    assert isinstance(expr, string_types), "%s is not a string" % expr
    return bool(re.match(r'^e\d*$', expr))
def demo():
    """Exercise the logic reader, simplification, and alpha conversion,
    printing each result to stdout."""
    lexpr = Expression.fromstring
    print('='*20 + 'Test reader' + '='*20)
    print(lexpr(r'john'))
    print(lexpr(r'man(x)'))
    print(lexpr(r'-man(x)'))
    print(lexpr(r'(man(x) & tall(x) & walks(x))'))
    print(lexpr(r'exists x.(man(x) & tall(x) & walks(x))'))
    print(lexpr(r'\x.man(x)'))
    print(lexpr(r'\x.man(x)(john)'))
    print(lexpr(r'\x y.sees(x,y)'))
    print(lexpr(r'\x y.sees(x,y)(a,b)'))
    print(lexpr(r'(\x.exists y.walks(x,y))(x)'))
    print(lexpr(r'exists x.x = y'))
    print(lexpr(r'exists x.(x = y)'))
    print(lexpr('P(x) & x=y & P(y)'))
    print(lexpr(r'\P Q.exists x.(P(x) & Q(x))'))
    print(lexpr(r'man(x) <-> tall(x)'))
    # Beta reduction of applications.
    print('='*20 + 'Test simplify' + '='*20)
    print(lexpr(r'\x.\y.sees(x,y)(john)(mary)').simplify())
    print(lexpr(r'\x.\y.sees(x,y)(john, mary)').simplify())
    print(lexpr(r'all x.(man(x) & (\x.exists y.walks(x,y))(x))').simplify())
    print(lexpr(r'(\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x))(\x.bark(x))').simplify())
    # Renaming a bound variable must preserve equality.
    print('='*20 + 'Test alpha conversion and binder expression equality' + '='*20)
    e1 = lexpr('exists x.P(x)')
    print(e1)
    e2 = e1.alpha_convert(Variable('z'))
    print(e2)
    print(e1 == e2)
def demo_errors():
    """Feed a series of malformed expressions to the reader and print the
    exception each one raises."""
    print('='*20 + 'Test reader errors' + '='*20)
    demoException('(P(x) & Q(x)')
    demoException('((P(x) &) & Q(x))')
    demoException('P(x) -> ')
    demoException('P(x')
    demoException('P(x,')
    demoException('P(x,)')
    demoException('exists')
    demoException('exists x.')
    demoException('\\')
    demoException('\\ x y.')
    demoException('P(x)Q(x)')
    demoException('(P(x)Q(x)')
    demoException('exists x -> y')
def demoException(s):
    """Parse ``s`` and print the LogicalExpressionException it raises
    (expressions that parse cleanly print nothing)."""
    try:
        Expression.fromstring(s)
    except LogicalExpressionException as e:
        print("%s: %s" % (e.__class__.__name__, e))
def printtype(ex):
    """Print an expression together with its type."""
    print("%s : %s" % (ex.str(), ex.type))
# Run the reader/simplify demo when executed as a script.
if __name__ == '__main__':
    demo()
    # demo_errors()
| {
"content_hash": "6b31fcd9875401c784b2a3bdc329c620",
"timestamp": "",
"source": "github",
"line_count": 1902,
"max_line_length": 122,
"avg_line_length": 35.650893796004205,
"alnum_prop": 0.5841198678621992,
"repo_name": "Edu-Glez/Bank_sentiment_analysis",
"id": "2e38bd1e4ba53c6e5b04525dbcb0d078e9a0043c",
"size": "68000",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/nltk/sem/logic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lex",
"bytes": "101463"
},
{
"name": "Python",
"bytes": "29876"
},
{
"name": "Shell",
"bytes": "1509"
}
],
"symlink_target": ""
} |
import wx
import armid
from BasePanel import BasePanel
import InternalDocument
from InternalDocumentNotebook import InternalDocumentNotebook
class InternalDocumentPanel(BasePanel):
  """wx panel for viewing and editing an internal document: its name,
  description, content, and the codes/memos attached to the content."""
  def __init__(self,parent):
    # Fix: the original passed armid.EXTERNALDOCUMENT_ID here, which is
    # inconsistent with every other id used by this class (all of them
    # are INTERNALDOCUMENT_*).
    BasePanel.__init__(self,parent,armid.INTERNALDOCUMENT_ID)
    # Database id of the loaded document; None until loadControls() runs.
    self.theId = None
  def buildControls(self,isCreate,isUpdateable=True):
    """Lay out the document notebook above a commit button."""
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    nb = InternalDocumentNotebook(self)
    mainSizer.Add(nb,1,wx.EXPAND)
    mainSizer.Add(self.buildCommitButtonSizer(armid.INTERNALDOCUMENT_BUTTONCOMMIT_ID,isCreate),0,wx.CENTER)
    self.SetSizer(mainSizer)
  def loadControls(self,objt,isReadOnly=False):
    """Populate the panel's controls from InternalDocument ``objt``."""
    self.theId = objt.id()
    nameCtrl = self.FindWindowById(armid.INTERNALDOCUMENT_TEXTNAME_ID)
    descCtrl = self.FindWindowById(armid.INTERNALDOCUMENT_TEXTDESCRIPTION_ID)
    contCtrl = self.FindWindowById(armid.INTERNALDOCUMENT_TEXTCONTENT_ID)
    nameCtrl.SetValue(objt.name())
    descCtrl.SetValue(objt.description())
    contCtrl.SetValue(objt.content())
    # The content control also carries qualitative-analysis annotations.
    contCtrl.setCodes(objt.codes())
    contCtrl.setMemos(objt.memos())
| {
"content_hash": "6cb799fcd6d8285c4c42c70ea8a67e39",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 107,
"avg_line_length": 36.46666666666667,
"alnum_prop": 0.7687385740402194,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "86e54a44064ec77a118a924e68dce8c552272610",
"size": "1893",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/InternalDocumentPanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import time
import math
import mpu6050
import hmc5883l
import bmp085
import thread
from gpsdData import *
global stop
def fileOpen(fileToWrite,bufferring = 100):
    """Open ``fileToWrite`` for writing with the given buffer size.

    :param fileToWrite: path to open, or None to disable logging
    :param bufferring: buffer size passed to open() (misspelled name kept
        for backward compatibility with existing callers)
    :return: an open file object, or None when no path was given or the
        open failed
    """
    if fileToWrite is None:
        return None
    try:
        return open(fileToWrite,'w',bufferring)
    except (IOError, OSError):
        # Narrowed from a bare ``except:`` so programming errors are no
        # longer silently swallowed; logging stays best-effort.
        print('file open failed')
        return None
def writeThread(file1,num):
    """Consumer loop: drain one of the global sample buffers into ``file1``.

    ``num`` selects the buffer and record format: 1=mpuDatas, 2=magDatas,
    3=bmpDatas, 4=gpsDatas.  Polls once per second until the global
    ``stop`` flag becomes True.
    NOTE(review): each pass pops only lens-1 items (``range(0, lens-1)``),
    always leaving the newest sample in the buffer — confirm intentional.
    """
    global stop
    try:
        if num == 1:
            global mpuDatas
            while stop==False:
                lens = len(mpuDatas)
                print ('mpuDatas len is ',lens)
                if lens !=0:
                    for i in range(0,lens-1):
                        data = mpuDatas.pop(0)
                        # Quaternion, yaw/pitch/roll (converted to degrees),
                        # raw accel/gyro axes, then the capture timestamp.
                        print(data['q']['w'],
                              data['q']['x'],
                              data['q']['y'],
                              data['q']['z'],
                              data['ypr']['yaw'] * 180 / math.pi,
                              data['ypr']['pitch'] * 180 / math.pi,
                              data['ypr']['roll'] * 180 / math.pi,
                              data['a_X'],
                              data['a_Y'],
                              data['a_Z'],
                              data['r_X'],
                              data['r_Y'],
                              data['r_Z'],
                              data['time'],
                              file = file1)
                time.sleep(1)
        elif num == 2:
            global magDatas
            while stop==False:
                lens = len(magDatas)
                print ('magDatas len is ',lens)
                if lens !=0:
                    for i in range(0,lens-1):
                        data = magDatas.pop(0)
                        print(
                              data['x'],
                              data['y'],
                              data['z'],
                              data['time'],
                              file = file1)
                time.sleep(1)
        elif num == 3:
            global bmpDatas
            while stop==False:
                lens = len(bmpDatas)
                print ('bmpDatas len is ',lens)
                if lens !=0:
                    for i in range(0,lens-1):
                        data = bmpDatas.pop(0)
                        print(
                              data['temp'],
                              data['pre'],
                              data['alt'],
                              data['time'],
                              file = file1)
                time.sleep(1)
        elif num == 4:
            global gpsDatas
            while stop==False:
                lens = len(gpsDatas)
                print ('gpsDatas len is ',lens)
                if lens !=0:
                    for i in range(0,lens-1):
                        data = gpsDatas.pop(0)
                        print(
                              data['lat'],
                              data['lon'],
                              data['alt'],
                              data['timeUtc'],
                              data['time'],
                              file = file1)
                time.sleep(1)
    except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
        stop = True
        print('stop')
def mpu6050Thread(fileToWrite=None):
    """Poll the MPU6050 DMP FIFO until the global ``stop`` flag is set,
    appending decoded samples to the global ``mpuDatas`` buffer.

    :param fileToWrite: optional log path; when given, a writer thread is
        started to drain ``mpuDatas`` into the file.
    """
    global mpuDatas
    global stop
    mpuDatas = list()
    # Fix: ``file1`` was only assigned when fileToWrite was not None, which
    # made the ``if file1 != None`` checks below raise NameError when this
    # thread was started without a log file.  fileOpen() already returns
    # None for a None path, so call it unconditionally (this also matches
    # the other sensor threads).
    file1 = fileOpen(fileToWrite)
    mpu = mpu6050.MPU6050()
    mpu.setDMPEnabled(True)
    mpu.resetFIFO()
    packetSize = mpu.dmpGetFIFOPacketSize()
    if file1 != None:
        thread.start_new_thread(writeThread,(file1,1))
    try:
        while stop==False:
            # Get INT_STATUS byte
            mpuIntStatus = mpu.getIntStatus()
            if mpuIntStatus >= 2: # check for DMP data ready interrupt (this should happen frequently)
                # get current FIFO count
                fifoCount = mpu.getFIFOCount()
                # check for overflow (this should never happen unless our code is too inefficient)
                if fifoCount == 1024:
                    # reset so we can continue cleanly
                    mpu.resetFIFO()
                    print('FIFO overflow!')
                # wait for correct available data length, should be a VERY short wait
                fifoCount = mpu.getFIFOCount()
                while fifoCount < packetSize:
                    fifoCount = mpu.getFIFOCount()
                result = mpu.getFIFOBytes(packetSize)
                q = mpu.dmpGetQuaternion(result)
                g = mpu.dmpGetGravity(q)
                ypr = mpu.dmpGetYawPitchRoll(q, g)
                a_X = mpu.getAccelerationX()
                a_Y = mpu.getAccelerationY()
                a_Z = mpu.getAccelerationZ()
                r_X = mpu.getRotationX()
                r_Y = mpu.getRotationY()
                r_Z = mpu.getRotationZ()
                mpuData={'q':q,
                         'ypr':ypr,
                         'a_X':a_X,
                         'a_Y':a_Y,
                         'a_Z':a_Z,
                         'r_X':r_X,
                         'r_Y':r_Y,
                         'r_Z':r_Z,
                         'time':float(time.time())
                         }
                # Only buffer samples when a writer thread is draining them.
                if file1 != None:
                    mpuDatas.append(mpuData)
                # track FIFO count here in case there is > 1 packet available
                # (this lets us immediately read more without waiting for an interrupt)
                fifoCount -= packetSize
    except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
        stop = True
        print('stop')
def hm5883lThread(fileToWrite=None):
    """Sample the HMC5883L magnetometer every 20 ms until the global
    ``stop`` flag is set, buffering readings in the global ``magDatas``
    list (drained by a writer thread when ``fileToWrite`` is given)."""
    global magData
    global magDatas
    global stop
    magDatas = list()
    file1 = fileOpen(fileToWrite)
    if file1 != None:
        thread.start_new_thread(writeThread,(file1,2))
    # HM5883l
    mag = hmc5883l.HMC5883L()
    try:
        while stop==False:
            magData = mag.getHeading()
            magData['time']= float(time.time())
            # Only buffer samples when a writer thread is draining them.
            if file1 != None:
                magDatas.append(magData)
            time.sleep(0.02)
    except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
        stop = True
        print('stop')
def bmpThread(fileToWrite=None):
    """Sample the BMP085 barometer every 200 ms until the global ``stop``
    flag is set, buffering readings in the global ``bmpDatas`` list
    (drained by a writer thread when ``fileToWrite`` is given)."""
    global bmpData
    global bmpDatas
    global stop
    bmpDatas = list()
    file1 = fileOpen(fileToWrite)
    if file1 != None:
        thread.start_new_thread(writeThread,(file1,3))
    # BMP085
    bmp = bmp085.BMP085()
    # needless of initialization
    try:
        while stop==False:
            bmpData = bmp.readData()
            bmpData['time']= float(time.time())
            # Only buffer samples when a writer thread is draining them.
            if file1 != None:
                bmpDatas.append(bmpData)
            time.sleep(0.2)
    except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
        stop = True
        print('stop')
def gpsThread(fileToWrite=None):
    """Poll the shared GpsPoller (global ``gpsp``) every 200 ms until the
    global ``stop`` flag is set, buffering fixes in the global ``gpsDatas``
    list (drained by a writer thread when ``fileToWrite`` is given)."""
    global gpsData
    global gpsDatas
    global gpsp
    global stop
    gpsDatas = list()
    file1 = fileOpen(fileToWrite)
    if file1 != None:
        thread.start_new_thread(writeThread,(file1,4))
    # GPs read Data
    try:
        while stop==False:
            try:
                # A non-empty UTC string is used as the "fix available" signal.
                if(len(gpsp.gpsd.utc)!=0):
                    gpsData = dict()
                    gpsData['lat'] = gpsp.gpsd.fix.latitude
                    gpsData['lon'] = gpsp.gpsd.fix.longitude
                    gpsData['timeUtc'] = gpsp.gpsd.utc
                    gpsData['alt'] = gpsp.gpsd.fix.altitude
                    gpsData['time']= float(time.time())
                    # Only buffer samples when a writer thread drains them.
                    if file1 != None:
                        gpsDatas.append(gpsData)
            except:
                # NOTE(review): bare except; best-effort read while the gpsd
                # fix may not be populated yet.
                print('read utc error')
            if(stop==True):
                print('stop')
            time.sleep(0.2)
    except (KeyboardInterrupt, SystemExit):
        stop = True;
        print('stop')
def main():
    """Initialise the sensors, spawn one reader thread per device (each of
    which starts its own writer thread), then idle until interrupted."""
    # NOTE(review): ``run`` is written below but never read.
    run = True
    global mpuData
    global magData
    global bmpData
    global gpsData
    global gpsp
    global stop
    # Log destinations on the mounted SD card.
    fileMpu = "/mnt/sd/mpu.dat"
    fileMag = "/mnt/sd/mag.dat"
    fileBmp = "/mnt/sd/bmp.dat"
    fileGps = "/mnt/sd/gps.dat"
    try:
        stop = False
        mpu = mpu6050.MPU6050()
        mpu.dmpInitialize()
        mag = hmc5883l.HMC5883L()
        mag.initialize()
        gpsp=GpsPoller()
        gpsp.start()
    except:
        # NOTE(review): bare except; this message also prints when the
        # magnetometer or GPS poller failed, not just the MPU.
        print('Error with MPU initialization')
    try:
        thread.start_new_thread(mpu6050Thread,(fileMpu,))
        thread.start_new_thread(hm5883lThread,(fileMag,))
        thread.start_new_thread(bmpThread,(fileBmp,))
        thread.start_new_thread(gpsThread,(fileGps,))
    except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
        print ("\nKilling Thread...")
        gpsp.running = False
        gpsp.join() # wait for the thread to finish what it's doin
    except:
        print("Error with threading")
        run = False
    try:
        # Give the sensor threads time to start before idling.
        time.sleep(4)
        while stop==False:
            #print mpuData,
            #print magData,
            #print bmpData
            if(stop==True):
                print('stop')
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        stop = True;
        print('stop')
    gpsp.running = False
    gpsp.join() # wait for the thread to finish what it's doin
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "1a797980cbd37ad3fb6d1e9e600e084c",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 93,
"avg_line_length": 26.586805555555557,
"alnum_prop": 0.5785555700666057,
"repo_name": "lj8385174/PyComms",
"id": "125a5fb737d4f028f63d7b4a9d15d04745acb4df",
"size": "7657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MPU6050/Examples/main_threads_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "231256"
}
],
"symlink_target": ""
} |
# Render miplevels.tx without warping at 256x256, enabling stochastic
# filtering (-stochastic 1) with a 4-texel width ramp and 8-bit output.
# ``testtex_command`` and ``OIIO_TESTSUITE_IMAGEDIR`` are provided by the
# OIIO testsuite harness, which also compares each file in ``outputs``
# against its reference.
command = testtex_command (OIIO_TESTSUITE_IMAGEDIR + "/miplevels.tx",
                           " -nowarp -res 256 256 -stochastic 1 -widthramp 4 -d uint8 -o out.tif")
outputs = [ "out.tif" ]
| {
"content_hash": "e5ff6399f9f09e7326027156ac0569e3",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 98,
"avg_line_length": 64.33333333333333,
"alnum_prop": 0.5958549222797928,
"repo_name": "lgritz/oiio",
"id": "91cb4344e816bdd9ddc61b726b84e473bcde6525",
"size": "558",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testsuite/texture-filtersize-stochastic/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "131728"
},
{
"name": "C++",
"bytes": "6649294"
},
{
"name": "CMake",
"bytes": "218101"
},
{
"name": "Makefile",
"bytes": "18697"
},
{
"name": "POV-Ray SDL",
"bytes": "5056106"
},
{
"name": "Python",
"bytes": "269004"
},
{
"name": "Shell",
"bytes": "56909"
}
],
"symlink_target": ""
} |
"""Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
_flatten = tkinter._flatten
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False
def _load_tile(master):
    """Load the Tile extension package into master's Tcl interpreter when
    running under Tk < 8.5 (where ttk widgets are not built in), honoring
    the TILE_LIBRARY environment variable for a custom install location.
    Sets ``master._tile_loaded`` so the load only happens once."""
    if _REQUIRE_TILE:
        import os
        tilelib = os.environ.get('TILE_LIBRARY')
        if tilelib:
            # append custom tile path to the list of directories that
            # Tcl uses when attempting to resolve packages with the package
            # command
            master.tk.eval(
                    'global auto_path; '
                    'lappend auto_path {%s}' % tilelib)
        master.tk.eval('package require tile') # TclError may be raised here
        master._tile_loaded = True
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
format = "%s" if not script else "{%s}"
opts = []
for opt, value in optdict.items():
if ignore and opt in ignore:
continue
if isinstance(value, (list, tuple)):
v = []
for val in value:
if isinstance(val, str):
v.append(str(val) if val else '{}')
else:
v.append(str(val))
# format v according to the script option, but also check for
# space in any value in v in order to group them correctly
value = format % ' '.join(
('{%s}' if ' ' in val else '%s') % val for val in v)
if script and value == '':
value = '{}' # empty string in Python is equivalent to {} in Tcl
opts.append(("-%s" % opt, value))
# Remember: _flatten skips over None
return _flatten(opts)
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
format = "%s" if not script else "{%s}"
opts = []
for opt, value in mapdict.items():
opt_val = []
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
for statespec in value:
state, val = statespec[:-1], statespec[-1]
if len(state) > 1: # group multiple states
state = "{%s}" % ' '.join(state)
else: # single state
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or '{}'
if isinstance(val, (list, tuple)): # val needs to be grouped
val = "{%s}" % ' '.join(map(str, val))
opt_val.append("%s %s" % (state, val))
opts.append(("-%s" % opt, format % ' '.join(opt_val)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
    """Formats args and kw according to the given element factory etype.
    Returns a (spec, opts) pair for ``ttk::style element create``; when
    `script` is true both are rendered as Tcl-script fragments."""
    spec = None
    opts = ()
    if etype in ("image", "vsapi"):
        if etype == "image": # define an element based on an image
            # first arg should be the default image name
            iname = args[0]
            # next args, if any, are statespec/value pairs which is almost
            # a mapdict, but we just need the value
            imagespec = _format_mapdict({None: args[1:]})[1]
            spec = "%s %s" % (iname, imagespec)
        else:
            # define an element whose visual appearance is drawn using the
            # Microsoft Visual Styles API which is responsible for the
            # themed styles on Windows XP and Vista.
            # Availability: Tk 8.6, Windows XP and Vista.
            class_name, part_id = args[:2]
            statemap = _format_mapdict({None: args[2:]})[1]
            spec = "%s %s %s" % (class_name, part_id, statemap)
        opts = _format_optdict(kw, script)
    elif etype == "from": # clone an element
        # it expects a themename and optionally an element to clone from,
        # otherwise it will clone {} (empty element)
        spec = args[0] # theme name
        if len(args) > 1: # elementfrom specified
            opts = (args[1], )
    if script:
        # Brace the spec and join the opts so each survives as Tcl words.
        spec = '{%s}' % spec
        opts = ' '.join(map(str, opts))
    return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
    """Formats a layout list so we can pass the result to ttk::style
    layout and ttk::style settings. Note that the layout doesn't have to
    be a list necessarily.
    E.g.:
      [("Menubutton.background", None),
       ("Menubutton.button", {"children":
           [("Menubutton.focus", {"children":
               [("Menubutton.padding", {"children":
                   [("Menubutton.label", {"side": "left", "expand": 1})]
               })]
           })]
       }),
       ("Menubutton.indicator", {"side": "right"})
      ]
      returns:
      Menubutton.background
      Menubutton.button -children {
        Menubutton.focus -children {
          Menubutton.padding -children {
            Menubutton.label -side left -expand 1
          }
        }
      }
      Menubutton.indicator -side right"""
    script = []
    for layout_elem in layout:
        elem, opts = layout_elem
        opts = opts or {}
        # "children" is handled structurally below, not as a plain option.
        fopts = ' '.join(map(str, _format_optdict(opts, True, "children")))
        head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
        if "children" in opts:
            script.append(head + " -children {")
            indent += indent_size
            newscript, indent = _format_layoutlist(opts['children'], indent,
                indent_size)
            script.append(newscript)
            indent -= indent_size
            script.append('%s}' % (' ' * indent))
        else:
            script.append(head)
    # The (possibly updated) indent is returned so recursive calls nest.
    return '\n'.join(script), indent
def _script_from_settings(settings):
    """Returns an appropriate script, based on settings, according to
    theme_settings definition to be used by theme_settings and
    theme_create."""
    script = []
    # a script will be generated according to settings passed, which
    # will then be evaluated by Tcl
    for name, opts in settings.items():
        # will format specific keys according to Tcl code
        if opts.get('configure'): # format 'configure'
            s = ' '.join(map(str, _format_optdict(opts['configure'], True)))
            script.append("ttk::style configure %s %s;" % (name, s))
        if opts.get('map'): # format 'map'
            s = ' '.join(map(str, _format_mapdict(opts['map'], True)))
            script.append("ttk::style map %s %s;" % (name, s))
        if 'layout' in opts: # format 'layout' which may be empty
            if not opts['layout']:
                s = 'null' # could be any other word, but this one makes sense
            else:
                s, _ = _format_layoutlist(opts['layout'])
            script.append("ttk::style layout %s {\n%s\n}" % (name, s))
        if opts.get('element create'): # format 'element create'
            eopts = opts['element create']
            etype = eopts[0]
            # find where args end, and where kwargs start
            argc = 1 # etype was the first one
            # Positional args run until the first mapping-like value.
            while argc < len(eopts) and not hasattr(eopts[argc], 'items'):
                argc += 1
            elemargs = eopts[1:argc]
            elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
            spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
            script.append("ttk::style element create %s %s %s %s" % (
                name, etype, spec, opts))
    return '\n'.join(script)
def _dict_from_tcltuple(ttuple, cut_minus=True):
    """Build a dict from a flat Tcl result tuple of alternating
    option/value items, converting Tcl objects to Python objects.
    When `cut_minus` is True the leading '-' of each option is stripped.
    ttuple is expected to contain an even number of elements."""
    start = 1 if cut_minus else 0
    # zip() over a single iterator pairs consecutive items.
    pairs = iter(ttuple)
    retdict = {str(opt)[start:]: val for opt, val in zip(pairs, pairs)}
    return tclobjs_to_py(retdict)
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
accepted statespec accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(val)
opts[opt] = val
return res
def _val_or_dict(options, func, *args):
    """Format options then call func with args and options and return
    the appropriate result.
    If no option is specified, a dict is returned. If an option is
    specified with the None value, the value for that option is returned.
    Otherwise, the function just sets the passed options and the caller
    shouldn't be expecting a return value anyway."""
    options = _format_optdict(options)
    res = func(*(args + options))
    # An odd number of formatted items means a trailing '-option' with no
    # value, i.e. a query for that single option.
    if len(options) % 2: # option specified without a value, return its value
        return res
    return _dict_from_tcltuple(res)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = str(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def tclobjs_to_py(adict):
    """Convert the values of `adict` from Tcl objects to Python objects
    in place, and return the dict."""
    for key in adict:
        val = adict[key]
        # Non-empty, non-string sequences get converted element-wise.
        if val and hasattr(val, '__len__') and not isinstance(val, str):
            if getattr(val[0], 'typename', None) == 'StateSpec':
                adict[key] = _list_from_statespec(val)
            else:
                adict[key] = [_convert_stringval(item) for item in val]
        # A single Tcl object (anything exposing 'typename').
        elif hasattr(val, 'typename'):
            adict[key] = _convert_stringval(val)
    return adict
def setup_master(master=None):
    """Return `master` when given; otherwise return the default Tk root
    (creating one if necessary).

    Raises RuntimeError when `master` is None and tkinter is configured
    to not allow a default root."""
    if master is not None:
        return master
    if not tkinter._support_default_root:
        raise RuntimeError(
                "No master specified and tkinter is "
                "configured to not support default root")
    return tkinter._default_root or tkinter.Tk()
class Style(object):
    """Manipulate style database."""
    # Name of the Tcl command every method delegates to.
    _name = "ttk::style"
    def __init__(self, master=None):
        master = setup_master(master)
        if not getattr(master, '_tile_loaded', False):
            # Load tile now, if needed
            _load_tile(master)
        self.master = master
        self.tk = self.master.tk
    def configure(self, style, query_opt=None, **kw):
        """Query or sets the default value of the specified option(s) in
        style.
        Each key in kw is an option and each value is either a string or
        a sequence identifying the value for that option."""
        if query_opt is not None:
            # A None-valued option turns the call into a query for it.
            kw[query_opt] = None
        return _val_or_dict(kw, self.tk.call, self._name, "configure", style)
    def map(self, style, query_opt=None, **kw):
        """Query or sets dynamic values of the specified option(s) in
        style.
        Each key in kw is an option and each value should be a list or a
        tuple (usually) containing statespecs grouped in tuples, or list,
        or something else of your preference. A statespec is compound of
        one or more states and then a value."""
        if query_opt is not None:
            return _list_from_statespec(
                self.tk.call(self._name, "map", style, '-%s' % query_opt))
        return _dict_from_tcltuple(
            self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))
    def lookup(self, style, option, state=None, default=None):
        """Returns the value specified for option in style.
        If state is specified it is expected to be a sequence of one
        or more states. If the default argument is set, it is used as
        a fallback value in case no specification for option is found."""
        state = ' '.join(state) if state else ''
        return self.tk.call(self._name, "lookup", style, '-%s' % option,
            state, default)
    def layout(self, style, layoutspec=None):
        """Define the widget layout for given style. If layoutspec is
        omitted, return the layout specification for given style.
        layoutspec is expected to be a list or an object different than
        None that evaluates to False if you want to "turn off" that style.
        If it is a list (or tuple, or something else), each item should be
        a tuple where the first item is the layout name and the second item
        should have the format described below:
        LAYOUTS
            A layout can contain the value None, if takes no options, or
            a dict of options specifying how to arrange the element.
            The layout mechanism uses a simplified version of the pack
            geometry manager: given an initial cavity, each element is
            allocated a parcel. Valid options/values are:
                side: whichside
                    Specifies which side of the cavity to place the
                    element; one of top, right, bottom or left. If
                    omitted, the element occupies the entire cavity.
                sticky: nswe
                    Specifies where the element is placed inside its
                    allocated parcel.
                children: [sublayout... ]
                    Specifies a list of elements to place inside the
                    element. Each element is a tuple (or other sequence)
                    where the first item is the layout name, and the other
                    is a LAYOUT."""
        lspec = None
        if layoutspec:
            lspec = _format_layoutlist(layoutspec)[0]
        elif layoutspec is not None: # will disable the layout ({}, '', etc)
            lspec = "null" # could be any other word, but this may make sense
                           # when calling layout(style) later
        return _list_from_layouttuple(
            self.tk.call(self._name, "layout", style, lspec))
    def element_create(self, elementname, etype, *args, **kw):
        """Create a new element in the current theme of given etype."""
        spec, opts = _format_elemcreate(etype, False, *args, **kw)
        self.tk.call(self._name, "element", "create", elementname, etype,
            spec, *opts)
    def element_names(self):
        """Returns the list of elements defined in the current theme."""
        return self.tk.call(self._name, "element", "names")
    def element_options(self, elementname):
        """Return the list of elementname's options."""
        return self.tk.call(self._name, "element", "options", elementname)
    def theme_create(self, themename, parent=None, settings=None):
        """Creates a new theme.
        It is an error if themename already exists. If parent is
        specified, the new theme will inherit styles, elements and
        layouts from the specified parent theme. If settings are present,
        they are expected to have the same syntax used for theme_settings."""
        script = _script_from_settings(settings) if settings else ''
        if parent:
            self.tk.call(self._name, "theme", "create", themename,
                "-parent", parent, "-settings", script)
        else:
            self.tk.call(self._name, "theme", "create", themename,
                "-settings", script)
    def theme_settings(self, themename, settings):
        """Temporarily sets the current theme to themename, apply specified
        settings and then restore the previous theme.
        Each key in settings is a style and each value may contain the
        keys 'configure', 'map', 'layout' and 'element create' and they
        are expected to have the same format as specified by the methods
        configure, map, layout and element_create respectively."""
        script = _script_from_settings(settings)
        self.tk.call(self._name, "theme", "settings", themename, script)
    def theme_names(self):
        """Returns a list of all known themes."""
        return self.tk.call(self._name, "theme", "names")
    def theme_use(self, themename=None):
        """If themename is None, returns the theme in use, otherwise, set
        the current theme to themename, refreshes all widgets and emits
        a <<ThemeChanged>> event."""
        if themename is None:
            # Starting on Tk 8.6, checking this global is no longer needed
            # since it allows doing self.tk.call(self._name, "theme", "use")
            return self.tk.eval("return $ttk::currentTheme")
        # using "ttk::setTheme" instead of "ttk::style theme use" causes
        # the variable currentTheme to be updated, also, ttk::setTheme calls
        # "ttk::style theme use" in order to change theme.
        self.tk.call("ttk::setTheme", themename)
class Widget(tkinter.Widget):
    """Base class for Tk themed widgets."""

    def __init__(self, master, widgetname, kw=None):
        """Constructs a Ttk Widget with the parent master.

        STANDARD OPTIONS

            class, cursor, takefocus, style

        SCROLLABLE WIDGET OPTIONS

            xscrollcommand, yscrollcommand

        LABEL WIDGET OPTIONS

            text, textvariable, underline, image, compound, width

        WIDGET STATES

            active, disabled, focus, pressed, selected, background,
            readonly, alternate, invalid
        """
        # setup_master/_load_tile are module-level helpers defined elsewhere
        # in this file; tile must be loaded into the interpreter exactly once.
        master = setup_master(master)
        if not getattr(master, '_tile_loaded', False):
            # Load tile now, if needed
            _load_tile(master)
        tkinter.Widget.__init__(self, master, widgetname, kw=kw)

    def identify(self, x, y):
        """Returns the name of the element at position x, y, or the empty
        string if the point does not lie within any element.

        x and y are pixel coordinates relative to the widget."""
        return self.tk.call(self._w, "identify", x, y)

    def instate(self, statespec, callback=None, *args, **kw):
        """Test the widget's state.

        If callback is not specified, returns True if the widget state
        matches statespec and False otherwise. If callback is specified,
        then it will be invoked with *args, **kw if the widget state
        matches statespec. statespec is expected to be a sequence."""
        ret = self.tk.call(self._w, "instate", ' '.join(statespec))
        # When a callback is given and the state matches, the callback's
        # return value (not a bool) is what the caller receives.
        if ret and callback:
            return callback(*args, **kw)
        return bool(ret)

    def state(self, statespec=None):
        """Modify or inquire widget state.

        Widget state is returned if statespec is None, otherwise it is
        set according to the statespec flags and then a new state spec
        is returned indicating which flags were changed. statespec is
        expected to be a sequence."""
        if statespec is not None:
            statespec = ' '.join(statespec)

        return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
class Button(Widget):
    """Ttk Button widget, displays a textual label and/or image, and
    evaluates a command when pressed."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Button widget with the parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            command, default, width
        """
        Widget.__init__(self, master, "ttk::button", kw)

    def invoke(self):
        """Invokes the command associated with the button."""
        # Returns whatever the button's -command returns (via Tcl).
        return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
    """Ttk Checkbutton widget which is either in on- or off-state."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Checkbutton widget with the parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            command, offvalue, onvalue, variable
        """
        Widget.__init__(self, master, "ttk::checkbutton", kw)

    def invoke(self):
        """Toggles between the selected and deselected states and
        invokes the associated command. If the widget is currently
        selected, sets the option variable to the offvalue option
        and deselects the widget; otherwise, sets the option variable
        to the option onvalue.

        Returns the result of the associated command."""
        return self.tk.call(self._w, "invoke")
class Entry(Widget, tkinter.Entry):
    """Ttk Entry widget displays a one-line text string and allows that
    string to be edited by the user."""

    def __init__(self, master=None, widget=None, **kw):
        """Constructs a Ttk Entry widget with the parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus, xscrollcommand

        WIDGET-SPECIFIC OPTIONS

            exportselection, invalidcommand, justify, show, state,
            textvariable, validate, validatecommand, width

        VALIDATION MODES

            none, key, focus, focusin, focusout, all
        """
        # 'widget' lets subclasses (e.g. Combobox) reuse this constructor
        # with a different Tcl widget command.
        Widget.__init__(self, master, widget or "ttk::entry", kw)

    def bbox(self, index):
        """Return a tuple of (x, y, width, height) which describes the
        bounding box of the character given by index."""
        return self.tk.call(self._w, "bbox", index)

    def identify(self, x, y):
        """Returns the name of the element at position x, y, or the
        empty string if the coordinates are outside the window."""
        return self.tk.call(self._w, "identify", x, y)

    def validate(self):
        """Force revalidation, independent of the conditions specified
        by the validate option. Returns False if validation fails, True
        if it succeeds. Sets or clears the invalid state accordingly."""
        return bool(self.tk.call(self._w, "validate"))
class Combobox(Entry):
    """Ttk Combobox widget combines a text field with a pop-down list of
    values."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Combobox widget with the parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            exportselection, justify, height, postcommand, state,
            textvariable, values, width
        """
        # The "values" option may need special formatting, so leave to
        # _format_optdict the responsibility to format it
        # (the dummy key 'v' is irrelevant: [1] picks the formatted value).
        if "values" in kw:
            kw["values"] = _format_optdict({'v': kw["values"]})[1]

        Entry.__init__(self, master, "ttk::combobox", **kw)

    def __setitem__(self, item, value):
        # Same formatting requirement as __init__ for the "values" option.
        if item == "values":
            value = _format_optdict({item: value})[1]

        Entry.__setitem__(self, item, value)

    def configure(self, cnf=None, **kw):
        """Custom Combobox configure, created to properly format the values
        option."""
        if "values" in kw:
            kw["values"] = _format_optdict({'v': kw["values"]})[1]

        return Entry.configure(self, cnf, **kw)

    def current(self, newindex=None):
        """If newindex is supplied, sets the combobox value to the
        element at position newindex in the list of values. Otherwise,
        returns the index of the current value in the list of values
        or -1 if the current value does not appear in the list."""
        return self.tk.call(self._w, "current", newindex)

    def set(self, value):
        """Sets the value of the combobox to value."""
        self.tk.call(self._w, "set", value)
class Frame(Widget):
    """Ttk Frame widget is a container, used to group other widgets
    together."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Frame with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            borderwidth, relief, padding, width, height
        """
        Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
    """Ttk Label widget displays a textual label and/or image."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Label with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, style, takefocus, text,
            textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            anchor, background, font, foreground, justify, padding,
            relief, text, wraplength
        """
        Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
    """Ttk Labelframe widget is a container used to group other widgets
    together. It has an optional label, which may be a plain text string
    or another widget."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Labelframe with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS
            labelanchor, text, underline, padding, labelwidget, width,
            height
        """
        Widget.__init__(self, master, "ttk::labelframe", kw)

LabelFrame = Labelframe # tkinter name compatibility
class Menubutton(Widget):
    """Ttk Menubutton widget displays a textual label and/or image, and
    displays a menu when pressed."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Menubutton with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            direction, menu
        """
        Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
    """Ttk Notebook widget manages a collection of windows and displays
    a single one at a time. Each child window is associated with a tab,
    which the user may select to change the currently-displayed window."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Notebook with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            height, padding, width

        TAB OPTIONS

            state, sticky, padding, text, image, compound, underline

        TAB IDENTIFIERS (tab_id)

            The tab_id argument found in several methods may take any of
            the following forms:

                * An integer between zero and the number of tabs
                * The name of a child window
                * A positional specification of the form "@x,y", which
                  defines the tab
                * The string "current", which identifies the
                  currently-selected tab
                * The string "end", which returns the number of tabs (only
                  valid for method index)
        """
        Widget.__init__(self, master, "ttk::notebook", kw)

    def add(self, child, **kw):
        """Adds a new tab to the notebook.

        If window is currently managed by the notebook but hidden, it is
        restored to its previous position."""
        # kw is converted to Tcl "-option value" pairs by _format_optdict.
        self.tk.call(self._w, "add", child, *(_format_optdict(kw)))

    def forget(self, tab_id):
        """Removes the tab specified by tab_id, unmaps and unmanages the
        associated window."""
        self.tk.call(self._w, "forget", tab_id)

    def hide(self, tab_id):
        """Hides the tab specified by tab_id.

        The tab will not be displayed, but the associated window remains
        managed by the notebook and its configuration remembered. Hidden
        tabs may be restored with the add command."""
        self.tk.call(self._w, "hide", tab_id)

    def identify(self, x, y):
        """Returns the name of the tab element at position x, y, or the
        empty string if none."""
        return self.tk.call(self._w, "identify", x, y)

    def index(self, tab_id):
        """Returns the numeric index of the tab specified by tab_id, or
        the total number of tabs if tab_id is the string "end"."""
        return self.tk.call(self._w, "index", tab_id)

    def insert(self, pos, child, **kw):
        """Inserts a pane at the specified position.

        pos is either the string end, an integer index, or the name of
        a managed child. If child is already managed by the notebook,
        moves it to the specified position."""
        self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))

    def select(self, tab_id=None):
        """Selects the specified tab.

        The associated child window will be displayed, and the
        previously-selected window (if different) is unmapped. If tab_id
        is omitted, returns the widget name of the currently selected
        pane."""
        return self.tk.call(self._w, "select", tab_id)

    def tab(self, tab_id, option=None, **kw):
        """Query or modify the options of the specific tab_id.

        If kw is not given, returns a dict of the tab option values. If option
        is specified, returns the value of that option. Otherwise, sets the
        options to the corresponding values."""
        # _val_or_dict dispatches between query-one / query-all / modify.
        if option is not None:
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "tab", tab_id)

    def tabs(self):
        """Returns a list of windows managed by the notebook."""
        return self.tk.call(self._w, "tabs") or ()

    def enable_traversal(self):
        """Enable keyboard traversal for a toplevel window containing
        this notebook.

        This will extend the bindings for the toplevel window containing
        this notebook as follows:

            Control-Tab: selects the tab following the currently selected
                         one

            Shift-Control-Tab: selects the tab preceding the currently
                               selected one

            Alt-K: where K is the mnemonic (underlined) character of any
                   tab, will select that tab.

        Multiple notebooks in a single toplevel may be enabled for
        traversal, including nested notebooks. However, notebook traversal
        only works properly if all panes are direct children of the
        notebook."""
        # The only, and good, difference I see is about mnemonics, which works
        # after calling this method. Control-Tab and Shift-Control-Tab always
        # works (here at least).
        self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, tkinter.PanedWindow):
    """Ttk Panedwindow widget displays a number of subwindows, stacked
    either vertically or horizontally."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Panedwindow with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            orient, width, height

        PANE OPTIONS

            weight
        """
        Widget.__init__(self, master, "ttk::panedwindow", kw)

    forget = tkinter.PanedWindow.forget # overrides Pack.forget

    def insert(self, pos, child, **kw):
        """Inserts a pane at the specified positions.

        pos is either the string end, and integer index, or the name
        of a child. If child is already managed by the paned window,
        moves it to the specified position."""
        self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))

    def pane(self, pane, option=None, **kw):
        """Query or modify the options of the specified pane.

        pane is either an integer index or the name of a managed subwindow.
        If kw is not given, returns a dict of the pane option values. If
        option is specified then the value for that option is returned.
        Otherwise, sets the options to the corresponding values."""
        if option is not None:
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "pane", pane)

    def sashpos(self, index, newpos=None):
        """If newpos is specified, sets the position of sash number index.

        May adjust the positions of adjacent sashes to ensure that
        positions are monotonically increasing. Sash positions are further
        constrained to be between 0 and the total size of the widget.

        Returns the new position of sash number index."""
        return self.tk.call(self._w, "sashpos", index, newpos)

PanedWindow = Panedwindow # tkinter name compatibility
class Progressbar(Widget):
    """Ttk Progressbar widget shows the status of a long-running
    operation. They can operate in two modes: determinate mode shows the
    amount completed relative to the total amount of work to be done, and
    indeterminate mode provides an animated display to let the user know
    that something is happening."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Progressbar with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            orient, length, mode, maximum, value, variable, phase
        """
        Widget.__init__(self, master, "ttk::progressbar", kw)

    def start(self, interval=None):
        """Begin autoincrement mode: schedules a recurring timer event
        that calls method step every interval milliseconds.

        interval defaults to 50 milliseconds (20 steps/second) if omitted."""
        self.tk.call(self._w, "start", interval)

    def step(self, amount=None):
        """Increments the value option by amount.

        amount defaults to 1.0 if omitted."""
        self.tk.call(self._w, "step", amount)

    def stop(self):
        """Stop autoincrement mode: cancels any recurring timer event
        initiated by start."""
        self.tk.call(self._w, "stop")
class Radiobutton(Widget):
    """Ttk Radiobutton widgets are used in groups to show or change a
    set of mutually-exclusive options."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Radiobutton with parent master.

        STANDARD OPTIONS

            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width

        WIDGET-SPECIFIC OPTIONS

            command, value, variable
        """
        Widget.__init__(self, master, "ttk::radiobutton", kw)

    def invoke(self):
        """Sets the option variable to the option value, selects the
        widget, and invokes the associated command.

        Returns the result of the command, or an empty string if
        no command is specified."""
        return self.tk.call(self._w, "invoke")
class Scale(Widget, tkinter.Scale):
    """Ttk Scale widget is typically used to control the numeric value of
    a linked variable that varies uniformly over some range."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Scale with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            command, from, length, orient, to, value, variable
        """
        Widget.__init__(self, master, "ttk::scale", kw)

    def configure(self, cnf=None, **kw):
        """Modify or query scale options.

        Setting a value for any of the "from", "from_" or "to" options
        generates a <<RangeChanged>> event.

        Returns the result of Widget.configure (a dict/value when used
        as a query, None when used as a setter)."""
        if cnf:
            kw.update(cnf)
        # Bug fix: propagate Widget.configure's return value so that
        # option *queries* through configure() no longer return None.
        retval = Widget.configure(self, **kw)
        if any(k in kw for k in ('from', 'from_', 'to')):
            self.event_generate('<<RangeChanged>>')
        return retval

    def get(self, x=None, y=None):
        """Get the current value of the value option, or the value
        corresponding to the coordinates x, y if they are specified.

        x and y are pixel coordinates relative to the scale widget
        origin."""
        return self.tk.call(self._w, 'get', x, y)
class Scrollbar(Widget, tkinter.Scrollbar):
    """Ttk Scrollbar controls the viewport of a scrollable widget."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Scrollbar with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            command, orient
        """
        Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
    """Ttk Separator widget displays a horizontal or vertical separator
    bar."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Separator with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus

        WIDGET-SPECIFIC OPTIONS

            orient
        """
        Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
    """Ttk Sizegrip allows the user to resize the containing toplevel
    window by pressing and dragging the grip."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Sizegrip with parent master.

        STANDARD OPTIONS

            class, cursor, state, style, takefocus
        """
        Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, tkinter.XView, tkinter.YView):
    """Ttk Treeview widget displays a hierarchical collection of items.

    Each item has a textual label, an optional image, and an optional list
    of data values. The data values are displayed in successive columns
    after the tree label."""

    def __init__(self, master=None, **kw):
        """Construct a Ttk Treeview with parent master.

        STANDARD OPTIONS

            class, cursor, style, takefocus, xscrollcommand,
            yscrollcommand

        WIDGET-SPECIFIC OPTIONS

            columns, displaycolumns, height, padding, selectmode, show

        ITEM OPTIONS

            text, image, values, open, tags

        TAG OPTIONS

            foreground, background, font, image
        """
        Widget.__init__(self, master, "ttk::treeview", kw)

    def bbox(self, item, column=None):
        """Returns the bounding box (relative to the treeview widget's
        window) of the specified item in the form x y width height.

        If column is specified, returns the bounding box of that cell.
        If the item is not visible (i.e., if it is a descendant of a
        closed item or is scrolled offscreen), returns an empty string."""
        return self.tk.call(self._w, "bbox", item, column)

    def get_children(self, item=None):
        """Returns a tuple of children belonging to item.

        If item is not specified, returns root children."""
        # An empty item string means the (invisible) root of the tree.
        return self.tk.call(self._w, "children", item or '') or ()

    def set_children(self, item, *newchildren):
        """Replaces item's child with newchildren.

        Children present in item that are not present in newchildren
        are detached from tree. No items in newchildren may be an
        ancestor of item."""
        self.tk.call(self._w, "children", item, newchildren)

    def column(self, column, option=None, **kw):
        """Query or modify the options for the specified column.

        If kw is not given, returns a dict of the column option values. If
        option is specified then the value for that option is returned.
        Otherwise, sets the options to the corresponding values."""
        if option is not None:
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "column", column)

    def delete(self, *items):
        """Delete all specified items and all their descendants. The root
        item may not be deleted."""
        self.tk.call(self._w, "delete", items)

    def detach(self, *items):
        """Unlinks all of the specified items from the tree.

        The items and all of their descendants are still present, and may
        be reinserted at another point in the tree, but will not be
        displayed. The root item may not be detached."""
        self.tk.call(self._w, "detach", items)

    def exists(self, item):
        """Returns True if the specified item is present in the tree,
        False otherwise."""
        return bool(self.tk.call(self._w, "exists", item))

    def focus(self, item=None):
        """If item is specified, sets the focus item to item. Otherwise,
        returns the current focus item, or '' if there is none."""
        return self.tk.call(self._w, "focus", item)

    def heading(self, column, option=None, **kw):
        """Query or modify the heading options for the specified column.

        If kw is not given, returns a dict of the heading option values. If
        option is specified then the value for that option is returned.
        Otherwise, sets the options to the corresponding values.

        Valid options/values are:
            text: text
                The text to display in the column heading
            image: image_name
                Specifies an image to display to the right of the column
                heading
            anchor: anchor
                Specifies how the heading text should be aligned. One of
                the standard Tk anchor values
            command: callback
                A callback to be invoked when the heading label is
                pressed.

        To configure the tree column heading, call this with column = "#0" """
        cmd = kw.get('command')
        if cmd and not isinstance(cmd, str):
            # callback not registered yet, do it now
            kw['command'] = self.master.register(cmd, self._substitute)

        if option is not None:
            kw[option] = None

        return _val_or_dict(kw, self.tk.call, self._w, 'heading', column)

    def identify(self, component, x, y):
        """Returns a description of the specified component under the
        point given by x and y, or the empty string if no such component
        is present at that position."""
        return self.tk.call(self._w, "identify", component, x, y)

    def identify_row(self, y):
        """Returns the item ID of the item at position y."""
        return self.identify("row", 0, y)

    def identify_column(self, x):
        """Returns the data column identifier of the cell at position x.

        The tree column has ID #0."""
        return self.identify("column", x, 0)

    def identify_region(self, x, y):
        """Returns one of:

        heading: Tree heading area.
        separator: Space between two columns headings;
        tree: The tree area.
        cell: A data cell.

        * Availability: Tk 8.6"""
        return self.identify("region", x, y)

    def identify_element(self, x, y):
        """Returns the element at position x, y.

        * Availability: Tk 8.6"""
        return self.identify("element", x, y)

    def index(self, item):
        """Returns the integer index of item within its parent's list
        of children."""
        return self.tk.call(self._w, "index", item)

    def insert(self, parent, index, iid=None, **kw):
        """Creates a new item and return the item identifier of the newly
        created item.

        parent is the item ID of the parent item, or the empty string
        to create a new top-level item. index is an integer, or the value
        end, specifying where in the list of parent's children to insert
        the new item. If index is less than or equal to zero, the new node
        is inserted at the beginning, if index is greater than or equal to
        the current number of children, it is inserted at the end. If iid
        is specified, it is used as the item identifier, iid must not
        already exist in the tree. Otherwise, a new unique identifier
        is generated."""
        opts = _format_optdict(kw)
        if iid:
            res = self.tk.call(self._w, "insert", parent, index,
                "-id", iid, *opts)
        else:
            res = self.tk.call(self._w, "insert", parent, index, *opts)

        return res

    def item(self, item, option=None, **kw):
        """Query or modify the options for the specified item.

        If no options are given, a dict with options/values for the item
        is returned. If option is specified then the value for that option
        is returned. Otherwise, sets the options to the corresponding
        values as given by kw."""
        if option is not None:
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "item", item)

    def move(self, item, parent, index):
        """Moves item to position index in parent's list of children.

        It is illegal to move an item under one of its descendants. If
        index is less than or equal to zero, item is moved to the
        beginning, if greater than or equal to the number of children,
        it is moved to the end. If item was detached it is reattached."""
        self.tk.call(self._w, "move", item, parent, index)

    reattach = move # A sensible method name for reattaching detached items

    def next(self, item):
        """Returns the identifier of item's next sibling, or '' if item
        is the last child of its parent."""
        return self.tk.call(self._w, "next", item)

    def parent(self, item):
        """Returns the ID of the parent of item, or '' if item is at the
        top level of the hierarchy."""
        return self.tk.call(self._w, "parent", item)

    def prev(self, item):
        """Returns the identifier of item's previous sibling, or '' if
        item is the first child of its parent."""
        return self.tk.call(self._w, "prev", item)

    def see(self, item):
        """Ensure that item is visible.

        Sets all of item's ancestors open option to True, and scrolls
        the widget if necessary so that item is within the visible
        portion of the tree."""
        self.tk.call(self._w, "see", item)

    def selection(self, selop=None, items=None):
        """If selop is not specified, returns selected items."""
        return self.tk.call(self._w, "selection", selop, items)

    def selection_set(self, items):
        """items becomes the new selection."""
        self.selection("set", items)

    def selection_add(self, items):
        """Add items to the selection."""
        self.selection("add", items)

    def selection_remove(self, items):
        """Remove items from the selection."""
        self.selection("remove", items)

    def selection_toggle(self, items):
        """Toggle the selection state of each item in items."""
        self.selection("toggle", items)

    def set(self, item, column=None, value=None):
        """With one argument, returns a dictionary of column/value pairs
        for the specified item. With two arguments, returns the current
        value of the specified column. With three arguments, sets the
        value of given column in given item to the specified value."""
        res = self.tk.call(self._w, "set", item, column, value)
        if column is None and value is None:
            # _dict_from_tcltuple (module helper) converts the Tcl pairs
            # into a Python dict; False disables value conversion here.
            return _dict_from_tcltuple(res, False)
        else:
            return res

    def tag_bind(self, tagname, sequence=None, callback=None):
        """Bind a callback for the given event sequence to the tag tagname.
        When an event is delivered to an item, the callbacks for each
        of the item's tags option are called."""
        self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)

    def tag_configure(self, tagname, option=None, **kw):
        """Query or modify the options for the specified tagname.

        If kw is not given, returns a dict of the option settings for tagname.
        If option is specified, returns the value for that option for the
        specified tagname. Otherwise, sets the options to the corresponding
        values for the given tagname."""
        if option is not None:
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "tag", "configure",
            tagname)

    def tag_has(self, tagname, item=None):
        """If item is specified, returns 1 or 0 depending on whether the
        specified item has the given tagname. Otherwise, returns a list of
        all items which have the specified tag.

        * Availability: Tk 8.6"""
        return self.tk.call(self._w, "tag", "has", tagname, item)
# Extensions
class LabeledScale(Frame):
    """A Ttk Scale widget with a Ttk Label widget indicating its
    current value.

    The Ttk Scale can be accessed through instance.scale, and Ttk Label
    can be accessed through instance.label"""

    def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
        """Construct an horizontal LabeledScale with parent master, a
        variable to be associated with the Ttk Scale widget and its range.
        If variable is not specified, a tkinter.IntVar is created.

        WIDGET-SPECIFIC OPTIONS

            compound: 'top' or 'bottom'
                Specifies how to display the label relative to the scale.
                Defaults to 'top'.
        """
        self._label_top = kw.pop('compound', 'top') == 'top'

        Frame.__init__(self, master, **kw)
        self._variable = variable or tkinter.IntVar(master)
        self._variable.set(from_)
        self._last_valid = from_

        self.label = Label(self)
        self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
        self.scale.bind('<<RangeChanged>>', self._adjust)

        # position scale and label according to the compound option
        scale_side = 'bottom' if self._label_top else 'top'
        label_side = 'top' if scale_side == 'bottom' else 'bottom'
        self.scale.pack(side=scale_side, fill='x')
        # pack() returns None, so tmp is None; the Label itself serves as
        # a placeholder reserving vertical space for self.label.
        tmp = Label(self).pack(side=label_side) # place holder
        self.label.place(anchor='n' if label_side == 'top' else 's')

        # update the label as scale or variable changes
        # NOTE(review): trace_variable/trace_vdelete are the old trace API,
        # deprecated in favor of trace_add/trace_remove — kept as-is here.
        self.__tracecb = self._variable.trace_variable('w', self._adjust)
        self.bind('<Configure>', self._adjust)
        self.bind('<Map>', self._adjust)

    def destroy(self):
        """Destroy this widget and possibly its associated variable."""
        try:
            self._variable.trace_vdelete('w', self.__tracecb)
        except AttributeError:
            # widget has been destroyed already
            pass
        else:
            del self._variable
            Frame.destroy(self)

    def _adjust(self, *args):
        """Adjust the label position according to the scale."""
        def adjust_label():
            self.update_idletasks() # "force" scale redraw

            x, y = self.scale.coords()
            if self._label_top:
                y = self.scale.winfo_y() - self.label.winfo_reqheight()
            else:
                y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()

            self.label.place_configure(x=x, y=y)

        from_, to = self.scale['from'], self.scale['to']
        if to < from_:
            from_, to = to, from_
        newval = self._variable.get()
        if not from_ <= newval <= to:
            # value outside range, set value back to the last valid one
            # (assignment goes through the `value` property below, which
            # writes the linked variable and re-triggers this callback)
            self.value = self._last_valid
            return

        self._last_valid = newval
        self.label['text'] = newval
        self.after_idle(adjust_label)

    def _get_value(self):
        """Return current scale value."""
        return self._variable.get()

    def _set_value(self, val):
        """Set new scale value."""
        self._variable.set(val)

    value = property(_get_value, _set_value)
class OptionMenu(Menubutton):
    """Themed OptionMenu, based after tkinter's OptionMenu, which allows
    the user to select a value from a menu."""

    def __init__(self, master, variable, default=None, *values, **kwargs):
        """Construct a themed OptionMenu widget with master as the parent,
        the resource textvariable set to variable, the initially selected
        value specified by the default parameter, the menu values given by
        *values and additional keywords.

        WIDGET-SPECIFIC OPTIONS

            style: stylename
                Menubutton style.
            direction: 'above', 'below', 'left', 'right', or 'flush'
                Menubutton direction.
            command: callback
                A callback that will be invoked after selecting an item.
        """
        kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
              'direction': kwargs.pop('direction', None)}
        Menubutton.__init__(self, master, **kw)
        self['menu'] = tkinter.Menu(self, tearoff=False)

        self._variable = variable
        self._callback = kwargs.pop('command', None)
        # Reject any keyword that was not consumed above, mirroring Tcl's
        # "unknown option" error style.
        if kwargs:
            raise tkinter.TclError('unknown option -%s' % (
                next(iter(kwargs.keys()))))

        self.set_menu(default, *values)

    def __getitem__(self, item):
        if item == 'menu':
            # Return the Menu widget object itself, not its Tcl path name.
            return self.nametowidget(Menubutton.__getitem__(self, item))

        return Menubutton.__getitem__(self, item)

    def set_menu(self, default=None, *values):
        """Build a new menu of radiobuttons with *values and optionally
        a default value."""
        menu = self['menu']
        menu.delete(0, 'end')
        for val in values:
            menu.add_radiobutton(label=val,
                command=tkinter._setit(self._variable, val, self._callback))

        if default:
            self._variable.set(default)

    def destroy(self):
        """Destroy this widget and its associated variable."""
        # Bug fix: destroy() can run more than once (explicit call plus the
        # teardown triggered when the parent is destroyed); previously the
        # second call raised AttributeError on the missing _variable.
        try:
            del self._variable
        except AttributeError:
            pass
        Menubutton.destroy(self)
| {
"content_hash": "f21956c680e5b2875f6e348a2dc62471",
"timestamp": "",
"source": "github",
"line_count": 1635,
"max_line_length": 80,
"avg_line_length": 34.401223241590216,
"alnum_prop": 0.6066564733492159,
"repo_name": "wdv4758h/ZipPy",
"id": "928e1de781803bed1e8133579bc12d21a11d9a82",
"size": "56246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib-python/3/tkinter/ttk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9447"
},
{
"name": "C",
"bytes": "106932"
},
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "Groff",
"bytes": "27753"
},
{
"name": "HTML",
"bytes": "721863"
},
{
"name": "Java",
"bytes": "1550721"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Makefile",
"bytes": "16156"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "33672733"
},
{
"name": "R",
"bytes": "1959"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3119"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
    is_usable = True

    def get_template_sources(self, template_name, template_dirs=None):
        """
        Returns the absolute paths to "template_name", when appended to each
        directory in "template_dirs". Any paths that don't lie inside one of the
        template dirs are excluded from the result set, for security reasons.
        """
        dirs = template_dirs or settings.TEMPLATE_DIRS
        for directory in dirs:
            try:
                yield safe_join(directory, template_name)
            except UnicodeDecodeError:
                # The template dir name was a bytestring that wasn't valid UTF-8.
                raise
            except ValueError:
                # The joined path was located outside of this particular
                # template_dir (it might be inside another one, so this isn't
                # fatal).
                pass

    def load_template_source(self, template_name, template_dirs=None):
        """Return (template source, path) for the first readable candidate,
        or raise TemplateDoesNotExist listing every path that was tried."""
        tried = []
        for filepath in self.get_template_sources(template_name, template_dirs):
            try:
                with open(filepath) as fp:
                    return (fp.read().decode(settings.FILE_CHARSET), filepath)
            except IOError:
                tried.append(filepath)
        # Nothing loaded: report what happened.
        if tried:
            error_msg = "Tried %s" % tried
        else:
            error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
        raise TemplateDoesNotExist(error_msg)
    load_template_source.is_usable = True
_loader = Loader()

def load_template_source(template_name, template_dirs=None):
    """Deprecated module-level entry point; delegates to the Loader instance."""
    # For backwards compatibility
    import warnings
    message = (
        "'django.template.loaders.filesystem.load_template_source' is deprecated; use 'django.template.loaders.filesystem.Loader' instead."
    )
    warnings.warn(message, DeprecationWarning)
    return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = True
| {
"content_hash": "02c808a70990794a4f433e8c14d0df4a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 140,
"avg_line_length": 39.65573770491803,
"alnum_prop": 0.614716825134353,
"repo_name": "rimbalinux/MSISDNArea",
"id": "d9577d3299aa37699bf2399d4ccd9821d84fbe6c",
"size": "2419",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/template/loaders/filesystem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "118069"
},
{
"name": "Python",
"bytes": "7281875"
}
],
"symlink_target": ""
} |
from .pythons3 import PythonS3
from .pythons3 import PythonS3 as Pythons3 | {
"content_hash": "f44eafaede9c48ddcb930aa08595083d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 36.5,
"alnum_prop": 0.8493150684931506,
"repo_name": "rootcss/pythons3",
"id": "c21709e12d4127f50473cb32b1a73b0e65318212",
"size": "73",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythons3/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2593"
}
],
"symlink_target": ""
} |
''' Client-side interactivity.
'''
from __future__ import absolute_import
from types import FunctionType
from ..core.has_props import abstract
from ..core.properties import Dict, String, Bool, AnyRef
from ..model import Model
from ..util.dependencies import import_required
from ..util.compiler import nodejs_compile, CompilationError
@abstract
class Callback(Model):
    ''' Base class for interactive callback.

    Concrete subclasses in this module (e.g. ``OpenURL``, ``CustomJS``)
    describe actions that run client-side in the browser.

    '''
class OpenURL(Callback):
    ''' Open a URL in a new tab or window (browser dependent).

    '''

    # The default is a bare scheme placeholder; templating (if any) is
    # resolved client-side from the data source.
    url = String("http://", help="""
    The URL to direct the web browser to. This can be a template string,
    which will be formatted with data from the data source.
    """)
class CustomJS(Callback):
    ''' Execute a JavaScript function.

    .. warning::
        The explicit purpose of this Bokeh Model is to embed *raw JavaScript
        code* for a browser to execute. If any part of the code is derived
        from untrusted user inputs, then you must take appropriate care to
        sanitize the user input prior to passing to Bokeh.

    '''

    @classmethod
    def from_py_func(cls, func):
        """ Create a CustomJS instance from a Python function. The
        function is translated to JavaScript using PScript.

        Raises:
            ValueError: if *func* is not a plain function object.
        """
        if not isinstance(func, FunctionType):
            raise ValueError('CustomJS.from_py_func needs function object.')
        pscript = import_required('pscript',
                                  'To use Python functions for CustomJS, you need PScript ' +
                                  '("conda install -c conda-forge pscript" or "pip install pscript")')
        # Collect default values. ``__defaults__`` is None (not an empty
        # tuple) for a function with no default arguments, so fall back to
        # () to avoid a TypeError from len(None) below.
        default_values = func.__defaults__ or ()
        default_names = func.__code__.co_varnames[:len(default_values)]
        args = dict(zip(default_names, default_values))
        args.pop('window', None)  # Clear window, so we use the global window object
        # Get JS code, we could rip out the function def, or just
        # call the function. We do the latter.
        code = pscript.py2js(func, 'cb') + 'cb(%s);\n' % ', '.join(default_names)
        return cls(code=code, args=args)

    @classmethod
    def from_coffeescript(cls, code, args=None):
        ''' Create a ``CustomJS`` instance from CoffeeScript code.

        '''
        # ``args`` defaults to None instead of a mutable ``{}`` so a single
        # shared dict is never reused across calls; callers see identical
        # behavior.
        compiled = nodejs_compile(code, lang="coffeescript", file="???")
        if "error" in compiled:
            raise CompilationError(compiled.error)
        else:
            return cls(code=compiled.code, args=args if args is not None else {})

    args = Dict(String, AnyRef, help="""
    A mapping of names to Python objects. In particular those can be bokeh's models.
    These objects are made available to the callback's code snippet as the values of
    named parameters to the callback.
    """)

    code = String(default="", help="""
    A snippet of JavaScript code to execute in the browser. The
    code is made into the body of a function, and all of of the named objects in
    ``args`` are available as parameters that the code can use. Additionally,
    a ``cb_obj`` parameter contains the object that triggered the callback
    and an optional ``cb_data`` parameter that contains any tool-specific data
    (i.e. mouse coordinates and hovered glyph indices for the HoverTool).

    .. note:: Use ``CustomJS.from_coffeescript()`` for CoffeeScript source code.
    """)

    use_strict = Bool(default=False, help="""
    Enables or disables automatic insertion of ``"use strict";`` into ``code``.
    """)
| {
"content_hash": "44efb6f8e5343ee8547f5c9b3d36cd70",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 102,
"avg_line_length": 37.53191489361702,
"alnum_prop": 0.6522108843537415,
"repo_name": "dennisobrien/bokeh",
"id": "3617970859de12b508a0167de96c77793bdb23b5",
"size": "3528",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bokeh/models/callbacks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102287"
},
{
"name": "CoffeeScript",
"bytes": "413132"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "47532"
},
{
"name": "JavaScript",
"bytes": "25172"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "PowerShell",
"bytes": "691"
},
{
"name": "Python",
"bytes": "3335869"
},
{
"name": "Shell",
"bytes": "9209"
},
{
"name": "TypeScript",
"bytes": "1634873"
}
],
"symlink_target": ""
} |
import sys
import os
import argparse
import numpy
import logging
from datetime import datetime
path = os.path.join(os.getcwd(), "lib")
sys.path.insert(0, path)
import pirates
import plot_functions as plt
def check_negative(value):
    """argparse ``type=`` validator: return *value* as a strictly positive int.

    Raises argparse.ArgumentTypeError for entries <= 0.
    """
    number = int(value)
    if number > 0:
        return number
    raise argparse.ArgumentTypeError("%s is an invalid entry" % value)
if __name__ == '__main__':
    desc = """plot.py is the main program for plotting the simulation"""
    # fix: prog previously said "simulation.py", so usage/help and argparse
    # error messages named the wrong script.
    parser = argparse.ArgumentParser(description = desc, prog = "plot.py")
    parser.add_argument('DirName', type=str, help="Enter the name of the directory")
    # initial data
    parser.add_argument('-id', '--initial_data', dest='init_data', action='store_true')
    parser.add_argument('-l', '--levels_color', type = check_negative, help="Enter the number of working processors", default = 10)
    # geometric vector field
    parser.add_argument('-gvf', '--geometric_vector_field', dest='geometric_vector_field', action='store_true', help='Plot the geometric vector field')
    parser.add_argument('-vf_x', '--horizontal_points_vf', type = check_negative, help="Enter the number of horizontal points for the vector field", default = 10)
    parser.add_argument('-vf_y', '--vertical_points_vf', type = check_negative, help="Enter the number of vertical points for the vector field", default = 10)
    # kernels
    parser.add_argument('-k', '--kernels', dest='kernels', action='store_true', help='Plot the kernels and cut-off functions')
    parser.add_argument('-p', '--processors', type = check_negative, help="Enter the number of working processors", default = 1)
    # complete solution and movie
    parser.add_argument('-s', '--solution', dest = 'solution', action = 'store_true', help = 'Plot the complete solution and the movie')
    args = parser.parse_args()
    dirName = args.DirName

    # Start each run with a fresh log file; a missing previous log is fine.
    filelog = os.path.join(dirName, 'Plot-pirates.txt')
    try:
        os.remove(filelog)
    except OSError:
        pass
    # fix: the keyword was misspelled "filemod"; logging expects "filemode",
    # so the intended 'w' (truncate) mode was never applied.
    logging.basicConfig(filename = filelog,
                        filemode = 'w', level = logging.DEBUG)
    logging.info('Started at ' + str(datetime.now()))

    # Reads all parameters, Initial Datum, Flow and MaxCharSpeed
    # (Python 2 execfile: injects x_1, y_1, n_x, ... used below).
    execfile(os.path.join(dirName, "parameters.py"))

    simul_pirates = pirates.pirates(x_1, x_2, y_1, y_2, n_x, n_y, M, tMax, d_o,
                                    InitialDatum_rho, InitialDatum_A,
                                    speed_ships, nu, dirName, mathcal_K, cut_off_C_pirates, kappa, a, cut_off_C_ships, cut_off_C_police, controls)

    if args.init_data:
        plt.plot_initial_data(simul_pirates, args.levels_color)

    if args.geometric_vector_field:
        plt.plt_geometric_vector_speed(simul_pirates, args.horizontal_points_vf, args.vertical_points_vf)

    if args.kernels:
        x = numpy.linspace(-1.5,1.5,50)
        y = numpy.linspace(-1.5,1.5,50)
        plt.plt_kernels(simul_pirates, x, y, 100, 100)

    if args.solution:
        plt.plt_solutions(simul_pirates, args.levels_color)
        plt.movie(simul_pirates)

    logging.info('Finished at ' + str(datetime.now()))
| {
"content_hash": "8681aab235a56c31de513aefb1ae7a25",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 162,
"avg_line_length": 33.8421052631579,
"alnum_prop": 0.6513219284603421,
"repo_name": "maurogaravello/pirati",
"id": "bb9bc874cfb0cf7a29aef5ba2b188b6dea437065",
"size": "3265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88287"
}
],
"symlink_target": ""
} |
"""Implement the Google Smart Home traits."""
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.util import color as color_util, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NOT_SUPPORTED,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)

# Fully-qualified Google Smart Home *trait* identifiers, e.g.
# "action.devices.traits.OnOff". Each trait class below sets one of these
# as its ``name``.
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = PREFIX_TRAITS + "CameraStream"
TRAIT_ONOFF = PREFIX_TRAITS + "OnOff"
TRAIT_DOCK = PREFIX_TRAITS + "Dock"
TRAIT_STARTSTOP = PREFIX_TRAITS + "StartStop"
TRAIT_BRIGHTNESS = PREFIX_TRAITS + "Brightness"
TRAIT_COLOR_SETTING = PREFIX_TRAITS + "ColorSetting"
TRAIT_SCENE = PREFIX_TRAITS + "Scene"
TRAIT_TEMPERATURE_SETTING = PREFIX_TRAITS + "TemperatureSetting"
TRAIT_LOCKUNLOCK = PREFIX_TRAITS + "LockUnlock"
TRAIT_FANSPEED = PREFIX_TRAITS + "FanSpeed"
TRAIT_MODES = PREFIX_TRAITS + "Modes"
TRAIT_OPENCLOSE = PREFIX_TRAITS + "OpenClose"
TRAIT_VOLUME = PREFIX_TRAITS + "Volume"
TRAIT_ARMDISARM = PREFIX_TRAITS + "ArmDisarm"
TRAIT_HUMIDITY_SETTING = PREFIX_TRAITS + "HumiditySetting"

# Fully-qualified Google Smart Home *command* identifiers, e.g.
# "action.devices.commands.OnOff". Traits list the commands they handle in
# their ``commands`` attribute. Note a few use lowerCamelCase (setVolume,
# volumeRelative) as defined by Google.
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = PREFIX_COMMANDS + "OnOff"
COMMAND_GET_CAMERA_STREAM = PREFIX_COMMANDS + "GetCameraStream"
COMMAND_DOCK = PREFIX_COMMANDS + "Dock"
COMMAND_STARTSTOP = PREFIX_COMMANDS + "StartStop"
COMMAND_PAUSEUNPAUSE = PREFIX_COMMANDS + "PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = PREFIX_COMMANDS + "BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = PREFIX_COMMANDS + "ColorAbsolute"
COMMAND_ACTIVATE_SCENE = PREFIX_COMMANDS + "ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
    PREFIX_COMMANDS + "ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
    PREFIX_COMMANDS + "ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = PREFIX_COMMANDS + "ThermostatSetMode"
COMMAND_LOCKUNLOCK = PREFIX_COMMANDS + "LockUnlock"
COMMAND_FANSPEED = PREFIX_COMMANDS + "SetFanSpeed"
COMMAND_MODES = PREFIX_COMMANDS + "SetModes"
COMMAND_OPENCLOSE = PREFIX_COMMANDS + "OpenClose"
COMMAND_SET_VOLUME = PREFIX_COMMANDS + "setVolume"
COMMAND_VOLUME_RELATIVE = PREFIX_COMMANDS + "volumeRelative"
COMMAND_ARMDISARM = PREFIX_COMMANDS + "ArmDisarm"
# Registry of all trait classes, filled in by the ``register_trait``
# decorator as each class is defined.
TRAITS = []


def register_trait(trait):
    """Class decorator: record *trait* in the TRAITS registry and return it."""
    TRAITS.append(trait)
    return trait
def _google_temp_unit(units):
    """Map a Home Assistant temperature unit to Google's "F"/"C" code."""
    return "F" if units == TEMP_FAHRENHEIT else "C"
class _Trait:
"""Represents a Trait inside Google Assistant skill."""
commands = []
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return False
def __init__(self, hass, state, config):
"""Initialize a trait for a state."""
self.hass = hass
self.state = state
self.config = config
def sync_attributes(self):
"""Return attributes for a sync request."""
raise NotImplementedError
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
raise NotImplementedError
def can_execute(self, command, params):
"""Test if command can be executed."""
return command in self.commands
async def execute(self, command, data, params, challenge):
"""Execute a trait command."""
raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
    """Trait to control brightness of a device.

    https://developers.google.com/actions/smarthome/traits/brightness
    """

    name = TRAIT_BRIGHTNESS
    commands = [COMMAND_BRIGHTNESS_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == light.DOMAIN and features & light.SUPPORT_BRIGHTNESS

    def sync_attributes(self):
        """Return brightness attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return brightness query attributes."""
        response = {}
        if self.state.domain == light.DOMAIN:
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS)
            # Scale HA's 0-255 brightness to Google's 0-100 percentage.
            response["brightness"] = (
                int(100 * (brightness / 255)) if brightness is not None else 0
            )
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a brightness command."""
        if self.state.domain == light.DOMAIN:
            service_data = {
                ATTR_ENTITY_ID: self.state.entity_id,
                light.ATTR_BRIGHTNESS_PCT: params["brightness"],
            }
            await self.hass.services.async_call(
                light.DOMAIN,
                light.SERVICE_TURN_ON,
                service_data,
                blocking=True,
                context=data.context,
            )
@register_trait
class CameraStreamTrait(_Trait):
    """Trait to stream from cameras.

    https://developers.google.com/actions/smarthome/traits/camerastream
    """

    name = TRAIT_CAMERA_STREAM
    commands = [COMMAND_GET_CAMERA_STREAM]

    # Set by execute(); holds the access URL of the most recent stream.
    stream_info = None

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == camera.DOMAIN and features & camera.SUPPORT_STREAM

    def sync_attributes(self):
        """Return stream attributes for a sync request."""
        return {
            "cameraStreamSupportedProtocols": ["hls"],
            "cameraStreamNeedAuthToken": False,
            "cameraStreamNeedDrmEncryption": False,
        }

    def query_attributes(self):
        """Return camera stream attributes."""
        if self.stream_info:
            return self.stream_info
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a get camera stream command."""
        url = await self.hass.components.camera.async_request_stream(
            self.state.entity_id, "hls"
        )
        self.stream_info = {
            "cameraStreamAccessUrl": self.hass.config.api.base_url + url
        }
@register_trait
class OnOffTrait(_Trait):
    """Trait to offer basic on and off functionality.

    https://developers.google.com/actions/smarthome/traits/onoff
    """

    name = TRAIT_ONOFF
    commands = [COMMAND_ONOFF]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain in (
            group.DOMAIN,
            input_boolean.DOMAIN,
            switch.DOMAIN,
            fan.DOMAIN,
            light.DOMAIN,
            media_player.DOMAIN,
        )

    def sync_attributes(self):
        """Return OnOff attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return OnOff query attributes."""
        return {"on": self.state.state != STATE_OFF}

    async def execute(self, command, data, params, challenge):
        """Execute an OnOff command."""
        domain = self.state.domain
        # Groups are switched through the core "homeassistant" domain; all
        # other domains provide their own turn_on/turn_off services.
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            service_domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ColorSettingTrait(_Trait):
    """Trait to offer color temperature functionality.
    https://developers.google.com/actions/smarthome/traits/colortemperature

    Handles both full color (HSV) and color temperature, depending on the
    light's supported features.
    """

    name = TRAIT_COLOR_SETTING
    commands = [COMMAND_COLOR_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False

        return features & light.SUPPORT_COLOR_TEMP or features & light.SUPPORT_COLOR

    def sync_attributes(self):
        """Return color temperature attributes for a sync request."""
        attrs = self.state.attributes
        features = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
        response = {}

        if features & light.SUPPORT_COLOR:
            response["colorModel"] = "hsv"

        if features & light.SUPPORT_COLOR_TEMP:
            # Max Kelvin is Min Mireds K = 1000000 / mireds
            # Min Kelvin is Max Mireds K = 1000000 / mireds
            response["colorTemperatureRange"] = {
                "temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MIN_MIREDS)
                ),
                "temperatureMinK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MAX_MIREDS)
                ),
            }

        return response

    def query_attributes(self):
        """Return color temperature query attributes."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        color = {}

        if features & light.SUPPORT_COLOR:
            color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
            if color_hs is not None:
                # Google expects saturation/value in 0..1, HA uses 0..100
                # for saturation and 0..255 for brightness.
                color["spectrumHsv"] = {
                    "hue": color_hs[0],
                    "saturation": color_hs[1] / 100,
                    "value": brightness / 255,
                }

        if features & light.SUPPORT_COLOR_TEMP:
            temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
            # Some faulty integrations might put 0 in here, raising exception.
            if temp == 0:
                _LOGGER.warning(
                    "Entity %s has incorrect color temperature %s",
                    self.state.entity_id,
                    temp,
                )
            elif temp is not None:
                color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
                    temp
                )

        response = {}

        if color:
            response["color"] = color

        return response

    async def execute(self, command, data, params, challenge):
        """Execute a color temperature command."""
        # Exactly one of the three keys below is expected in params["color"].
        if "temperature" in params["color"]:
            # Google sends Kelvin; HA's service takes mireds.
            temp = color_util.color_temperature_kelvin_to_mired(
                params["color"]["temperature"]
            )
            min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
            max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]

            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    "Temperature should be between {} and {}".format(
                        min_temp, max_temp
                    ),
                )

            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
                blocking=True,
                context=data.context,
            )

        elif "spectrumRGB" in params["color"]:
            # Convert integer to hex format and left pad with 0's till length 6
            hex_value = "{0:06x}".format(params["color"]["spectrumRGB"])
            color = color_util.color_RGB_to_hs(
                *color_util.rgb_hex_to_rgb_list(hex_value)
            )

            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
                blocking=True,
                context=data.context,
            )

        elif "spectrumHSV" in params["color"]:
            color = params["color"]["spectrumHSV"]
            # Scale Google's 0..1 saturation/value back to HA's ranges.
            saturation = color["saturation"] * 100
            brightness = color["value"] * 255

            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_HS_COLOR: [color["hue"], saturation],
                    light.ATTR_BRIGHTNESS: brightness,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class SceneTrait(_Trait):
    """Trait to offer scene functionality.

    https://developers.google.com/actions/smarthome/traits/scene
    """

    name = TRAIT_SCENE
    commands = [COMMAND_ACTIVATE_SCENE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain in (scene.DOMAIN, script.DOMAIN)

    def sync_attributes(self):
        """Return scene attributes for a sync request."""
        # Neither supported domain can support sceneReversible.
        return {}

    def query_attributes(self):
        """Return scene query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a scene command."""
        # Scripts can be slow, so only block while waiting for scenes.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_trait
class DockTrait(_Trait):
    """Trait to offer dock functionality.

    https://developers.google.com/actions/smarthome/traits/dock
    """

    name = TRAIT_DOCK
    commands = [COMMAND_DOCK]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return dock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return dock query attributes."""
        docked = self.state.state == vacuum.STATE_DOCKED
        return {"isDocked": docked}

    async def execute(self, command, data, params, challenge):
        """Execute a dock command."""
        service_data = {ATTR_ENTITY_ID: self.state.entity_id}
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_RETURN_TO_BASE,
            service_data,
            blocking=True,
            context=data.context,
        )
@register_trait
class StartStopTrait(_Trait):
    """Trait to offer StartStop functionality.

    https://developers.google.com/actions/smarthome/traits/startstop
    """

    name = TRAIT_STARTSTOP
    commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return StartStop attributes for a sync request."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return {"pausable": bool(features & vacuum.SUPPORT_PAUSE)}

    def query_attributes(self):
        """Return StartStop query attributes."""
        current = self.state.state
        return {
            "isRunning": current == vacuum.STATE_CLEANING,
            "isPaused": current == vacuum.STATE_PAUSED,
        }

    async def execute(self, command, data, params, challenge):
        """Execute a StartStop command."""
        # Map the (command, flag) pair onto the matching vacuum service.
        if command == COMMAND_STARTSTOP:
            service = vacuum.SERVICE_START if params["start"] else vacuum.SERVICE_STOP
        elif command == COMMAND_PAUSEUNPAUSE:
            # Unpausing restarts the vacuum.
            service = vacuum.SERVICE_PAUSE if params["pause"] else vacuum.SERVICE_START
        else:
            return
        await self.hass.services.async_call(
            self.state.domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class TemperatureSettingTrait(_Trait):
    """Trait to offer handling both temperature point and modes functionality.

    https://developers.google.com/actions/smarthome/traits/temperaturesetting

    Exposes climate entities as full thermostats and temperature sensors as
    query-only thermostats.
    """

    name = TRAIT_TEMPERATURE_SETTING
    commands = [
        COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
        COMMAND_THERMOSTAT_SET_MODE,
    ]
    # We do not support "on" as we are unable to know how to restore
    # the last mode.
    hvac_to_google = {
        climate.HVAC_MODE_HEAT: "heat",
        climate.HVAC_MODE_COOL: "cool",
        climate.HVAC_MODE_OFF: "off",
        climate.HVAC_MODE_AUTO: "auto",
        climate.HVAC_MODE_HEAT_COOL: "heatcool",
        climate.HVAC_MODE_FAN_ONLY: "fan-only",
        climate.HVAC_MODE_DRY: "dry",
    }
    google_to_hvac = {value: key for key, value in hvac_to_google.items()}

    preset_to_google = {climate.PRESET_ECO: "eco"}
    google_to_preset = {value: key for key, value in preset_to_google.items()}

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == climate.DOMAIN:
            return True
        return (
            domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_TEMPERATURE
        )

    @property
    def climate_google_modes(self):
        """Return supported Google modes."""
        modes = []
        attrs = self.state.attributes

        for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
            google_mode = self.hvac_to_google.get(mode)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)

        for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
            google_mode = self.preset_to_google.get(preset)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)

        return modes

    def sync_attributes(self):
        """Return temperature point and modes attributes for a sync request."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain
        response["thermostatTemperatureUnit"] = _google_temp_unit(
            self.hass.config.units.temperature_unit
        )

        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
                response["queryOnlyTemperatureSetting"] = True

        elif domain == climate.DOMAIN:
            modes = self.climate_google_modes
            # "on" is only advertised when we can map it to a real mode;
            # turning "on" is handled in execute() via the turn_on service.
            if "off" in modes and any(
                mode in modes for mode in ("heatcool", "heat", "cool")
            ):
                modes.append("on")
            response["availableThermostatModes"] = ",".join(modes)

        return response

    def query_attributes(self):
        """Return temperature point and modes query attributes."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain
        unit = self.hass.config.units.temperature_unit

        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_TEMPERATURE:
                current_temp = self.state.state
                if current_temp not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    response["thermostatTemperatureAmbient"] = round(
                        temp_util.convert(float(current_temp), unit, TEMP_CELSIUS), 1
                    )

        elif domain == climate.DOMAIN:
            operation = self.state.state
            preset = attrs.get(climate.ATTR_PRESET_MODE)
            supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)

            if preset in self.preset_to_google:
                response["thermostatMode"] = self.preset_to_google[preset]
            else:
                response["thermostatMode"] = self.hvac_to_google.get(operation)

            current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
            if current_temp is not None:
                response["thermostatTemperatureAmbient"] = round(
                    temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
                )

            current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
            if current_humidity is not None:
                response["thermostatHumidityAmbient"] = current_humidity

            if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
                if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                    response["thermostatTemperatureSetpointHigh"] = round(
                        temp_util.convert(
                            attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
                        ),
                        1,
                    )
                    response["thermostatTemperatureSetpointLow"] = round(
                        temp_util.convert(
                            attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
                        ),
                        1,
                    )
                else:
                    # No range support: report the single target as both ends.
                    target_temp = attrs.get(ATTR_TEMPERATURE)
                    if target_temp is not None:
                        target_temp = round(
                            temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                        )
                        response["thermostatTemperatureSetpointHigh"] = target_temp
                        response["thermostatTemperatureSetpointLow"] = target_temp
            else:
                target_temp = attrs.get(ATTR_TEMPERATURE)
                if target_temp is not None:
                    response["thermostatTemperatureSetpoint"] = round(
                        temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                    )

        return response

    async def execute(self, command, data, params, challenge):
        """Execute a temperature point or mode command."""
        domain = self.state.domain
        if domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )

        # All sent in temperatures are always in Celsius
        unit = self.hass.config.units.temperature_unit
        min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
        max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]

        if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
            temp = temp_util.convert(
                params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp = round(temp)

            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    "Temperature should be between {} and {}".format(
                        min_temp, max_temp
                    ),
                )

            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
                blocking=True,
                context=data.context,
            )

        elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
            temp_high = temp_util.convert(
                params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_high = round(temp_high)

            if temp_high < min_temp or temp_high > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    "Upper bound for temperature range should be between "
                    "{} and {}".format(min_temp, max_temp),
                )

            temp_low = temp_util.convert(
                params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_low = round(temp_low)

            if temp_low < min_temp or temp_low > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    "Lower bound for temperature range should be between "
                    "{} and {}".format(min_temp, max_temp),
                )

            # fix: default to 0 (as done elsewhere in this class) so a
            # missing supported_features attribute doesn't raise TypeError
            # on the bitwise test below.
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            svc_data = {ATTR_ENTITY_ID: self.state.entity_id}

            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
                svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
            else:
                # No range support: aim for the middle of the requested range.
                svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2

            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                svc_data,
                blocking=True,
                context=data.context,
            )

        elif command == COMMAND_THERMOSTAT_SET_MODE:
            target_mode = params["thermostatMode"]

            if target_mode == "on":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_ON,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return

            if target_mode == "off":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_OFF,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return

            if target_mode in self.google_to_preset:
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    climate.SERVICE_SET_PRESET_MODE,
                    {
                        climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
                        ATTR_ENTITY_ID: self.state.entity_id,
                    },
                    blocking=True,
                    context=data.context,
                )
                return

            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_HVAC_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class HumiditySettingTrait(_Trait):
    """Trait to offer humidity setting functionality.

    https://developers.google.com/actions/smarthome/traits/humiditysetting
    """

    name = TRAIT_HUMIDITY_SETTING
    commands = []

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY

    def _is_humidity_sensor(self):
        """Return whether this entity is a humidity sensor."""
        return (
            self.state.domain == sensor.DOMAIN
            and self.state.attributes.get(ATTR_DEVICE_CLASS)
            == sensor.DEVICE_CLASS_HUMIDITY
        )

    def sync_attributes(self):
        """Return humidity attributes for a sync request."""
        response = {}
        if self._is_humidity_sensor():
            # Sensors can only be read, never commanded.
            response["queryOnlyHumiditySetting"] = True
        return response

    def query_attributes(self):
        """Return humidity query attributes."""
        response = {}
        if self._is_humidity_sensor():
            humidity = self.state.state
            if humidity not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                response["humidityAmbientPercent"] = round(float(humidity))
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a humidity command."""
        if self.state.domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
@register_trait
class LockUnlockTrait(_Trait):
    """Trait to lock or unlock a lock.

    https://developers.google.com/actions/smarthome/traits/lockunlock
    """

    name = TRAIT_LOCKUNLOCK
    commands = [COMMAND_LOCKUNLOCK]

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == lock.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def sync_attributes(self):
        """Return LockUnlock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return LockUnlock query attributes."""
        return {"isLocked": self.state.state == STATE_LOCKED}

    async def execute(self, command, data, params, challenge):
        """Execute an LockUnlock command."""
        if not params["lock"]:
            # Unlocking is the security-sensitive direction; challenge first.
            _verify_pin_challenge(data, self.state, challenge)
        service = lock.SERVICE_LOCK if params["lock"] else lock.SERVICE_UNLOCK
        await self.hass.services.async_call(
            lock.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ArmDisArmTrait(_Trait):
    """Trait to Arm or Disarm a Security System.

    https://developers.google.com/actions/smarthome/traits/armdisarm
    """

    name = TRAIT_ARMDISARM
    commands = [COMMAND_ARMDISARM]

    # Maps an alarm panel state to the service that produces it.
    state_to_service = {
        STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
    }

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == alarm_control_panel.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def sync_attributes(self):
        """Return ArmDisarm attributes for a sync request."""
        levels = []
        for state in self.state_to_service:
            # Level synonyms are generated from state names:
            # 'armed_away' becomes 'armed away' and 'away'.
            synonyms = [state.replace("_", " ")]
            if state != STATE_ALARM_TRIGGERED:
                synonyms.append(state.split("_")[1])
            levels.append(
                {
                    "level_name": state,
                    "level_values": [{"level_synonym": synonyms, "lang": "en"}],
                }
            )
        return {"availableArmLevels": {"levels": levels, "ordered": False}}

    def query_attributes(self):
        """Return ArmDisarm query attributes."""
        # Prefer the post-pending target state so Google sees where the
        # panel is heading, not the transient 'pending' state.
        armed_state = self.state.attributes.get(
            "post_pending_state", self.state.state
        )
        is_armed = armed_state in self.state_to_service
        response = {"isArmed": is_armed}
        if is_armed:
            response["currentArmLevel"] = armed_state
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an ArmDisarm command."""
        arm = params["arm"]
        cancel = params.get("cancel")
        if arm and not cancel:
            if self.state.state == params["armLevel"]:
                raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
            if self.state.attributes["code_arm_required"]:
                _verify_pin_challenge(data, self.state, challenge)
            service = self.state_to_service[params["armLevel"]]
        elif arm and cancel and self.state.state == STATE_ALARM_PENDING:
            # Disarm without asking for a code when a 'cancel' arming
            # action arrives while arming is still pending.
            service = SERVICE_ALARM_DISARM
        else:
            if self.state.state == STATE_ALARM_DISARMED:
                raise SmartHomeError(
                    ERR_ALREADY_DISARMED, "System is already disarmed"
                )
            _verify_pin_challenge(data, self.state, challenge)
            service = SERVICE_ALARM_DISARM
        await self.hass.services.async_call(
            alarm_control_panel.DOMAIN,
            service,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                ATTR_CODE: data.config.secure_devices_pin,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class FanSpeedTrait(_Trait):
    """Trait to control speed of Fan.

    https://developers.google.com/actions/smarthome/traits/fanspeed
    """

    name = TRAIT_FANSPEED
    commands = [COMMAND_FANSPEED]

    # Synonyms Google may use for each supported fan speed.
    speed_synonyms = {
        fan.SPEED_OFF: ["stop", "off"],
        fan.SPEED_LOW: ["slow", "low", "slowest", "lowest"],
        fan.SPEED_MEDIUM: ["medium", "mid", "middle"],
        fan.SPEED_HIGH: ["high", "max", "fast", "highest", "fastest", "maximum"],
    }

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        return domain == fan.DOMAIN and features & fan.SUPPORT_SET_SPEED

    def sync_attributes(self):
        """Return speed point and modes attributes for a sync request."""
        # Only advertise speeds we have synonyms for.
        speeds = [
            {
                "speed_name": mode,
                "speed_values": [
                    {"speed_synonym": self.speed_synonyms.get(mode), "lang": "en"}
                ],
            }
            for mode in self.state.attributes.get(fan.ATTR_SPEED_LIST, [])
            if mode in self.speed_synonyms
        ]
        reversible = bool(
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & fan.SUPPORT_DIRECTION
        )
        return {
            "availableFanSpeeds": {"speeds": speeds, "ordered": True},
            "reversible": reversible,
        }

    def query_attributes(self):
        """Return speed point and modes query attributes."""
        response = {}
        current = self.state.attributes.get(fan.ATTR_SPEED)
        if current is not None:
            response["on"] = current != fan.SPEED_OFF
            response["online"] = True
            response["currentFanSpeedSetting"] = current
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an SetFanSpeed command."""
        await self.hass.services.async_call(
            fan.DOMAIN,
            fan.SERVICE_SET_SPEED,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                fan.ATTR_SPEED: params["fanSpeed"],
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class ModesTrait(_Trait):
    """Trait to set modes.

    https://developers.google.com/actions/smarthome/traits/modes
    """

    name = TRAIT_MODES
    commands = [COMMAND_MODES]

    # Human-friendly synonyms Google may use for each mode name.
    SYNONYMS = {
        "input source": ["input source", "input", "source"],
        "sound mode": ["sound mode", "effects"],
    }

    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain != media_player.DOMAIN:
            return False
        return (
            features & media_player.SUPPORT_SELECT_SOURCE
            or features & media_player.SUPPORT_SELECT_SOUND_MODE
        )

    def sync_attributes(self):
        """Return mode attributes for a sync request."""

        def _generate(name, settings):
            # Build the Google descriptor for one mode and its settings.
            mode = {
                "name": name,
                "name_values": [
                    {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
                ],
                "settings": [],
                "ordered": False,
            }
            for setting in settings:
                mode["settings"].append(
                    {
                        "setting_name": setting,
                        "setting_values": [
                            {
                                "setting_synonym": self.SYNONYMS.get(
                                    setting, [setting]
                                ),
                                "lang": "en",
                            }
                        ],
                    }
                )
            return mode

        attrs = self.state.attributes
        modes = []
        if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
            modes.append(
                _generate("input source", attrs[media_player.ATTR_INPUT_SOURCE_LIST])
            )
        if media_player.ATTR_SOUND_MODE_LIST in attrs:
            modes.append(
                _generate("sound mode", attrs[media_player.ATTR_SOUND_MODE_LIST])
            )
        return {"availableModes": modes}

    def query_attributes(self):
        """Return current modes."""
        attrs = self.state.attributes
        response = {}
        mode_settings = {}
        if media_player.ATTR_INPUT_SOURCE_LIST in attrs:
            mode_settings["input source"] = attrs.get(media_player.ATTR_INPUT_SOURCE)
        if media_player.ATTR_SOUND_MODE_LIST in attrs:
            mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
        if mode_settings:
            response["on"] = self.state.state != STATE_OFF
            response["online"] = True
            response["currentModeSettings"] = mode_settings
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an SetModes command."""
        # Bug fix: params.get(...) could return None when the payload has
        # no "updateModeSettings" key, making settings.get(...) raise
        # AttributeError. Treat a missing/None payload as "no settings".
        settings = params.get("updateModeSettings") or {}
        requested_source = settings.get("input source")
        sound_mode = settings.get("sound mode")
        if requested_source:
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOURCE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_INPUT_SOURCE: requested_source,
                },
                blocking=True,
                context=data.context,
            )
        if sound_mode:
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOUND_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_SOUND_MODE: sound_mode,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class OpenCloseTrait(_Trait):
    """Trait to open and close a cover.
    https://developers.google.com/actions/smarthome/traits/openclose
    """
    # Cover device classes that require 2FA
    COVER_2FA = (cover.DEVICE_CLASS_DOOR, cover.DEVICE_CLASS_GARAGE)
    name = TRAIT_OPENCLOSE
    commands = [COMMAND_OPENCLOSE]
    # Last commanded position, remembered by execute() for assumed-state or
    # unknown-state covers so query_attributes() can still answer.
    override_position = None
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == cover.DOMAIN:
            return True
        # Binary sensors of door-like classes can report open/closed but
        # cannot be commanded (see queryOnlyOpenClose below).
        return domain == binary_sensor.DOMAIN and device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_LOCK,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        )
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA
    def sync_attributes(self):
        """Return opening direction."""
        response = {}
        if self.state.domain == binary_sensor.DOMAIN:
            # Sensors can only be queried, never commanded.
            response["queryOnlyOpenClose"] = True
        return response
    def query_attributes(self):
        """Return state query attributes.

        Raises SmartHomeError(ERR_NOT_SUPPORTED) for assumed-state or
        unknown-state covers with no remembered override position.
        """
        domain = self.state.domain
        response = {}
        if self.override_position is not None:
            response["openPercent"] = self.override_position
        elif domain == cover.DOMAIN:
            # When it's an assumed state, we will return that querying state
            # is not supported.
            if self.state.attributes.get(ATTR_ASSUMED_STATE):
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            if self.state.state == STATE_UNKNOWN:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            # NOTE(review): override_position is always None on this branch
            # (handled by the first `if`), so the `or` below is redundant and
            # this reduces to the current-position attribute lookup.
            position = self.override_position or self.state.attributes.get(
                cover.ATTR_CURRENT_POSITION
            )
            if position is not None:
                response["openPercent"] = position
            elif self.state.state != cover.STATE_CLOSED:
                # Cover without position support: report fully open unless
                # it is explicitly closed.
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        elif domain == binary_sensor.DOMAIN:
            # Binary sensor: ON means open.
            if self.state.state == STATE_ON:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        return response
    async def execute(self, command, data, params, challenge):
        """Execute an Open, close, Set position command.

        params["openPercent"]: requested position, 0 (closed) to 100 (open).
        """
        domain = self.state.domain
        if domain == cover.DOMAIN:
            svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
            if params["openPercent"] == 0:
                service = cover.SERVICE_CLOSE_COVER
                # Closing is not security sensitive; never challenge.
                should_verify = False
            elif params["openPercent"] == 100:
                service = cover.SERVICE_OPEN_COVER
                should_verify = True
            elif (
                self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & cover.SUPPORT_SET_POSITION
            ):
                service = cover.SERVICE_SET_COVER_POSITION
                should_verify = True
                svc_params[cover.ATTR_POSITION] = params["openPercent"]
            else:
                raise SmartHomeError(
                    ERR_FUNCTION_NOT_SUPPORTED, "Setting a position is not supported"
                )
            # Doors/garages require a PIN challenge before opening.
            if (
                should_verify
                and self.state.attributes.get(ATTR_DEVICE_CLASS)
                in OpenCloseTrait.COVER_2FA
            ):
                _verify_pin_challenge(data, self.state, challenge)
            await self.hass.services.async_call(
                cover.DOMAIN, service, svc_params, blocking=True, context=data.context
            )
            # Remember the commanded position when the real state cannot be
            # queried, so query_attributes() can report it.
            if (
                self.state.attributes.get(ATTR_ASSUMED_STATE)
                or self.state.state == STATE_UNKNOWN
            ):
                self.override_position = params["openPercent"]
@register_trait
class VolumeTrait(_Trait):
    """Trait to control the volume of a media player.

    https://developers.google.com/actions/smarthome/traits/volume
    """
    name = TRAIT_VOLUME
    commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE]
    @staticmethod
    def supported(domain, features, device_class):
        """Test if state is supported."""
        if domain == media_player.DOMAIN:
            return features & media_player.SUPPORT_VOLUME_SET
        return False
    def sync_attributes(self):
        """Return volume attributes for a sync request."""
        return {}
    def query_attributes(self):
        """Return volume query attributes."""
        response = {}
        level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
        if level is not None:
            # Convert 0.0-1.0 to 0-100
            response["currentVolume"] = int(level * 100)
            response["isMuted"] = bool(muted)
        return response
    async def _execute_set_volume(self, data, params):
        """Set the absolute volume (Google sends 0-100, HA takes 0.0-1.0)."""
        level = params["volumeLevel"]
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: level / 100,
            },
            blocking=True,
            context=data.context,
        )
    async def _execute_volume_relative(self, data, params):
        """Adjust the volume relative to the current level."""
        # This could also support up/down commands using relativeSteps
        relative = params["volumeRelativeLevel"]
        # NOTE(review): if the volume-level attribute is absent, `current`
        # is None and the addition below raises TypeError — confirm callers
        # guarantee the attribute when SUPPORT_VOLUME_SET is advertised.
        current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: current + relative / 100,
            },
            blocking=True,
            context=data.context,
        )
    async def execute(self, command, data, params, challenge):
        """Execute a volume command by dispatching on the command name."""
        if command == COMMAND_SET_VOLUME:
            await self._execute_set_volume(data, params)
        elif command == COMMAND_VOLUME_RELATIVE:
            await self._execute_volume_relative(data, params)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
"""Verify a pin challenge."""
if not data.config.should_2fa(state):
return
if not data.config.secure_devices_pin:
raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
if not challenge:
raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
pin = challenge.get("pin")
if pin != data.config.secure_devices_pin:
raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
"""Verify an ack challenge."""
if not data.config.should_2fa(state):
return
if not challenge or not challenge.get("ack"):
raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
| {
"content_hash": "8e665552d1efe7b4ba1ac6db34d15288",
"timestamp": "",
"source": "github",
"line_count": 1454,
"max_line_length": 88,
"avg_line_length": 33.77647867950481,
"alnum_prop": 0.5713791207672415,
"repo_name": "postlund/home-assistant",
"id": "b4585ebde033d92f037169dcf772c65ab02cf617",
"size": "49111",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/google_assistant/trait.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
# Regression test: C3 linearisation (MRO) of a non-trivial multiple-
# inheritance lattice. Class names are part of the printed output, so
# they must not be changed.
class A(object): pass
class B(object): pass
class C(object): pass
class D(object): pass
class E(object): pass
class K1(A,B,C): pass
class K2(D,B,E): pass
class K3(D,A): pass
# Expected C3 order: Z, K1, K2, K3, D, A, B, C, E, object.
class Z(K1,K2,K3): pass
print(Z.mro())
| {
"content_hash": "4465a240a33f9765c70d2a6b17ae0326",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 23,
"avg_line_length": 21.5,
"alnum_prop": 0.6790697674418604,
"repo_name": "bjpop/berp",
"id": "976e0a4cf2599cb3b77ec24a2df1282a0c0d0fa5",
"size": "467",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/regression/features/classes/tricky_mro_3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "367"
},
{
"name": "Haskell",
"bytes": "242614"
},
{
"name": "Python",
"bytes": "16392"
},
{
"name": "TeX",
"bytes": "8697"
}
],
"symlink_target": ""
} |
import unittest
import utils
from testbin import TestBin
class TestBinCocheck(TestBin, unittest.TestCase):
    """Run the shared TestBin test cases against the 'co-check' binary."""
    def setUp(self):
        # Name of the binary under test, consumed by TestBin's shared cases.
        self.bin = 'co-check'
        # Flag read by TestBin to skip the long-running execution checks.
        self.do_not_test_running = True
    def tearDown(self):
        # Nothing to clean up per test.
        pass
| {
"content_hash": "e6cfd8e79858fdf6758a50b886785a4e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 49,
"avg_line_length": 19.833333333333332,
"alnum_prop": 0.6722689075630253,
"repo_name": "ow2-mirrors/compatibleone",
"id": "b4280ce138dcad16414edb3301d167fec5834b8a",
"size": "267",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "testsuite/basic/cocheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8939064"
},
{
"name": "C++",
"bytes": "12433"
},
{
"name": "Java",
"bytes": "158731"
},
{
"name": "Objective-C",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "751"
},
{
"name": "Python",
"bytes": "157987"
},
{
"name": "Shell",
"bytes": "67378"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.