code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body limiting middleware.
"""
import webob.dec
import webob.exc
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder import wsgi
# Default maximum request body size is 112 KiB (114688 bytes).
max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
                                       default=114688,
                                       help='Max size for body of a request')

FLAGS = flags.FLAGS
FLAGS.register_opt(max_request_body_size_opt)

LOG = logging.getLogger(__name__)
class RequestBodySizeLimiter(wsgi.Middleware):
    """Reject requests whose body exceeds the configured maximum size.

    Both the Content-Length header and the actual body length are checked
    against FLAGS.osapi_max_request_body_size; oversized requests are
    refused with HTTP 400 Bad Request.

    (The original docstring, "Add a 'cinder.context' to WSGI environ.",
    was a copy-paste error from another middleware.)
    """

    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        max_size = FLAGS.osapi_max_request_body_size
        too_large = (req.content_length > max_size
                     or len(req.body) > max_size)
        if not too_large:
            return self.application
        msg = _("Request is too large.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
| tylertian/Openstack | openstack F/cinder/cinder/api/sizelimit.py | Python | apache-2.0 | 1,789 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SqueezeNet implementation with TPU support.
Training loop and input pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import data_pipeline
import squeezenet_model
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'tpu', default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
    "gcp_project", default=None,
    help="Project name for the Cloud TPU-enabled project. If not specified, we "
    "will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_string(
    "tpu_zone", default=None,
    help="GCE zone where the Cloud TPU is located in. If not specified, we "
    "will attempt to automatically detect the GCE project from metadata.")

# Model specific parameters
flags.DEFINE_string("data_dir", "", "Location of training files.")
flags.DEFINE_string("model_dir", "", "Where to store model checkpoints.")
flags.DEFINE_integer("save_checkpoints_secs", 3600,
                     "Interval between saving model checkpoints.")
flags.DEFINE_integer("num_shards", 8, "Number of TPU shards.")

# Training / evaluation hyperparameters.
flags.DEFINE_integer("batch_size", 1024, "Batch size for training and eval.")
flags.DEFINE_boolean("use_tpu", True, "If true, use TPU device.")
flags.DEFINE_string("optimizer", "momentum", "Optimizer: momentum|adam|rmsprop")
flags.DEFINE_float("momentum", 0.9, "Momentum parameter for SGD optimizer.")
flags.DEFINE_integer("num_epochs", 150,
                     "Number of epochs of the training set to process.")
flags.DEFINE_integer("num_evals", 10,
                     "How many times to run an evaluation during training.")
flags.DEFINE_float("learning_rate", 0.03, "Learning rate.")

FLAGS = flags.FLAGS
def main(argv):
    """Train SqueezeNet on a Cloud TPU, interleaving periodic evaluations."""
    del argv  # Unused.

    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu,
        zone=FLAGS.tpu_zone,
        project=FLAGS.gcp_project)

    # ImageNet sizes: ~1.3M training images, 50k validation images.
    training_examples = 1300 * 1000 * FLAGS.num_epochs
    eval_examples = 50 * 1000

    params = {
        "num_classes": 1001,
        "lr": FLAGS.learning_rate,
        "min_lr": 0.005,
        "momentum": FLAGS.momentum,
        "optimizer": FLAGS.optimizer,
        "num_eval_examples": eval_examples,
        "num_shards": FLAGS.num_shards,
        "num_epochs": FLAGS.num_epochs,
    }

    session_config = tf.ConfigProto(
        allow_soft_placement=True, log_device_placement=False)
    tpu_config = tf.contrib.tpu.TPUConfig(
        iterations_per_loop=100,
        num_shards=FLAGS.num_shards,
    )
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=FLAGS.model_dir,
        save_checkpoints_secs=FLAGS.save_checkpoints_secs,
        session_config=session_config,
        tpu_config=tpu_config,
    )

    estimator = tf.contrib.tpu.TPUEstimator(
        model_fn=squeezenet_model.model_fn,
        use_tpu=FLAGS.use_tpu,
        config=run_config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size,
        params=dict(params, use_tpu=FLAGS.use_tpu),
    )

    # Alternate training and evaluation so that num_evals eval points are
    # spread evenly across the full training run.
    num_evals = max(FLAGS.num_evals, 1)
    examples_per_eval = training_examples // num_evals
    for _ in range(num_evals):
        estimator.train(
            input_fn=data_pipeline.InputReader(FLAGS.data_dir, is_training=True),
            steps=examples_per_eval // FLAGS.batch_size)

        tf.logging.info("Running evaluation")
        eval_results = estimator.evaluate(
            input_fn=data_pipeline.InputReader(
                FLAGS.data_dir, is_training=False),
            steps=eval_examples // FLAGS.batch_size,
        )
        tf.logging.info("%s", eval_results)


if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    app.run(main)
| mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/official/squeezenet/squeezenet_main.py | Python | apache-2.0 | 4,590 |
"""
@package mi.dataset.parser.test
@file mi/dataset/parser/test/test_nutnr_m_glider.py
@author Emily Hahn
@brief A test parser for the nutnr series m instrument through a glider
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException, ConfigurationException, DatasetParserException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.nutnr_m.glider.resource import RESOURCE_PATH
from mi.dataset.parser.glider import GliderParser
from mi.dataset.test.test_parser import ParserUnitTestCase
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
log = get_logger()


@attr('UNIT', group='mi')
class NutnrMGliderParserUnitTestCase(ParserUnitTestCase):
    """Unit tests for the nutnr_m glider parser (GliderParser configured with
    the NutnrMDataParticle particle class).

    Fixes over the original: file handles are closed via ``with`` (they were
    leaked in the failure-path tests), deprecated ``assertEquals`` is replaced
    by ``assertEqual``, stale comments are corrected, and the repeated
    "open + expect DatasetParserException" boilerplate is factored into a
    helper.
    """

    config = {
        DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
        DataSetDriverConfigKeys.PARTICLE_CLASS: 'NutnrMDataParticle'
    }

    def _assert_parse_raises(self, filename):
        """Open filename from RESOURCE_PATH and assert that constructing the
        parser and reading one record raises DatasetParserException."""
        with open(os.path.join(RESOURCE_PATH, filename), 'rU') as file_handle:
            with self.assertRaises(DatasetParserException):
                parser = GliderParser(self.config, file_handle,
                                      self.exception_callback)
                parser.get_records(1)

    def test_simple(self):
        """
        Test a simple case that we can parse a single message
        """
        with open(os.path.join(RESOURCE_PATH, 'single.mrg'), 'rU') as file_handle:
            parser = GliderParser(self.config, file_handle, self.exception_callback)
            particles = parser.get_records(1)
            self.assert_particles(particles, "single.yml", RESOURCE_PATH)
            self.assertEqual(self.exception_callback_value, [])

    def test_many(self):
        """
        Test a simple case with more messages
        """
        with open(os.path.join(RESOURCE_PATH, 'many.mrg'), 'rU') as file_handle:
            parser = GliderParser(self.config, file_handle, self.exception_callback)
            particles = parser.get_records(12)
            # requested more records than are available in the file; only 10
            # should be returned
            self.assertEqual(len(particles), 10)
            self.assert_particles(particles, "many.yml", RESOURCE_PATH)
            self.assertEqual(self.exception_callback_value, [])

    def test_full(self):
        """
        Test a full file and confirm the right number of particles is returned
        """
        with open(os.path.join(RESOURCE_PATH, 'unit_514-2014-351-2-0.mrg'),
                  'rU') as file_handle:
            parser = GliderParser(self.config, file_handle, self.exception_callback)
            particles = parser.get_records(40)
            # requested more records than are available in the file; only 31
            # should be returned
            self.assertEqual(len(particles), 31)
            self.assertEqual(self.exception_callback_value, [])

    def test_empty(self):
        """
        An empty file will return a sample exception since it cannot read
        the header
        """
        self._assert_parse_raises('empty.mrg')

    def test_bad_config(self):
        """
        Test that a set of bad configurations produces the expected exceptions
        """
        with open(os.path.join(RESOURCE_PATH, 'single.mrg'), 'rU') as file_handle:
            # confirm a configuration exception occurs if no config is passed in
            with self.assertRaises(ConfigurationException):
                GliderParser({}, file_handle, self.exception_callback)

            # confirm a config missing the particle class causes an exception
            bad_config = {DataSetDriverConfigKeys.PARTICLE_MODULE:
                          'mi.dataset.parser.glider'}
            with self.assertRaises(ConfigurationException):
                GliderParser(bad_config, file_handle, self.exception_callback)

            # confirm a config with a non existing class causes an exception
            bad_config = {
                DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
                DataSetDriverConfigKeys.PARTICLE_CLASS: 'BadDataParticle'
            }
            with self.assertRaises(AttributeError):
                GliderParser(bad_config, file_handle, self.exception_callback)

    def test_bad_headers(self):
        """
        Test that a file with a short header raises a sample exception
        """
        # this file does not have enough header lines
        self._assert_parse_raises('short_header.mrg')
        # this file specifies a number of header lines other than 14
        self._assert_parse_raises('bad_num_header_lines.mrg')
        # this file specifies a number of label lines other than 3
        self._assert_parse_raises('bad_num_label_lines.mrg')

    def test_missing_time(self):
        """
        Test that a file which is missing the required m_present_time field
        for timestamps raises a sample exception
        """
        # this file is missing the m_present_time label
        self._assert_parse_raises('no_time_label.mrg')

    def test_short_data(self):
        """
        Test that if the number of columns in the header do not match the
        number of columns in the data an exception occurs
        """
        # this file has two columns removed from the data line
        self._assert_parse_raises('short_data.mrg')

    def test_bad_sensors_per_cycle(self):
        """
        Test that if the number of sensors per cycle from the header does not
        match that in the header an exception in the callback occurs, but
        processing continues
        """
        with open(os.path.join(RESOURCE_PATH, 'bad_sensors_per_cycle.mrg'),
                  'rU') as file_handle:
            parser = GliderParser(self.config, file_handle, self.exception_callback)
            particles = parser.get_records(1)
            self.assert_particles(particles, "single.yml", RESOURCE_PATH)
            self.assertEqual(len(self.exception_callback_value), 1)
            self.assertIsInstance(self.exception_callback_value[0], SampleException)

    def test_short_units(self):
        """
        Test that if the number of label columns does not match the units
        number of columns an exception occurs
        """
        # this file has two columns removed from the units line
        self._assert_parse_raises('short_units.mrg')
| oceanobservatories/mi-instrument | mi/dataset/parser/test/test_nutnr_m_glider.py | Python | bsd-2-clause | 7,448 |
from __future__ import print_function, division
from sympy.core import Basic
from sympy.core.compatibility import xrange
import random
class GrayCode(Basic):
    """
    A Gray code is essentially a Hamiltonian walk on
    a n-dimensional cube with edge length of one.
    The vertices of the cube are represented by vectors
    whose values are binary. The Hamilton walk visits
    each vertex exactly once. The Gray code for a 3d
    cube is ['000','100','110','010','011','111','101',
    '001'].

    A Gray code solves the problem of sequentially
    generating all possible subsets of n objects in such
    a way that each subset is obtained from the previous
    one by either deleting or adding a single object.
    In the above example, 1 indicates that the object is
    present, and 0 indicates that its absent.

    Gray codes have applications in statistics as well when
    we want to compute various statistics related to subsets
    in an efficient manner.

    References:
    [1] Nijenhuis,A. and Wilf,H.S.(1978).
    Combinatorial Algorithms. Academic Press.
    [2] Knuth, D. (2011). The Art of Computer Programming, Vol 4
    Addison Wesley

    Examples
    ========

    >>> from sympy.combinatorics.graycode import GrayCode
    >>> a = GrayCode(3)
    >>> list(a.generate_gray())
    ['000', '001', '011', '010', '110', '111', '101', '100']
    >>> a = GrayCode(4)
    >>> list(a.generate_gray())
    ['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', \
'1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000']
    """

    # Set by skip() to suppress the next yield in generate_gray().
    _skip = False
    # Current position; either an int (bit pattern) or a bit string --
    # the `current` property normalizes both forms to a string.
    _current = 0
    # Lazily computed by the `rank` property.
    _rank = None

    def __new__(cls, n, *args, **kw_args):
        """
        Default constructor.

        It takes a single argument ``n`` which gives the dimension of the Gray
        code. The starting Gray code string (``start``) or the starting ``rank``
        may also be given; the default is to start at rank = 0 ('0...0').

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> a = GrayCode(3)
        >>> a
        GrayCode(3)
        >>> a.n
        3
        >>> a = GrayCode(3, start='100')
        >>> a.current
        '100'
        >>> a = GrayCode(4, rank=4)
        >>> a.current
        '0110'
        >>> a.rank
        4
        """
        if n < 1 or int(n) != n:
            raise ValueError(
                'Gray code dimension must be a positive integer, not %i' % n)
        n = int(n)
        args = (n,) + args
        obj = Basic.__new__(cls, *args)
        if 'start' in kw_args:
            obj._current = kw_args["start"]
            if len(obj._current) > n:
                raise ValueError('Gray code start has length %i but '
                    'should not be greater than %i' % (len(obj._current), n))
        elif 'rank' in kw_args:
            if int(kw_args["rank"]) != kw_args["rank"]:
                raise ValueError('Gray code rank must be a positive integer, '
                    'not %i' % kw_args["rank"])
            # Rank wraps modulo the total number of codes.
            obj._rank = int(kw_args["rank"]) % obj.selections
            obj._current = obj.unrank(n, obj._rank)
        return obj

    def next(self, delta=1):
        """
        Returns the Gray code a distance ``delta`` (default = 1) from the
        current value in canonical order.

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> a = GrayCode(3, start='110')
        >>> a.next().current
        '111'
        >>> a.next(-1).current
        '010'
        """
        return GrayCode(self.n, rank=(self.rank + delta) % self.selections)

    @property
    def selections(self):
        """
        Returns the number of bit vectors in the Gray code.

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> a = GrayCode(3)
        >>> a.selections
        8
        """
        return 2**self.n

    @property
    def n(self):
        """
        Returns the dimension of the Gray code.

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> a = GrayCode(5)
        >>> a.n
        5
        """
        return self.args[0]

    def generate_gray(self, **hints):
        """
        Generates the sequence of bit vectors of a Gray Code.

        [1] Knuth, D. (2011). The Art of Computer Programming,
        Vol 4, Addison Wesley

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> a = GrayCode(3)
        >>> list(a.generate_gray())
        ['000', '001', '011', '010', '110', '111', '101', '100']
        >>> list(a.generate_gray(start='011'))
        ['011', '010', '110', '111', '101', '100']
        >>> list(a.generate_gray(rank=4))
        ['110', '111', '101', '100']

        See Also
        ========
        skip
        """
        bits = self.n
        start = None
        if "start" in hints:
            start = hints["start"]
        elif "rank" in hints:
            start = GrayCode.unrank(self.n, hints["rank"])
        if start is not None:
            self._current = start
        current = self.current
        # The rank of a Gray string is its value decoded back to binary.
        graycode_bin = gray_to_bin(current)
        if len(graycode_bin) > self.n:
            raise ValueError('Gray code start has length %i but should '
                'not be greater than %i' % (len(graycode_bin), bits))
        # Switch to integer representation for fast XOR updates below.
        self._current = int(current, 2)
        graycode_int = int(''.join(graycode_bin), 2)
        for i in xrange(graycode_int, 1 << bits):
            if self._skip:
                self._skip = False
            else:
                yield self.current
            # bbtc: binary bits that change going from i to i+1;
            # gbtc: the corresponding Gray bits that change.
            bbtc = (i ^ (i + 1))
            gbtc = (bbtc ^ (bbtc >> 1))
            self._current = (self._current ^ gbtc)
        # Reset position after a complete traversal.
        self._current = 0

    def skip(self):
        """
        Skips the bit generation.

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> a = GrayCode(3)
        >>> for i in a.generate_gray():
        ...     if i == '010':
        ...         a.skip()
        ...     print(i)
        ...
        000
        001
        011
        010
        111
        101
        100

        See Also
        ========
        generate_gray
        """
        self._skip = True

    @property
    def rank(self):
        """
        Ranks the Gray code.

        A ranking algorithm determines the position (or rank)
        of a combinatorial object among all the objects w.r.t.
        a given order. For example, the 4 bit binary reflected
        Gray code (BRGC) '0101' has a rank of 6 as it appears in
        the 6th position in the canonical ordering of the family
        of 4 bit Gray codes.

        References:
        [1] http://www-stat.stanford.edu/~susan/courses/s208/node12.html

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> a = GrayCode(3)
        >>> list(a.generate_gray())
        ['000', '001', '011', '010', '110', '111', '101', '100']
        >>> GrayCode(3, start='100').rank
        7
        >>> GrayCode(3, rank=7).current
        '100'

        See Also
        ========
        unrank
        """
        if self._rank is None:
            self._rank = int(gray_to_bin(self.current), 2)
        return self._rank

    @property
    def current(self):
        """
        Returns the currently referenced Gray code as a bit string.

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> GrayCode(3, start='100').current
        '100'
        """
        # _current may hold either an int or a bit string; normalize to a
        # zero-padded bit string of length n.
        rv = self._current or '0'
        if type(rv) is not str:
            rv = bin(rv)[2:]
        return rv.rjust(self.n, '0')

    @classmethod
    def unrank(self, n, rank):
        # NOTE: the first parameter is conventionally named `cls` for a
        # classmethod; kept as `self` to leave the code byte-identical.
        """
        Unranks an n-bit sized Gray code of rank k. This method exists
        so that a derivative GrayCode class can define its own code of
        a given rank.

        The string here is generated in reverse order to allow for tail-call
        optimization.

        Examples
        ========
        >>> from sympy.combinatorics.graycode import GrayCode
        >>> GrayCode(5, rank=3).current
        '00010'
        >>> GrayCode.unrank(5, 3)
        '00010'

        See Also
        ========
        rank
        """
        def _unrank(k, n):
            # Recursive binary-reflected construction: the top bit decides
            # whether we are in the first (ascending) or second (reflected)
            # half of the code.
            if n == 1:
                return str(k % 2)
            m = 2**(n - 1)
            if k < m:
                return '0' + _unrank(k, n - 1)
            return '1' + _unrank(m - (k % m) - 1, n - 1)
        return _unrank(rank, n)
def random_bitstring(n):
    """
    Generates a random bitlist of length n.

    Examples
    ========
    >>> from sympy.combinatorics.graycode import random_bitstring
    >>> random_bitstring(3) # doctest: +SKIP
    100
    """
    # One independent coin flip per position.
    return ''.join(random.choice('01') for _ in range(n))
def gray_to_bin(bin_list):
    """
    Convert from Gray coding to binary coding.

    We assume big endian encoding.

    Examples
    ========
    >>> from sympy.combinatorics.graycode import gray_to_bin
    >>> gray_to_bin('100')
    '111'

    See Also
    ========
    bin_to_gray
    """
    # Robustness fix: the original raised IndexError on an empty string.
    if not bin_list:
        return ''
    b = [bin_list[0]]
    # b[i] = b[i-1] XOR g[i]: each binary bit is the running XOR of the
    # Gray bits seen so far.  `range` replaces the py2-only `xrange` shim.
    for i in range(1, len(bin_list)):
        b.append(str(int(b[i - 1] != bin_list[i])))
    return ''.join(b)
def bin_to_gray(bin_list):
    """
    Convert from binary coding to gray coding.

    We assume big endian encoding.

    Examples
    ========
    >>> from sympy.combinatorics.graycode import bin_to_gray
    >>> bin_to_gray('111')
    '100'

    See Also
    ========
    gray_to_bin
    """
    # Robustness fix: the original raised IndexError on an empty string.
    if not bin_list:
        return ''
    b = [bin_list[0]]
    # g[i] = bin[i] XOR bin[i-1].  Bug fix: the original XORed against the
    # previously *computed* Gray bit (b[i-1]) instead of the previous
    # binary bit, which e.g. turned '10' into '10' rather than '11'.
    for i in range(1, len(bin_list)):
        b.append(str(int(bin_list[i]) ^ int(bin_list[i - 1])))
    return ''.join(b)
def get_subset_from_bitstring(super_set, bitstring):
    """
    Gets the subset defined by the bitstring.

    A '1' at position i selects super_set[i]; a '0' omits it.

    Examples
    ========
    >>> from sympy.combinatorics.graycode import get_subset_from_bitstring
    >>> get_subset_from_bitstring(['a','b','c','d'], '0011')
    ['c', 'd']
    >>> get_subset_from_bitstring(['c','a','c','c'], '1100')
    ['c', 'a']

    See Also
    ========
    graycode_subsets
    """
    if len(super_set) != len(bitstring):
        raise ValueError("The sizes of the lists are not equal")
    return [element for element, bit in zip(super_set, bitstring)
            if bit == '1']
def graycode_subsets(gray_code_set):
    """
    Generates the subsets as enumerated by a Gray code.

    Successive subsets differ by the addition or removal of exactly one
    element, following the canonical Gray-code ordering.

    Examples
    ========
    >>> from sympy.combinatorics.graycode import graycode_subsets
    >>> list(graycode_subsets(['a','b','c']))
    [[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], \
    ['a', 'c'], ['a']]
    >>> list(graycode_subsets(['a','b','c','c']))
    [[], ['c'], ['c', 'c'], ['c'], ['b', 'c'], ['b', 'c', 'c'], \
    ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], ['a', 'b', 'c', 'c'], \
    ['a', 'b', 'c'], ['a', 'c'], ['a', 'c', 'c'], ['a', 'c'], ['a']]

    See Also
    ========
    get_subset_from_bitstring
    """
    # Materialize the full walk first (generate_gray mutates the GrayCode
    # object's state), then map each bit vector to its subset.
    bit_vectors = list(GrayCode(len(gray_code_set)).generate_gray())
    for bit_vector in bit_vectors:
        yield get_subset_from_bitstring(gray_code_set, bit_vector)
| hrashk/sympy | sympy/combinatorics/graycode.py | Python | bsd-3-clause | 11,202 |
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
import datetime as dt
from datetime import timedelta
import time as t
import numpy as np
import os
import pandas as pd
def _cache_dates():
    ''' Caches dates '''
    # Loads the list of NYSE trading days shipped next to this module and
    # returns it as a pandas TimeSeries with dates as both index and values.
    try:
        # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
        filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
    except KeyError:
        # NOTE(review): dead branch -- os.path.join above cannot raise
        # KeyError; it guarded the commented-out os.environ lookup.  If it
        # ever fired, 'filename' would be unbound below (NameError).
        print "Please be sure you have NYSE_dates.txt in the qstkutil directory"
    datestxt = np.loadtxt(filename, dtype=str)
    dates = []
    for i in datestxt:
        dates.append(dt.datetime.strptime(i, "%m/%d/%Y"))
    return pd.TimeSeries(index=dates, data=dates)

# Module-level cache of all NYSE trading dates (built once at import time).
GTS_DATES = _cache_dates()
def getMonthNames():
    """Return the twelve month abbreviations, January through December."""
    return ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
            'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
def getYears(funds):
    """Return the distinct years appearing in funds.index, in first-seen order."""
    seen = []
    for timestamp in funds.index:
        if timestamp.year not in seen:
            seen.append(timestamp.year)
    return seen
def getMonths(funds, year):
    """Return the distinct months (first-seen order) of the dates in
    funds.index that fall in the given year."""
    seen = []
    for timestamp in funds.index:
        if timestamp.year == year and timestamp.month not in seen:
            seen.append(timestamp.month)
    return seen
def getDays(funds, year, month):
    """Return every date in funds.index falling in the given year and month,
    preserving the original order."""
    return [timestamp for timestamp in funds.index
            if timestamp.year == year and timestamp.month == month]
def getDaysBetween(ts_start, ts_end):
    """Return consecutive calendar days from ts_start (inclusive) to
    ts_end (exclusive), one per whole day in the interval."""
    span = (ts_end - ts_start).days
    return [ts_start + timedelta(days=offset) for offset in range(span)]
def getFirstDay(funds, year, month):
    """Return the first date in funds.index within the given year/month,
    or the string 'ERROR' if none matches."""
    for timestamp in funds.index:
        if timestamp.year == year and timestamp.month == month:
            return timestamp
    return 'ERROR'
def getLastDay(funds, year, month):
    """Return the last date in funds.index within the given year/month,
    or the string 'ERROR' if none matches."""
    matches = [timestamp for timestamp in funds.index
               if timestamp.year == year and timestamp.month == month]
    return matches[-1] if matches else 'ERROR'
def getNextOptionClose(day, trade_days, offset=0):
    # Returns the next monthly option close (third Friday of the month,
    # 16:00), `offset` months ahead, recursing to the following month if
    # that close is already in the past relative to `day`.
    #get third friday in month of day
    #get first of month
    year_off=0
    if day.month+offset > 12:
        year_off = 1
        offset = offset - 12
    first = dt.datetime(day.year+year_off, day.month+offset, 1, hour=16)
    #get weekday
    day_num = first.weekday()
    #get first friday (friday - weekday) add 7 if less than 1
    dif = 5 - day_num
    if dif < 1:
        dif = dif+7
    #move to third friday
    dif = dif + 14
    friday = first+dt.timedelta(days=(dif-1))
    #if friday is a holiday, options expire then
    # NOTE(review): this test looks inverted -- when `friday` IS in
    # trade_days the close is pushed one day later (a Saturday).  Confirm
    # against the NYSE holiday calendar that this is the intended behavior.
    if friday in trade_days:
        month_close = first + dt.timedelta(days=dif)
    else:
        month_close = friday
    #if day is past the day after that
    if month_close < day:
        return_date = getNextOptionClose(day, trade_days, offset=1)
    else:
        return_date = month_close
    return(return_date)
def getLastOptionClose(day, trade_days):
    """Return the most recent monthly option close at or before `day` by
    stepping backwards one day at a time until the next close precedes it."""
    probe = day
    while getNextOptionClose(probe, trade_days) >= day:
        probe = probe - dt.timedelta(days=1)
    return getNextOptionClose(probe, trade_days)
def getNYSEoffset(mark, offset):
    ''' Returns NYSE date offset by number of days '''
    # Normalize to midnight so we can search the cached trading calendar.
    midnight = mark.replace(hour=0, minute=0, second=0, microsecond=0)
    pos = GTS_DATES.index.searchsorted(midnight, side='right')
    # No exact match: fall back to the nearest trading day in the past.
    if GTS_DATES[pos] != midnight:
        pos -= 1
    result = GTS_DATES[pos + offset]
    # Trading-day timestamps carry the 16:00 market close.
    return result.replace(hour=16)
def getNYSEdays(startday = dt.datetime(1964,7,5), endday = dt.datetime(2020,12,31),
                timeofday = dt.timedelta(0)):
    """
    @summary: Create a list of timestamps between startday and endday (inclusive)
    that correspond to the days there was trading at the NYSE. This function
    depends on a separately created a file that lists all days since July 4,
    1962 that the NYSE has been open, going forward to 2020 (based
    on the holidays that NYSE recognizes).
    @param startday: First timestamp to consider (inclusive)
    @param endday: Last day to consider (inclusive)
    @return list: of timestamps between startday and endday on which NYSE traded
    @rtype datetime
    """
    # Shift the window back by timeofday so the slice below selects the
    # correct calendar dates, then add it back on each result.
    window_start = startday - timeofday
    window_end = endday - timeofday
    trading_dates = GTS_DATES[window_start:window_end]
    return [trading_date + timeofday for trading_date in trading_dates]
def getNextNNYSEdays(startday, days, timeofday):
    """
    @summary: Create a list of timestamps from startday that is days days long
    that correspond to the days there was trading at NYSE. This function
    depends on the file used in getNYSEdays and assumes the dates within are
    in order.
    @param startday: First timestamp to consider (inclusive)
    @param days: Number of timestamps to return
    @return list: List of timestamps starting at startday on which NYSE traded
    @rtype datetime
    """
    try:
        # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
        filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
    except KeyError:
        # NOTE(review): dead branch -- os.path.join above cannot raise
        # KeyError (it guarded the commented-out os.environ lookup); if it
        # ever fired, 'filename' would be unbound below (NameError).
        print "Please be sure to set the value for QS in config.sh or\n"
        print "in local.sh and then \'source local.sh\'.\n"
    datestxt = np.loadtxt(filename,dtype=str)
    dates=[]
    # Collect the first `days` trading dates at or after startday; the
    # loop still scans the whole file once `days` dates are found.
    for i in datestxt:
        if(len(dates)<days):
            if((dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)>=startday):
                dates.append(dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)
    return(dates)
def getPrevNNYSEday(startday, timeofday):
    """
    @summary: This function returns the last valid trading day before the start
    day, or returns the start day if it is a valid trading day. This function
    depends on the file used in getNYSEdays and assumes the dates within are
    in order.
    @param startday: First timestamp to consider (inclusive)
    @param days: Number of timestamps to return
    @return list: List of timestamps starting at startday on which NYSE traded
    @rtype datetime
    """
    try:
        # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
        filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
    except KeyError:
        # NOTE(review): dead branch, same situation as getNextNNYSEdays.
        print "Please be sure to set the value for QS in config.sh or\n"
        print "in local.sh and then \'source local.sh\'.\n"
    datestxt = np.loadtxt(filename,dtype=str)
    #''' Set return to first day '''
    dtReturn = dt.datetime.strptime( datestxt[0],"%m/%d/%Y")+timeofday
    #''' Loop through all but first '''
    for i in datestxt[1:]:
        dtNext = dt.datetime.strptime(i,"%m/%d/%Y")
        #''' If we are > startday, then use previous valid day '''
        if( dtNext > startday ):
            break
        dtReturn = dtNext + timeofday
    return(dtReturn)
def ymd2epoch(year, month, day):
    """
    @summary: Convert YMD info into a unix epoch value.
    @param year: The year
    @param month: The month
    @param day: The day
    @return epoch: number of seconds since epoch
    """
    # mktime interprets the struct_time in local time.
    midnight = dt.date(year, month, day).timetuple()
    return t.mktime(midnight)
def epoch2date(ts):
    """
    @summary Convert seconds since epoch into date
    @param ts: Seconds since epoch
    @return thedate: A date object
    """
    # gmtime is UTC-based, so the result is timezone-independent.
    parts = t.gmtime(ts)
    return dt.date(parts.tm_year, parts.tm_mon, parts.tm_mday)
def _trade_dates(dt_start, dt_end, s_period):
    '''
    @summary: Generate dates on which we need to trade
    @param c_strat: Strategy config class
    @param dt_start: Start date
    @param dt_end: End date
    @param s_period: Pandas time-rule string; a leading "BW" selects every
    other date of the weekly rule that follows.
    '''
    ldt_timestamps = getNYSEdays(dt_start,
        dt_end, dt.timedelta(hours=16) )

    # Use pandas reindex method instead
    # Note, dates are index as well as values, we select based on index
    # but return values since it is a numpy array of datetimes instead of
    # pandas specific.
    ts_dates = pd.TimeSeries(index=ldt_timestamps, data=ldt_timestamps)

    # NOTE(review): pd.DateRange / timeRule and pd.TimeSeries are legacy
    # pandas APIs removed in modern releases -- confirm the pinned pandas
    # version before modifying this function.
    # These are the dates we want
    if s_period[:2] == 'BW':
        # special case for biweekly
        dr_range = pd.DateRange(dt_start, dt_end,
            timeRule=s_period[1:])
        dr_range = np.asarray(dr_range)
        li_even = np.array(range(len(dr_range)))
        dr_range = dr_range[li_even[li_even % 2 == 0]]
    else:
        dr_range = pd.DateRange(dt_start, dt_end,
            timeRule=s_period)
        dr_range = np.asarray(dr_range)

    # Warning, we MUST copy the date range, if we modify it it will be returned
    # in it's modified form the next time we use it.
    dr_range = np.copy(dr_range)
    dr_range += pd.DateOffset(hours=16)
    ts_dates = ts_dates.reindex( dr_range, method='bfill' )
    ldt_dates = ts_dates[ts_dates.notnull()].values

    #Make unique (set.add returns None, so the filter keeps first
    #occurrences while recording seen values as a side effect)
    sdt_unique = set()
    ldt_dates = [x for x in ldt_dates
        if x not in sdt_unique and not sdt_unique.add(x)]
    return ldt_dates
| wogsland/QSTK | build/lib.linux-x86_64-2.7/QSTK/qstkutil/qsdateutil.py | Python | bsd-3-clause | 9,008 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Usage of IC 7443
# <codecell>
from __future__ import print_function
from BinPy import *
# <codecell>
# Usage of IC 7443:
ic = IC_7443()
print(ic.__doc__)
# <codecell>
# The Pin configuration is:
inp = {8: 0, 12: 0, 13: 1, 14: 0, 15: 1, 16: 1}
# Pin initinalization
# Powering up the IC - using -- ic.setIC({14: 1, 7: 0})
ic.setIC({14: 1, 7: 0})
# Setting the inputs of the ic
ic.setIC(inp)
# Draw the IC with the current configuration\n
ic.drawIC()
# <codecell>
# Run the IC with the current configuration using -- print ic.run() --
# Note that the ic.run() returns a dict of pin configuration similar to
print (ic.run())
# <codecell>
# Seting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --\n
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# <codecell>
# Seting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# Run the IC
print (ic.run())
# <codecell>
# Connector Outputs
c = Connector()
# Set the output connector to a particular pin of the ic
ic.setOutput(1, c)
print(c)
| daj0ker/BinPy | BinPy/examples/source/ic/Series_7400/IC7443.py | Python | bsd-3-clause | 1,225 |
"""
The DoInterestManager keeps track of which parent/zones that we currently
have interest in. When you want to "look" into a zone you add an interest
to that zone. When you want to get rid of, or ignore, the objects in that
zone, remove interest in that zone.
p.s. A great deal of this code is just code moved from ClientRepository.py.
"""
from panda3d.core import *
from panda3d.direct import *
from .MsgTypes import *
from direct.showbase.PythonUtil import *
from direct.showbase import DirectObject
from .PyDatagram import PyDatagram
from direct.directnotify.DirectNotifyGlobal import directNotify
import types
from direct.showbase.PythonUtil import report
class InterestState:
    # Lifecycle states for one interest set.
    StateActive = 'Active'
    StatePendingDel = 'PendingDel'

    def __init__(self, desc, state, context, event, parentId, zoneIdList,
                 eventCounter, auto=False):
        """Track one interest set: its parent/zones, lifecycle state, and
        the completion events still outstanding against it."""
        self.desc = desc
        self.state = state
        self.context = context
        self.parentId = parentId
        self.zoneIdList = zoneIdList
        self.auto = auto
        # Multiple removal requests can be in flight for the same interest
        # before the first response arrives, so completion events are kept
        # as a list; eventCounter tallies outstanding events globally.
        self.events = []
        self.eventCounter = eventCounter
        if event:
            self.addEvent(event)

    def addEvent(self, event):
        # Register another completion event and bump the shared counter.
        self.events.append(event)
        self.eventCounter.num += 1

    def getEvents(self):
        # Return a copy so callers cannot mutate our bookkeeping.
        return list(self.events)

    def clearEvents(self):
        # Release all outstanding events from the shared counter.
        self.eventCounter.num -= len(self.events)
        assert self.eventCounter.num >= 0
        self.events = []

    def sendEvents(self):
        # Fire every outstanding completion event, then forget them all.
        for event in self.events:
            messenger.send(event)
        self.clearEvents()

    def setDesc(self, desc):
        self.desc = desc

    def isPendingDelete(self):
        return self.state == InterestState.StatePendingDel

    def __repr__(self):
        return 'InterestState(desc=%s, state=%s, context=%s, event=%s, parentId=%s, zoneIdList=%s)' % (
            self.desc, self.state, self.context, self.events, self.parentId, self.zoneIdList)
class InterestHandle:
    """Typed wrapper around an interest id; helps ensure that only valid
    handles get passed in to DoInterestManager functions."""

    def __init__(self, id):
        self._id = id

    def asInt(self):
        """Return the raw integer id."""
        return self._id

    def __eq__(self, other):
        # Handle-to-handle comparison goes by id; anything else (e.g. a bare
        # int) is compared directly against the raw id.
        if type(other) is type(self):
            return self._id == other._id
        return self._id == other

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._id)
# context value for interest changes that have no complete event
# (requests tagged with NO_CONTEXT never receive a done/finished notice)
NO_CONTEXT = 0
class DoInterestManager(DirectObject.DirectObject):
    """
    Top level Interest Manager

    Tracks which parent/zone interest sets the client has open, issues
    add/remove/alter interest datagrams to the server, and dispatches
    completion events when the server acknowledges each change.
    """
    notify = directNotify.newCategory("DoInterestManager")
    InterestDebug = ConfigVariableBool('interest-debug', False)

    # 'handle' is a number that represents a single interest set that the
    # client has requested; the interest set may be modified
    _HandleSerialNum = 0
    # high bit is reserved for server interests
    _HandleMask = 0x7FFF

    # 'context' refers to a single request to change an interest set
    _ContextIdSerialNum = 100
    _ContextIdMask = 0x3FFFFFFF # avoid making Python create a long

    # handle -> InterestState for every open (or pending-delete) interest;
    # shared at class level across all instances
    _interests = {}
    if __debug__:
        _debug_interestHistory = []
        _debug_maxDescriptionLen = 40

    _SerialGen = SerialNumGen()
    _SerialNum = serialNum()

    def __init__(self):
        assert DoInterestManager.notify.debugCall()
        DirectObject.DirectObject.__init__(self)
        # event names broadcast to the InterestWatcher on add/remove
        self._addInterestEvent = uniqueName('DoInterestManager-Add')
        self._removeInterestEvent = uniqueName('DoInterestManager-Remove')
        self._noNewInterests = False
        self._completeDelayedCallback = None
        # keep track of request contexts that have not completed
        self._completeEventCount = ScratchPad(num=0)
        self._allInterestsCompleteCallbacks = []

    def __verbose(self):
        # True when interest debug printing is enabled, either via the
        # 'interest-debug' config variable or getVerbose() (defined on a
        # subclass — presumably the client repository; verify there).
        return self.InterestDebug.getValue() or self.getVerbose()

    def _getAnonymousEvent(self, desc):
        # Generate a unique throwaway event name for internal bookkeeping.
        return 'anonymous-%s-%s' % (desc, DoInterestManager._SerialGen.next())

    def setNoNewInterests(self, flag):
        # When set, addInterest/alterInterest refuse to open new interests
        # (used while tearing down).
        self._noNewInterests = flag

    def noNewInterests(self):
        return self._noNewInterests

    def setAllInterestsCompleteCallback(self, callback):
        """Invoke callback now if no interest operations are outstanding,
        otherwise queue it to run when they all complete."""
        if ((self._completeEventCount.num == 0) and
            (self._completeDelayedCallback is None)):
            callback()
        else:
            self._allInterestsCompleteCallbacks.append(callback)

    def getAllInterestsCompleteEvent(self):
        """Return the messenger event name fired when every outstanding
        interest operation has completed."""
        return 'allInterestsComplete-%s' % DoInterestManager._SerialNum

    def resetInterestStateForConnectionLoss(self):
        """Discard all interest bookkeeping (e.g. after losing the server
        connection); no remove messages are sent."""
        DoInterestManager._interests.clear()
        self._completeEventCount = ScratchPad(num=0)
        if __debug__:
            self._addDebugInterestHistory("RESET", "", 0, 0, 0, [])

    def isValidInterestHandle(self, handle):
        # pass in a handle (or anything else) and this will return true if it is
        # still a valid interest handle
        if not isinstance(handle, InterestHandle):
            return False
        return handle.asInt() in DoInterestManager._interests

    def updateInterestDescription(self, handle, desc):
        # Update the human-readable description of an open interest.
        iState = DoInterestManager._interests.get(handle.asInt())
        if iState:
            iState.setDesc(desc)

    def addInterest(self, parentId, zoneIdList, description, event=None):
        """
        Look into a (set of) zone(s).

        parentId/zoneIdList define the interest; description is for
        debugging. If event is given, it is sent when the server confirms
        the add. Returns an InterestHandle (or None when new interests are
        disallowed).
        """
        assert DoInterestManager.notify.debugCall()
        handle = self._getNextHandle()
        # print 'base.cr.addInterest(',description,',',handle,'):',globalClock.getFrameCount()
        if self._noNewInterests:
            DoInterestManager.notify.warning(
                "addInterest: addingInterests on delete: %s" % (handle))
            return

        # make sure we've got parenting rules set in the DC
        if parentId not in (self.getGameDoId(),):
            parent = self.getDo(parentId)
            if not parent:
                DoInterestManager.notify.error(
                    'addInterest: attempting to add interest under unknown object %s' % parentId)
            else:
                if not parent.hasParentingRules():
                    DoInterestManager.notify.error(
                        'addInterest: no setParentingRules defined in the DC for object %s (%s)'
                        '' % (parentId, parent.__class__.__name__))

        if event:
            contextId = self._getNextContextId()
        else:
            contextId = 0
            # event = self._getAnonymousEvent('addInterest')
        DoInterestManager._interests[handle] = InterestState(
            description, InterestState.StateActive, contextId, event, parentId, zoneIdList, self._completeEventCount)
        if self.__verbose():
            print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
                handle, parentId, zoneIdList, description, event))
        self._sendAddInterest(handle, contextId, parentId, zoneIdList, description)
        if event:
            messenger.send(self._getAddInterestEvent(), [event])
        assert self.printInterestsIfDebug()
        return InterestHandle(handle)

    def addAutoInterest(self, parentId, zoneIdList, description):
        """
        Look into a (set of) zone(s).

        Like addInterest, but for interests opened automatically on behalf
        of an object (see openAutoInterests); no datagram is sent and no
        completion event is used.
        """
        assert DoInterestManager.notify.debugCall()
        handle = self._getNextHandle()
        if self._noNewInterests:
            DoInterestManager.notify.warning(
                "addInterest: addingInterests on delete: %s" % (handle))
            return

        # make sure we've got parenting rules set in the DC
        if parentId not in (self.getGameDoId(),):
            parent = self.getDo(parentId)
            if not parent:
                DoInterestManager.notify.error(
                    'addInterest: attempting to add interest under unknown object %s' % parentId)
            else:
                if not parent.hasParentingRules():
                    DoInterestManager.notify.error(
                        'addInterest: no setParentingRules defined in the DC for object %s (%s)'
                        '' % (parentId, parent.__class__.__name__))

        DoInterestManager._interests[handle] = InterestState(
            description, InterestState.StateActive, 0, None, parentId, zoneIdList, self._completeEventCount, True)
        if self.__verbose():
            print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s)' % (
                handle, parentId, zoneIdList, description))
        assert self.printInterestsIfDebug()
        return InterestHandle(handle)

    def removeInterest(self, handle, event = None):
        """
        Stop looking in a (set of) zone(s)

        handle must be an InterestHandle previously returned by addInterest.
        If event is given (or an anonymous one is generated), it fires when
        the server confirms the removal. Returns True if the handle existed.
        """
        # print 'base.cr.removeInterest(',handle,'):',globalClock.getFrameCount()
        assert DoInterestManager.notify.debugCall()
        assert isinstance(handle, InterestHandle)
        existed = False
        if not event:
            event = self._getAnonymousEvent('removeInterest')
        handle = handle.asInt()
        if handle in DoInterestManager._interests:
            existed = True
            intState = DoInterestManager._interests[handle]
            if event:
                messenger.send(self._getRemoveInterestEvent(),
                               [event, intState.parentId, intState.zoneIdList])
            if intState.isPendingDelete():
                self.notify.warning(
                    'removeInterest: interest %s already pending removal' %
                    handle)
                # this interest is already pending delete, so let's just tack this
                # callback onto the list
                if event is not None:
                    intState.addEvent(event)
            else:
                if len(intState.events) > 0:
                    # we're not pending a removal, but we have outstanding events?
                    # probably we are waiting for an add/alter complete.
                    # should we send those events now?
                    assert self.notify.warning('removeInterest: abandoning events: %s' %
                                               intState.events)
                    intState.clearEvents()
                intState.state = InterestState.StatePendingDel
                contextId = self._getNextContextId()
                intState.context = contextId
                if event:
                    intState.addEvent(event)
                self._sendRemoveInterest(handle, contextId)
                if not event:
                    self._considerRemoveInterest(handle)
                if self.__verbose():
                    print('CR::INTEREST.removeInterest(handle=%s, event=%s)' % (
                        handle, event))
        else:
            DoInterestManager.notify.warning(
                "removeInterest: handle not found: %s" % (handle))
        assert self.printInterestsIfDebug()
        return existed

    def removeAutoInterest(self, handle):
        """
        Stop looking in a (set of) zone(s)

        Counterpart of addAutoInterest: no datagram is sent and no events
        fire; the interest is culled locally. Returns True if it existed.
        """
        assert DoInterestManager.notify.debugCall()
        assert isinstance(handle, InterestHandle)
        existed = False
        handle = handle.asInt()
        if handle in DoInterestManager._interests:
            existed = True
            intState = DoInterestManager._interests[handle]
            if intState.isPendingDelete():
                self.notify.warning(
                    'removeInterest: interest %s already pending removal' %
                    handle)
                # this interest is already pending delete, so let's just tack this
                # callback onto the list
            else:
                if len(intState.events) > 0:
                    # we're not pending a removal, but we have outstanding events?
                    # probably we are waiting for an add/alter complete.
                    # should we send those events now?
                    self.notify.warning('removeInterest: abandoning events: %s' %
                                        intState.events)
                    intState.clearEvents()
                intState.state = InterestState.StatePendingDel
                self._considerRemoveInterest(handle)
                if self.__verbose():
                    print('CR::INTEREST.removeAutoInterest(handle=%s)' % (handle))
        else:
            DoInterestManager.notify.warning(
                "removeInterest: handle not found: %s" % (handle))
        assert self.printInterestsIfDebug()
        return existed

    @report(types = ['args'], dConfigParam = 'guildmgr')
    def removeAIInterest(self, handle):
        """
        handle is NOT an InterestHandle. It's just a bare integer representing an
        AI opened interest. We're making the client close down this interest since
        the AI has trouble removing interests(that its opened) when the avatar goes
        offline. See GuildManager(UD) for how it's being used.
        """
        self._sendRemoveAIInterest(handle)

    def alterInterest(self, handle, parentId, zoneIdList, description=None,
                      event=None):
        """
        Removes old interests and adds new interests.

        Note that when an interest is changed, only the most recent
        change's event will be triggered. Previous events are abandoned.
        If this is a problem, consider opening multiple interests.
        """
        assert DoInterestManager.notify.debugCall()
        assert isinstance(handle, InterestHandle)
        #assert not self._noNewInterests
        handle = handle.asInt()
        if self._noNewInterests:
            DoInterestManager.notify.warning(
                "alterInterest: addingInterests on delete: %s" % (handle))
            return

        exists = False
        if event is None:
            event = self._getAnonymousEvent('alterInterest')
        if handle in DoInterestManager._interests:
            if description is not None:
                DoInterestManager._interests[handle].desc = description
            else:
                description = DoInterestManager._interests[handle].desc

            # are we overriding an existing change?
            if DoInterestManager._interests[handle].context != NO_CONTEXT:
                DoInterestManager._interests[handle].clearEvents()

            contextId = self._getNextContextId()
            DoInterestManager._interests[handle].context = contextId
            DoInterestManager._interests[handle].parentId = parentId
            DoInterestManager._interests[handle].zoneIdList = zoneIdList
            DoInterestManager._interests[handle].addEvent(event)

            if self.__verbose():
                print('CR::INTEREST.alterInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
                    handle, parentId, zoneIdList, description, event))
            self._sendAddInterest(handle, contextId, parentId, zoneIdList, description, action='modify')
            exists = True
            assert self.printInterestsIfDebug()
        else:
            DoInterestManager.notify.warning(
                "alterInterest: handle not found: %s" % (handle))
        return exists

    def openAutoInterests(self, obj):
        """Open the interests declared by obj.getAutoInterests(), storing
        the resulting handle on obj._autoInterestHandle (None if empty)."""
        if hasattr(obj, '_autoInterestHandle'):
            # must be multiple inheritance
            self.notify.debug('openAutoInterests(%s): interests already open' % obj.__class__.__name__)
            return
        autoInterests = obj.getAutoInterests()
        obj._autoInterestHandle = None
        if not len(autoInterests):
            return
        obj._autoInterestHandle = self.addAutoInterest(obj.doId, autoInterests, '%s-autoInterest' % obj.__class__.__name__)

    def closeAutoInterests(self, obj):
        """Close the interests previously opened by openAutoInterests(obj)."""
        if not hasattr(obj, '_autoInterestHandle'):
            # must be multiple inheritance
            self.notify.debug('closeAutoInterests(%s): interests already closed' % obj)
            return
        if obj._autoInterestHandle is not None:
            self.removeAutoInterest(obj._autoInterestHandle)
        del obj._autoInterestHandle

    # events for InterestWatcher
    def _getAddInterestEvent(self):
        return self._addInterestEvent
    def _getRemoveInterestEvent(self):
        return self._removeInterestEvent

    def _getInterestState(self, handle):
        # handle is a bare int here, not an InterestHandle.
        return DoInterestManager._interests[handle]

    def _getNextHandle(self):
        # Allocate the next unused handle, wrapping within _HandleMask.
        handle = DoInterestManager._HandleSerialNum
        while True:
            handle = (handle + 1) & DoInterestManager._HandleMask
            # skip handles that are already in use
            if handle not in DoInterestManager._interests:
                break
            DoInterestManager.notify.warning(
                'interest %s already in use' % handle)
        DoInterestManager._HandleSerialNum = handle
        return DoInterestManager._HandleSerialNum

    def _getNextContextId(self):
        # Allocate the next context id, wrapping within _ContextIdMask.
        contextId = DoInterestManager._ContextIdSerialNum
        while True:
            contextId = (contextId + 1) & DoInterestManager._ContextIdMask
            # skip over the 'no context' id
            if contextId != NO_CONTEXT:
                break
        DoInterestManager._ContextIdSerialNum = contextId
        return DoInterestManager._ContextIdSerialNum

    def _considerRemoveInterest(self, handle):
        """
        Consider whether we should cull the interest set.
        """
        assert DoInterestManager.notify.debugCall()
        if handle in DoInterestManager._interests:
            if DoInterestManager._interests[handle].isPendingDelete():
                # make sure there is no pending event for this interest
                if DoInterestManager._interests[handle].context == NO_CONTEXT:
                    assert len(DoInterestManager._interests[handle].events) == 0
                    del DoInterestManager._interests[handle]

    if __debug__:
        def printInterestsIfDebug(self):
            # Dump interest state when debug logging is on; always returns 1
            # so it can be used inside an assert.
            if DoInterestManager.notify.getDebug():
                self.printInterests()
            return 1 # for assert

        def _addDebugInterestHistory(self, action, description, handle,
                                     contextId, parentId, zoneIdList):
            # Record one interest operation for later printInterestHistory().
            if description is None:
                description = ''
            DoInterestManager._debug_interestHistory.append(
                (action, description, handle, contextId, parentId, zoneIdList))
            DoInterestManager._debug_maxDescriptionLen = max(
                DoInterestManager._debug_maxDescriptionLen, len(description))

        def printInterestHistory(self):
            print("***************** Interest History *************")
            format = '%9s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %6s %6s %9s %s'
            print(format % (
                "Action", "Description", "Handle", "Context", "ParentId",
                "ZoneIdList"))
            for i in DoInterestManager._debug_interestHistory:
                print(format % tuple(i))
            print("Note: interests with a Context of 0 do not get" \
                " done/finished notices.")

        def printInterestSets(self):
            print("******************* Interest Sets **************")
            format = '%6s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %11s %11s %8s %8s %8s'
            print(format % (
                "Handle", "Description",
                "ParentId", "ZoneIdList",
                "State", "Context",
                "Event"))
            for id, state in DoInterestManager._interests.items():
                if len(state.events) == 0:
                    event = ''
                elif len(state.events) == 1:
                    event = state.events[0]
                else:
                    event = state.events
                print(format % (id, state.desc,
                                state.parentId, state.zoneIdList,
                                state.state, state.context,
                                event))
            print("************************************************")

        def printInterests(self):
            self.printInterestHistory()
            self.printInterestSets()

    def _sendAddInterest(self, handle, contextId, parentId, zoneIdList, description,
                         action=None):
        """
        Part of the new otp-server code.

        handle is a client-side created number that refers to
        a set of interests. The same handle number doesn't
        necessarily have any relationship to the same handle
        on another client.
        """
        assert DoInterestManager.notify.debugCall()
        if __debug__:
            if isinstance(zoneIdList, list):
                zoneIdList.sort()
            if action is None:
                action = 'add'
            self._addDebugInterestHistory(
                action, description, handle, contextId, parentId, zoneIdList)
        if parentId == 0:
            DoInterestManager.notify.error(
                'trying to set interest to invalid parent: %s' % parentId)
        datagram = PyDatagram()
        # Add message type
        if isinstance(zoneIdList, list):
            # multi-zone form: send the sorted, de-duplicated zone list
            vzl = list(zoneIdList)
            vzl.sort()
            uniqueElements(vzl)
            datagram.addUint16(CLIENT_ADD_INTEREST_MULTIPLE)
            datagram.addUint32(contextId)
            datagram.addUint16(handle)
            datagram.addUint32(parentId)
            datagram.addUint16(len(vzl))
            for zone in vzl:
                datagram.addUint32(zone)
        else:
            # single-zone form
            datagram.addUint16(CLIENT_ADD_INTEREST)
            datagram.addUint32(contextId)
            datagram.addUint16(handle)
            datagram.addUint32(parentId)
            datagram.addUint32(zoneIdList)
        self.send(datagram)

    def _sendRemoveInterest(self, handle, contextId):
        """
        handle is a client-side created number that refers to
        a set of interests. The same handle number doesn't
        necessarily have any relationship to the same handle
        on another client.
        """
        assert DoInterestManager.notify.debugCall()
        assert handle in DoInterestManager._interests
        datagram = PyDatagram()
        # Add message type
        datagram.addUint16(CLIENT_REMOVE_INTEREST)
        datagram.addUint32(contextId)
        datagram.addUint16(handle)
        self.send(datagram)
        if __debug__:
            state = DoInterestManager._interests[handle]
            self._addDebugInterestHistory(
                "remove", state.desc, handle, contextId,
                state.parentId, state.zoneIdList)

    def _sendRemoveAIInterest(self, handle):
        """
        handle is a bare int, NOT an InterestHandle. Use this to
        close an AI opened interest.
        """
        datagram = PyDatagram()
        # Add message type
        datagram.addUint16(CLIENT_REMOVE_INTEREST)
        # high bit marks this as a server-opened interest handle
        datagram.addUint16((1<<15) + handle)
        self.send(datagram)

    def cleanupWaitAllInterestsComplete(self):
        # Cancel any pending all-interests-complete delayed callback.
        if self._completeDelayedCallback is not None:
            self._completeDelayedCallback.destroy()
            self._completeDelayedCallback = None

    def queueAllInterestsCompleteEvent(self, frames=5):
        # wait for N frames, if no new interests, send out all-done event
        # calling this is OK even if there are no pending interest completes
        def checkMoreInterests():
            # if there are new interests, cancel this delayed callback, another
            # will automatically be scheduled when all interests complete
            # print 'checkMoreInterests(',self._completeEventCount.num,'):',globalClock.getFrameCount()
            return self._completeEventCount.num > 0
        def sendEvent():
            messenger.send(self.getAllInterestsCompleteEvent())
            for callback in self._allInterestsCompleteCallbacks:
                callback()
            self._allInterestsCompleteCallbacks = []
        self.cleanupWaitAllInterestsComplete()
        self._completeDelayedCallback = FrameDelayedCall(
            'waitForAllInterestCompletes',
            callback=sendEvent,
            frames=frames,
            cancelFunc=checkMoreInterests)
        # drop local references to the closures; FrameDelayedCall holds them
        checkMoreInterests = None
        sendEvent = None

    def handleInterestDoneMessage(self, di):
        """
        This handles the interest done messages and may dispatch an event
        """
        assert DoInterestManager.notify.debugCall()
        contextId = di.getUint32()
        handle = di.getUint16()
        if self.__verbose():
            print('CR::INTEREST.interestDone(handle=%s)' % handle)
        DoInterestManager.notify.debug(
            "handleInterestDoneMessage--> Received handle %s, context %s" % (
            handle, contextId))
        if handle in DoInterestManager._interests:
            eventsToSend = []
            # if the context matches, send out the event
            if contextId == DoInterestManager._interests[handle].context:
                DoInterestManager._interests[handle].context = NO_CONTEXT
                # the event handlers may call back into the interest manager. Send out
                # the events after we're once again in a stable state.
                #DoInterestManager._interests[handle].sendEvents()
                eventsToSend = list(DoInterestManager._interests[handle].getEvents())
                DoInterestManager._interests[handle].clearEvents()
            else:
                DoInterestManager.notify.debug(
                    "handleInterestDoneMessage--> handle: %s: Expecting context %s, got %s" % (
                    handle, DoInterestManager._interests[handle].context, contextId))
            if __debug__:
                state = DoInterestManager._interests[handle]
                self._addDebugInterestHistory(
                    "finished", state.desc, handle, contextId, state.parentId,
                    state.zoneIdList)
            self._considerRemoveInterest(handle)
            for event in eventsToSend:
                messenger.send(event)
        else:
            DoInterestManager.notify.warning(
                "handleInterestDoneMessage: handle not found: %s" % (handle))
        # if there are no more outstanding interest-completes, send out global all-done event
        if self._completeEventCount.num == 0:
            self.queueAllInterestsCompleteEvent()
        assert self.printInterestsIfDebug()
if __debug__:
    # time was referenced by AsyncTextTestRunner.run but never imported;
    # import it here alongside unittest so the debug tools are self-contained.
    import time
    import unittest

    class AsyncTestCase(unittest.TestCase):
        """Test case whose completion is signalled asynchronously; callers
        poll isCompleted() after the async work signals setCompleted()."""
        def setCompleted(self):
            self._async_completed = True
        def isCompleted(self):
            return getattr(self, '_async_completed', False)

    class AsyncTestSuite(unittest.TestSuite):
        pass

    class AsyncTestLoader(unittest.TestLoader):
        # loader that produces AsyncTestSuite instances
        suiteClass = AsyncTestSuite

    class AsyncTextTestRunner(unittest.TextTestRunner):
        def run(self, testCase):
            """Run testCase and print a unittest-style summary."""
            result = self._makeResult()
            startTime = time.time()
            # BUG FIX: this previously called the undefined name 'test';
            # the parameter is testCase.
            testCase(result)
            stopTime = time.time()
            timeTaken = stopTime - startTime
            result.printErrors()
            self.stream.writeln(result.separator2)
            run = result.testsRun
            self.stream.writeln("Ran %d test%s in %.3fs" %
                                (run, run != 1 and "s" or "", timeTaken))
            self.stream.writeln()
            if not result.wasSuccessful():
                self.stream.write("FAILED (")
                failed, errored = map(len, (result.failures, result.errors))
                if failed:
                    self.stream.write("failures=%d" % failed)
                if errored:
                    if failed:
                        self.stream.write(", ")
                    self.stream.write("errors=%d" % errored)
                self.stream.writeln(")")
            else:
                self.stream.writeln("OK")
            return result

    class TestInterestAddRemove(AsyncTestCase, DirectObject.DirectObject):
        """Round-trip test: open an interest, then close it on confirmation."""
        def testInterestAdd(self):
            event = uniqueName('InterestAdd')
            self.acceptOnce(event, self.gotInterestAddResponse)
            self.handle = base.cr.addInterest(base.cr.GameGlobalsId, 100, 'TestInterest', event=event)
        def gotInterestAddResponse(self):
            event = uniqueName('InterestRemove')
            self.acceptOnce(event, self.gotInterestRemoveResponse)
            base.cr.removeInterest(self.handle, event=event)
        def gotInterestRemoveResponse(self):
            self.setCompleted()

    def runTests():
        # BUG FIX: this referenced unittest.AsyncTextTestRunner, which does
        # not exist; use the local runner. Also build the suite with the
        # (previously unused) AsyncTestLoader instead of the deprecated
        # unittest.makeSuite.
        suite = AsyncTestLoader().loadTestsFromTestCase(TestInterestAddRemove)
        AsyncTextTestRunner(verbosity=2).run(suite)
| chandler14362/panda3d | direct/src/distributed/DoInterestManager.py | Python | bsd-3-clause | 29,162 |
"""
Extra HTML Widget classes
"""
import datetime
import re
from django.newforms.widgets import Widget, Select
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
__all__ = ('SelectDateWidget',)

# Matches "YYYY-M-D" / "YYYY-MM-DD" anchored at the end; groups are
# (year, month, day).
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
class SelectDateWidget(Widget):
    """
    A Widget that splits date input into three <select> boxes.

    This also serves as an example of a Widget that has more than one HTML
    element and hence implements value_from_datadict.
    """
    month_field = '%s_month'
    day_field = '%s_day'
    year_field = '%s_year'

    def __init__(self, attrs=None, years=None):
        # years is an optional list/tuple of years to use in the "year" select box.
        self.attrs = attrs or {}
        if years:
            self.years = years
        else:
            current_year = datetime.date.today().year
            self.years = range(current_year, current_year + 10)

    def render(self, name, value, attrs=None):
        # Pull year/month/day out of a date-like value, or parse a string.
        try:
            year_val, month_val, day_val = value.year, value.month, value.day
        except AttributeError:
            year_val = month_val = day_val = None
            if isinstance(value, basestring):
                match = RE_DATE.match(value)
                if match:
                    year_val, month_val, day_val = [int(v) for v in match.groups()]

        id_ = self.attrs.get('id', 'id_%s' % name)

        month_choices = sorted(MONTHS.items())
        day_choices = [(i, i) for i in range(1, 32)]
        year_choices = [(i, i) for i in self.years]

        # Render the three selects in month/day/year order, reusing one
        # attrs dict and swapping its id for each sub-widget.
        output = []
        local_attrs = self.build_attrs(id=self.month_field % id_)
        output.append(Select(choices=month_choices).render(
            self.month_field % name, month_val, local_attrs))
        local_attrs['id'] = self.day_field % id_
        output.append(Select(choices=day_choices).render(
            self.day_field % name, day_val, local_attrs))
        local_attrs['id'] = self.year_field % id_
        output.append(Select(choices=year_choices).render(
            self.year_field % name, year_val, local_attrs))
        return mark_safe(u'\n'.join(output))

    @classmethod
    def id_for_label(cls, id_):
        # The label points at the first (month) sub-widget.
        return '%s_month' % id_

    def value_from_datadict(self, data, files, name):
        y = data.get(self.year_field % name)
        m = data.get(self.month_field % name)
        d = data.get(self.day_field % name)
        if y and m and d:
            return '%s-%s-%s' % (y, m, d)
        return data.get(name, None)
| paulsmith/geodjango | django/newforms/extras/widgets.py | Python | bsd-3-clause | 2,692 |
from __future__ import absolute_import, division, print_function
import os
from time import ctime
from qtpy import QtWidgets
from glue import core
from glue.utils.qt import load_ui
class MessageWidget(QtWidgets.QWidget, core.hub.HubListener):
    """ This simple class displays all messages broadcast
    by a hub. It is mainly intended for debugging """

    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        self.ui = load_ui('message_widget.ui', self,
                          directory=os.path.dirname(__file__))
        self.ui.messageTable.setColumnCount(3)
        labels = ['Time', 'Message', 'Sender']
        self.ui.messageTable.setHorizontalHeaderLabels(labels)

    def register_to_hub(self, hub):
        """Subscribe to every message type the hub broadcasts."""
        # catch all messages
        hub.subscribe(self, core.message.Message,
                      handler=self.process_message,
                      filter=lambda x: True)

    def process_message(self, message):
        """Prepend one row (time, message class, sender class) to the table."""
        # New messages always go to the top of the table.
        # (Previously computed as rowCount() * 0, which is always 0.)
        row = 0
        self.ui.messageTable.insertRow(row)
        # ctime() is "Day Mon DD HH:MM:SS YYYY"; field 3 is the time of day.
        tm = QtWidgets.QTableWidgetItem(ctime().split()[3])
        # Use __name__ directly instead of parsing str(type(...)).
        mtyp = QtWidgets.QTableWidgetItem(type(message).__name__)
        sender = QtWidgets.QTableWidgetItem(type(message.sender).__name__)
        self.ui.messageTable.setItem(row, 0, tm)
        self.ui.messageTable.setItem(row, 1, mtyp)
        self.ui.messageTable.setItem(row, 2, sender)
        self.ui.messageTable.resizeColumnsToContents()
| stscieisenhamer/glue | glue/core/qt/message_widget.py | Python | bsd-3-clause | 1,541 |
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll
from math import sin, cos, tan
from functools import partial
try:
from ompl import base as ob
from ompl import control as oc
from ompl import geometric as og
except:
# if the ompl module is not in the PYTHONPATH assume it is installed in a
# subdirectory of the parent directory called "py-bindings."
from os.path import abspath, dirname, join
import sys
sys.path.insert(0, join(dirname(dirname(abspath(__file__))),'py-bindings'))
from ompl import base as ob
from ompl import control as oc
from ompl import geometric as og
def kinematicCarODE(q, u, qdot, carLength=0.2):
    """Kinematic car dynamics.

    q = (x, y, theta) is the SE(2) state; u = (forward speed, steering
    angle) is the control. Writes dq/dt into qdot in place.

    carLength is the wheelbase; the default 0.2 preserves the original
    hard-coded demo value, but it can now be overridden for other cars.
    """
    theta = q[2]
    qdot[0] = u[0] * cos(theta)
    qdot[1] = u[0] * sin(theta)
    qdot[2] = u[0] * tan(u[1]) / carLength
def isStateValid(spaceInformation, state):
    """Return True when state is valid; currently just a bounds check.

    Bound to the planner via functools.partial in plan().
    """
    # perform collision checking or check if other constraints are
    # satisfied
    return spaceInformation.satisfiesBounds(state)
def plan():
    """Set up and solve a kinematic-car motion-planning problem in SE(2).

    Builds the state and control spaces, wires in the validity checker and
    the ODE-based state propagator, sets start/goal, then solves for up to
    120 seconds and prints any solution path.
    """
    # construct the state space we are planning in
    space = ob.SE2StateSpace()

    # set the bounds for the R^2 part of SE(2)
    bounds = ob.RealVectorBounds(2)
    bounds.setLow(-1)
    bounds.setHigh(1)
    space.setBounds(bounds)

    # create a control space
    cspace = oc.RealVectorControlSpace(space, 2)

    # set the bounds for the control space
    cbounds = ob.RealVectorBounds(2)
    cbounds.setLow(-.3)
    cbounds.setHigh(.3)
    cspace.setBounds(cbounds)

    # define a simple setup class
    ss = oc.SimpleSetup(cspace)
    # bind the space information into the validity checker via partial
    validityChecker = ob.StateValidityCheckerFn(partial(isStateValid, ss.getSpaceInformation()))
    ss.setStateValidityChecker(validityChecker)
    # propagate states by integrating the car ODE (kinematicCarODE)
    ode = oc.ODE(kinematicCarODE)
    odeSolver = oc.ODEBasicSolver(ss.getSpaceInformation(), ode)
    propagator = oc.ODESolver.getStatePropagator(odeSolver)
    ss.setStatePropagator(propagator)

    # create a start state
    start = ob.State(space)
    start().setX(-0.5);
    start().setY(0.0);
    start().setYaw(0.0);

    # create a goal state
    goal = ob.State(space);
    goal().setX(0.0);
    goal().setY(0.5);
    goal().setYaw(0.0);

    # set the start and goal states
    ss.setStartAndGoalStates(start, goal, 0.05)

    # attempt to solve the problem
    solved = ss.solve(120.0)

    if solved:
        # print the path to screen
        print("Found solution:\n%s" % ss.getSolutionPath().asGeometric().printAsMatrix())
# Run the demo when executed as a script.
if __name__ == "__main__":
    plan()
| davetcoleman/ompl | demos/RigidBodyPlanningWithODESolverAndControls.py | Python | bsd-3-clause | 4,218 |
"""This file exists for backwards compatability.
Please use the separate backends found in either `djangae.contrib.gauth.datastore.backends` or
`djangae.contrib.gauth.sql.backends`.
"""
import warnings
from djangae.contrib.gauth.datastore.backends import AppEngineUserAPIBackend
# Emitted once at import time: importing this module at all means the caller
# is on the deprecated path.
warnings.warn(
    'AppEngineUserAPI is deprecated. Please use the specific backends from gauth.datastore '
    'or gauth.sql instead.'
)


class AppEngineUserAPI(AppEngineUserAPIBackend):
    """Deprecated alias kept for backwards compatibility; identical to
    djangae.contrib.gauth.datastore.backends.AppEngineUserAPIBackend."""
    pass
| Ali-aqrabawi/ezclinic | lib/djangae/contrib/gauth/backends.py | Python | mit | 482 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.template import Context
from django.utils.translation import ugettext as _
from django.utils.translation import activate, deactivate
from .compatibility import get_template_from_string
from .conftest import only_bootstrap
from .forms import CheckboxesTestForm, TestForm
from crispy_forms.bootstrap import (
PrependedAppendedText, AppendedText, PrependedText, InlineRadios,
Tab, TabHolder, AccordionGroup, Accordion, Alert, InlineCheckboxes,
FieldWithButtons, StrictButton
)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
Layout, HTML, Field, MultiWidgetField
)
from crispy_forms.utils import render_crispy_form
def test_field_with_custom_template():
    """A Field given a custom template should render through that template."""
    form = TestForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(
        Field('email', template='custom_field_template.html'))

    rendered = render_crispy_form(form)
    assert '<h1>Special custom field</h1>' in rendered
def test_multiwidget_field():
    """MultiWidgetField should apply per-widget attrs to each sub-widget."""
    template = get_template_from_string("""
        {% load crispy_forms_tags %}
        {% crispy form %}
    """)

    form = TestForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(
        MultiWidgetField(
            'datetime_field',
            attrs=(
                {'rel': 'test_dateinput'},
                {'rel': 'test_timeinput', 'style': 'width: 30px;', 'type': "hidden"}
            )
        )
    )

    rendered = template.render(Context({'form': form}))

    # Each attribute should land on exactly one widget.
    for fragment in (
        'class="dateinput',
        'rel="test_dateinput"',
        'rel="test_timeinput"',
        'style="width: 30px;"',
        'type="hidden"',
    ):
        assert rendered.count(fragment) == 1
def test_field_type_hidden():
    """Field(type="hidden") should render a hidden input and pass extra
    HTML attributes through as data-* attributes."""
    template = get_template_from_string("""
        {% load crispy_forms_tags %}
        {% crispy test_form %}
    """)

    form = TestForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(
        Field('email', type="hidden", data_test=12),
        Field('datetime_field'),
    )

    rendered = template.render(Context({'test_form': form}))

    # Check form parameters
    for fragment in ('data-test="12"', 'name="email"',
                     'class="dateinput', 'class="timeinput'):
        assert rendered.count(fragment) == 1
def test_field_wrapper_class(settings):
    """wrapper_class should be appended to the template pack's wrapper div."""
    form = TestForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(Field('email', wrapper_class="testing"))

    rendered = render_crispy_form(form)

    expected = {
        'bootstrap': 'class="control-group testing"',
        'bootstrap3': 'class="form-group testing"',
        'bootstrap4': 'class="form-group row testing"',
    }.get(settings.CRISPY_TEMPLATE_PACK)
    if expected is not None:
        assert rendered.count(expected) == 1
def test_html_with_carriage_returns(settings):
    """Newlines inside an HTML layout object must survive rendering verbatim."""
    test_form = TestForm()
    test_form.helper = FormHelper()
    test_form.helper.layout = Layout(
        HTML("""
            if (a==b){
                // some comment
                a+1;
                foo();
            }
        """)
    )
    html = render_crispy_form(test_form)
    # Total newline counts differ per pack only because the surrounding
    # form markup differs, not the HTML payload itself.
    if settings.CRISPY_TEMPLATE_PACK == 'uni_form':
        assert html.count('\n') == 23
    elif settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
        assert html.count('\n') == 25
    else:
        assert html.count('\n') == 27
def test_i18n():
    """HTML layout content wrapped in gettext must be translated for the active locale."""
    activate('es')
    form = TestForm()
    form.helper = FormHelper()
    form.helper.layout = Layout(
        HTML(_("Enter a valid value."))
    )
    html = render_crispy_form(form)
    # "Enter a valid value." has a translation in Django's own 'es' catalog.
    assert "Introduzca un valor correcto" in html
    deactivate()
@only_bootstrap
class TestBootstrapLayoutObjects(object):
    """Rendering tests for bootstrap-specific layout objects.

    Runs once per bootstrap template pack (``only_bootstrap``); assertions
    branch on ``settings.CRISPY_TEMPLATE_PACK`` where the markup differs.
    """
    def test_custom_django_widget(self):
        """Widgets *inheriting* RadioSelect/CheckboxSelectMultiple keep crispy markup."""
        class CustomRadioSelect(forms.RadioSelect):
            pass
        class CustomCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
            pass
        # Make sure an inherited RadioSelect gets rendered as it
        form = CheckboxesTestForm()
        form.fields['inline_radios'].widget = CustomRadioSelect()
        form.helper = FormHelper()
        form.helper.layout = Layout('inline_radios')
        html = render_crispy_form(form)
        assert 'class="radio"' in html
        # Make sure an inherited CheckboxSelectMultiple gets rendered as it
        form.fields['checkboxes'].widget = CustomCheckboxSelectMultiple()
        form.helper.layout = Layout('checkboxes')
        html = render_crispy_form(form)
        assert 'class="checkbox"' in html
    def test_prepended_appended_text(self, settings):
        """Prepended/Appended text renders add-on spans appropriate to the pack."""
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            PrependedAppendedText('email', '@', 'gmail.com'),
            AppendedText('password1', '#'),
            PrependedText('password2', '$'),
        )
        html = render_crispy_form(test_form)
        # Check form parameters
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
            assert html.count('<span class="add-on">@</span>') == 1
            assert html.count('<span class="add-on">gmail.com</span>') == 1
            assert html.count('<span class="add-on">#</span>') == 1
            assert html.count('<span class="add-on">$</span>') == 1
        if settings.CRISPY_TEMPLATE_PACK in ['bootstrap3', 'bootstrap4']:
            assert html.count('<span class="input-group-addon">@</span>') == 1
            assert html.count(
                '<span class="input-group-addon">gmail.com</span>') == 1
            assert html.count('<span class="input-group-addon">#</span>') == 1
            assert html.count('<span class="input-group-addon">$</span>') == 1
        # css_class on the layout object must reach both the input and the add-on.
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap3':
            test_form.helper.layout = Layout(
                PrependedAppendedText('email', '@', 'gmail.com',
                                      css_class='input-lg'), )
            html = render_crispy_form(test_form)
            assert '<input class="input-lg' in html
            assert '<span class="input-group-addon input-lg' in html
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap4':
            test_form.helper.layout = Layout(
                PrependedAppendedText('email', '@', 'gmail.com',
                                      css_class='form-control-lg'), )
            html = render_crispy_form(test_form)
            assert '<input class="form-control-lg' in html
            assert '<span class="input-group-addon' in html
    def test_inline_radios(self, settings):
        """InlineRadios switches radio markup to the pack's inline class."""
        test_form = CheckboxesTestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            InlineRadios('inline_radios')
        )
        html = render_crispy_form(test_form)
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
            assert html.count('radio inline"') == 2
        elif settings.CRISPY_TEMPLATE_PACK in ['bootstrap3', 'bootstrap4']:
            assert html.count('radio-inline"') == 2
    def test_accordion_and_accordiongroup(self, settings):
        """Accordion renders one group container per AccordionGroup with all fields."""
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Accordion(
                AccordionGroup(
                    'one',
                    'first_name'
                ),
                AccordionGroup(
                    'two',
                    'password1',
                    'password2'
                )
            )
        )
        html = render_crispy_form(test_form)
        # bootstrap 2 uses "accordion" markup; later packs use panels.
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
            assert html.count('<div class="accordion"') == 1
            assert html.count('<div class="accordion-group">') == 2
            assert html.count('<div class="accordion-heading">') == 2
        else:
            assert html.count('<div class="panel panel-default"') == 2
            assert html.count('<div class="panel-group"') == 1
            assert html.count('<div class="panel-heading">') == 2
        assert html.count('<div id="one"') == 1
        assert html.count('<div id="two"') == 1
        assert html.count('name="first_name"') == 1
        assert html.count('name="password1"') == 1
        assert html.count('name="password2"') == 1
    def test_accordion_active_false_not_rendered(self, settings):
        """active=False on an AccordionGroup must drop the 'collapse in' state."""
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Accordion(
                AccordionGroup(
                    'one',
                    'first_name',
                ),
                # there is no ``active`` kwarg here.
            )
        )
        # The first time, there should be one of them there.
        html = render_crispy_form(test_form)
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
            accordion_class = "accordion-body"
        else:
            accordion_class = "panel-collapse"
        assert html.count('<div id="one" class="%s collapse in"' % accordion_class) == 1
        test_form.helper.layout = Layout(
            Accordion(
                AccordionGroup(
                    'one',
                    'first_name',
                    active=False,  # now ``active`` manually set as False
                ),
            )
        )
        # This time, it shouldn't be there at all.
        html = render_crispy_form(test_form)
        assert html.count('<div id="one" class="%s collapse in"' % accordion_class) == 0
    def test_alert(self):
        """Alert renders a dismissable bootstrap alert containing its content."""
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Alert(content='Testing...')
        )
        html = render_crispy_form(test_form)
        assert html.count('<div class="alert"') == 1
        assert html.count('<button type="button" class="close"') == 1
        assert html.count('Testing...') == 1
    def test_alert_block(self):
        """block=True adds the 'alert-block' modifier class."""
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            Alert(content='Testing...', block=True)
        )
        html = render_crispy_form(test_form)
        assert html.count('<div class="alert alert-block"') == 1
        assert html.count('Testing...') == 1
    def test_tab_and_tab_holder(self):
        """TabHolder renders nav items plus one pane per Tab; the first tab is active."""
        test_form = TestForm()
        test_form.helper = FormHelper()
        test_form.helper.layout = Layout(
            TabHolder(
                Tab(
                    'one',
                    'first_name',
                    css_id="custom-name",
                    css_class="first-tab-class"
                ),
                Tab(
                    'two',
                    'password1',
                    'password2'
                )
            )
        )
        html = render_crispy_form(test_form)
        assert html.count(
            '<li class="tab-pane active"><a href="#custom-name" data-toggle="tab">One</a></li>'
        ) == 1
        assert html.count('class="tab-pane first-tab-class active"') == 1
        assert html.count('<li class="tab-pane') == 2
        # 2 nav <li> entries + 2 content panes
        assert html.count('tab-pane') == 4
        assert html.count('<div id="custom-name"') == 1
        assert html.count('<div id="two"') == 1
        assert html.count('name="first_name"') == 1
        assert html.count('name="password1"') == 1
        assert html.count('name="password2"') == 1
    def test_tab_helper_reuse(self):
        """A class-level helper shared across instances must not accumulate 'active' state."""
        # this is a proper form, according to the docs.
        # note that the helper is a class property here,
        # shared between all instances
        class TestForm(forms.Form):
            val1 = forms.CharField(required=False)
            val2 = forms.CharField(required=True)
            helper = FormHelper()
            helper.layout = Layout(
                TabHolder(
                    Tab('one', 'val1',),
                    Tab('two', 'val2',)
                )
            )
        # first render of form => everything is fine
        test_form = TestForm()
        html = render_crispy_form(test_form)
        # second render of form => first tab should be active,
        # but not duplicate class
        test_form = TestForm()
        html = render_crispy_form(test_form)
        assert html.count('class="tab-pane active active"') == 0
        # render a new form, now with errors
        test_form = TestForm(data={'val1': 'foo'})
        html = render_crispy_form(test_form)
        # tab 1 should not be active
        assert html.count('<div id="one" \n class="tab-pane active') == 0
        # tab 2 should be active
        assert html.count('<div id="two" \n class="tab-pane active') == 1
    def test_radio_attrs(self):
        """widget.attrs set on radio/checkbox widgets must reach the rendered inputs."""
        form = CheckboxesTestForm()
        form.fields['inline_radios'].widget.attrs = {'class': "first"}
        form.fields['checkboxes'].widget.attrs = {'class': "second"}
        html = render_crispy_form(form)
        assert 'class="first"' in html
        assert 'class="second"' in html
    def test_field_with_buttons(self, settings):
        """FieldWithButtons groups an input with StrictButtons and forwards extra attrs."""
        form = TestForm()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            FieldWithButtons(
                Field('password1', css_class="span4"),
                StrictButton("Go!", css_id="go-button"),
                StrictButton("No!", css_class="extra"),
                StrictButton("Test", type="submit", name="whatever", value="something"),
                css_class="extra",
                autocomplete="off"
            )
        )
        html = render_crispy_form(form)
        form_group_class = 'control-group'
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap3':
            form_group_class = 'form-group'
        elif settings.CRISPY_TEMPLATE_PACK == 'bootstrap4':
            form_group_class = 'form-group row'
        assert html.count('class="%s extra"' % form_group_class) == 1
        assert html.count('autocomplete="off"') == 1
        assert html.count('class="span4') == 1
        assert html.count('id="go-button"') == 1
        assert html.count("Go!") == 1
        assert html.count("No!") == 1
        assert html.count('class="btn"') == 2
        assert html.count('class="btn extra"') == 1
        assert html.count('type="submit"') == 1
        assert html.count('name="whatever"') == 1
        assert html.count('value="something"') == 1
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
            assert html.count('class="input-append"') == 1
        elif settings.CRISPY_TEMPLATE_PACK in ['bootstrap3', 'bootstrap4']:
            assert html.count('class="input-group-btn') == 1
    def test_hidden_fields(self):
        """When every field is hidden, add-ons and labels must not render."""
        form = TestForm()
        # All fields hidden
        for field in form.fields:
            form.fields[field].widget = forms.HiddenInput()
        form.helper = FormHelper()
        form.helper.layout = Layout(
            AppendedText('password1', 'foo'),
            PrependedText('password2', 'bar'),
            PrependedAppendedText('email', 'bar'),
            InlineCheckboxes('first_name'),
            InlineRadios('last_name'),
        )
        html = render_crispy_form(form)
        assert html.count("<input") == 5
        assert html.count('type="hidden"') == 5
        assert html.count('<label') == 0
    def test_multiplecheckboxes(self, settings):
        """InlineCheckboxes wraps a multiple-checkbox field in inline markup."""
        test_form = CheckboxesTestForm()
        html = render_crispy_form(test_form)
        assert html.count('checked="checked"') == 6
        test_form.helper = FormHelper(test_form)
        test_form.helper[1].wrap(InlineCheckboxes, inline=True)
        html = render_crispy_form(test_form)
        if settings.CRISPY_TEMPLATE_PACK == 'bootstrap':
            assert html.count('checkbox inline"') == 3
            assert html.count('inline"') == 3
        elif settings.CRISPY_TEMPLATE_PACK in ['bootstrap3', 'bootstrap4']:
            assert html.count('checkbox-inline"') == 3
            assert html.count('inline="True"') == 4
| samabhi/pstHealth | venv/lib/python2.7/site-packages/crispy_forms/tests/test_layout_objects.py | Python | mit | 16,205 |
import asyncio
import discord
from discord.ext import commands
if not discord.opus.is_loaded():
    # the 'opus' library here is opus.dll on windows
    # or libopus.so on linux in the current directory
    # you should replace this with the location the
    # opus library is located in and with the proper filename.
    # note that on windows this DLL is automatically provided for you
    # NOTE(review): load_opus raises if the library is missing at this
    # path -- confirm the location before deploying.
    discord.opus.load_opus('opus')
class VoiceEntry:
    """A queued song together with the user and channel that requested it."""

    def __init__(self, message, player):
        # Remember who asked for the song and where, so status messages
        # can be sent back to the right place.
        self.requester = message.author
        self.channel = message.channel
        self.player = player

    def __str__(self):
        """Human-readable summary: title, uploader, requester and length."""
        base = '*{0.title}* uploaded by {0.uploader} and requested by {1.display_name}'
        length = self.player.duration
        if length:
            base = base + ' [length: {0[0]}m {0[1]}s]'.format(divmod(length, 60))
        return base.format(self.player, self.requester)
class VoiceState:
    """Per-server playback state: the song queue, voice client and player task."""
    def __init__(self, bot):
        self.current = None  # VoiceEntry being played, None when idle
        self.voice = None    # voice client, set once the bot joins a channel
        self.bot = bot
        # Signalled when the current song finishes, waking audio_player_task.
        self.play_next_song = asyncio.Event()
        self.songs = asyncio.Queue()
        self.skip_votes = set() # a set of user_ids that voted
        # Long-lived consumer task that drains the queue.
        self.audio_player = self.bot.loop.create_task(self.audio_player_task())
    def is_playing(self):
        """Return True while a song is actively playing."""
        if self.voice is None or self.current is None:
            return False
        player = self.current.player
        return not player.is_done()
    @property
    def player(self):
        # Convenience accessor for the current entry's player.
        return self.current.player
    def skip(self):
        """Stop the current song; the vote tally resets for the next one."""
        self.skip_votes.clear()
        if self.is_playing():
            self.player.stop()
    def toggle_next(self):
        # Used as the player's after-callback; presumably invoked from a
        # non-event-loop thread, hence call_soon_threadsafe.
        self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
    async def audio_player_task(self):
        """Consumer loop: pull the next entry, announce it, play, wait for the end."""
        while True:
            self.play_next_song.clear()
            self.current = await self.songs.get()
            await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current))
            self.current.player.start()
            # Set by toggle_next() when the song finishes.
            await self.play_next_song.wait()
class Music:
    """Voice related commands.
    Works in multiple servers at once.
    """
    def __init__(self, bot):
        self.bot = bot
        # One VoiceState per server id, created lazily by get_voice_state().
        self.voice_states = {}
    def get_voice_state(self, server):
        """Return the server's VoiceState, creating it on first use."""
        state = self.voice_states.get(server.id)
        if state is None:
            state = VoiceState(self.bot)
            self.voice_states[server.id] = state
        return state
    async def create_voice_client(self, channel):
        """Join `channel` and store the resulting voice client on the state."""
        voice = await self.bot.join_voice_channel(channel)
        state = self.get_voice_state(channel.server)
        state.voice = voice
    def __unload(self):
        # Cog teardown: cancel every server's player task and disconnect.
        for state in self.voice_states.values():
            try:
                state.audio_player.cancel()
                if state.voice:
                    self.bot.loop.create_task(state.voice.disconnect())
            except:
                # Best-effort cleanup; never let unload fail.
                pass
    @commands.command(pass_context=True, no_pm=True)
    async def join(self, ctx, *, channel : discord.Channel):
        """Joins a voice channel."""
        try:
            await self.create_voice_client(channel)
        except discord.ClientException:
            await self.bot.say('Already in a voice channel...')
        except discord.InvalidArgument:
            await self.bot.say('This is not a voice channel...')
        else:
            await self.bot.say('Ready to play audio in ' + channel.name)
    @commands.command(pass_context=True, no_pm=True)
    async def summon(self, ctx):
        """Summons the bot to join your voice channel."""
        summoned_channel = ctx.message.author.voice_channel
        if summoned_channel is None:
            await self.bot.say('You are not in a voice channel.')
            return False
        state = self.get_voice_state(ctx.message.server)
        if state.voice is None:
            state.voice = await self.bot.join_voice_channel(summoned_channel)
        else:
            # Already connected somewhere on this server: just move.
            await state.voice.move_to(summoned_channel)
        # Returns True on success so play() can bail out otherwise.
        return True
    @commands.command(pass_context=True, no_pm=True)
    async def play(self, ctx, *, song : str):
        """Plays a song.
        If there is a song currently in the queue, then it is
        queued until the next song is done playing.
        This command automatically searches as well from YouTube.
        The list of supported sites can be found here:
        https://rg3.github.io/youtube-dl/supportedsites.html
        """
        state = self.get_voice_state(ctx.message.server)
        opts = {
            'default_search': 'auto',
            'quiet': True,
        }
        if state.voice is None:
            # Auto-summon the bot to the caller's channel first.
            success = await ctx.invoke(self.summon)
            if not success:
                return
        try:
            player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)
        except Exception as e:
            fmt = 'An error occurred while processing this request: ```py\n{}: {}\n```'
            await self.bot.send_message(ctx.message.channel, fmt.format(type(e).__name__, e))
        else:
            player.volume = 0.6
            entry = VoiceEntry(ctx.message, player)
            await self.bot.say('Enqueued ' + str(entry))
            await state.songs.put(entry)
    @commands.command(pass_context=True, no_pm=True)
    async def volume(self, ctx, value : int):
        """Sets the volume of the currently playing song."""
        state = self.get_voice_state(ctx.message.server)
        if state.is_playing():
            player = state.player
            # Users give 0-100; the player expects 0.0-1.0.
            player.volume = value / 100
            await self.bot.say('Set the volume to {:.0%}'.format(player.volume))
    @commands.command(pass_context=True, no_pm=True)
    async def pause(self, ctx):
        """Pauses the currently played song."""
        state = self.get_voice_state(ctx.message.server)
        if state.is_playing():
            player = state.player
            player.pause()
    @commands.command(pass_context=True, no_pm=True)
    async def resume(self, ctx):
        """Resumes the currently played song."""
        state = self.get_voice_state(ctx.message.server)
        if state.is_playing():
            player = state.player
            player.resume()
    @commands.command(pass_context=True, no_pm=True)
    async def stop(self, ctx):
        """Stops playing audio and leaves the voice channel.
        This also clears the queue.
        """
        server = ctx.message.server
        state = self.get_voice_state(server)
        if state.is_playing():
            player = state.player
            player.stop()
        try:
            # Tear down the state entirely; a new one is created on demand.
            state.audio_player.cancel()
            del self.voice_states[server.id]
            await state.voice.disconnect()
        except:
            pass
    @commands.command(pass_context=True, no_pm=True)
    async def skip(self, ctx):
        """Vote to skip a song. The song requester can automatically skip.
        3 skip votes are needed for the song to be skipped.
        """
        state = self.get_voice_state(ctx.message.server)
        if not state.is_playing():
            await self.bot.say('Not playing any music right now...')
            return
        voter = ctx.message.author
        if voter == state.current.requester:
            # The requester bypasses voting.
            await self.bot.say('Requester requested skipping song...')
            state.skip()
        elif voter.id not in state.skip_votes:
            state.skip_votes.add(voter.id)
            total_votes = len(state.skip_votes)
            if total_votes >= 3:
                await self.bot.say('Skip vote passed, skipping song...')
                state.skip()
            else:
                await self.bot.say('Skip vote added, currently at [{}/3]'.format(total_votes))
        else:
            await self.bot.say('You have already voted to skip this song.')
    @commands.command(pass_context=True, no_pm=True)
    async def playing(self, ctx):
        """Shows info about the currently played song."""
        state = self.get_voice_state(ctx.message.server)
        if state.current is None:
            await self.bot.say('Not playing anything.')
        else:
            skip_count = len(state.skip_votes)
            await self.bot.say('Now playing {} [skips: {}/3]'.format(state.current, skip_count))
# The bot answers to a '$' prefix or a direct @mention.
bot = commands.Bot(command_prefix=commands.when_mentioned_or('$'), description='A playlist example for discord.py')
bot.add_cog(Music(bot))
@bot.event
async def on_ready():
    """Log the bot's identity once the gateway connection is ready."""
    print('Logged in as:\n{0} (ID: {0.id})'.format(bot.user))
# Replace 'token' with a real bot token before running.
bot.run('token')
| Boy-314/winner-winner-bidget-sbinner | examples/playlist.py | Python | mit | 8,569 |
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPError", "NNTPReplyError", "NNTPTemporaryError",
"NNTPPermanentError", "NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions"""

    def __init__(self, *args):
        # Preserve normal Exception semantics (args tuple, str() output).
        Exception.__init__(self, *args)
        # Expose the server response line, when one was supplied, so callers
        # can inspect it without parsing str(self).
        self.response = args[0] if args else 'No response given'
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply"""
    pass
class NNTPTemporaryError(NNTPError):
    """4xx errors"""
    pass
class NNTPPermanentError(NNTPError):
    """5xx errors"""
    pass
class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    pass
class NNTPDataError(NNTPError):
    """Error in response data"""
    pass
# Standard port used by NNTP servers
NNTP_PORT = 119
# Port used for NNTP over implicit SSL/TLS
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
    '100',   # HELP
    '101',   # CAPABILITIES
    '211',   # LISTGROUP   (also not multi-line with GROUP)
    '215',   # LIST
    '220',   # ARTICLE
    '221',   # HEAD, XHDR
    '222',   # BODY
    '224',   # OVER, XOVER
    '225',   # HDR
    '230',   # NEWNEWS
    '231',   # NEWGROUPS
    '282',   # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
    "subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
    'bytes': ':bytes',
    'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
# One row of a LIST/NEWGROUPS response: group name plus article range and flag.
GroupInfo = collections.namedtuple('GroupInfo',
                                   ['group', 'last', 'first', 'flag'])
# One article as returned by ARTICLE/HEAD/BODY: number, message-id, raw lines.
ArticleInfo = collections.namedtuple('ArticleInfo',
                                     ['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
    """Takes a unicode string representing a munged header value
    and decodes it as a (possibly non-ASCII) readable value."""
    decoded = []
    for value, charset in _email_decode_header(header_str):
        # The email parser yields bytes for encoded words and str for
        # plain segments; normalize everything to str.
        if isinstance(value, bytes):
            value = value.decode(charset or 'ascii')
        decoded.append(value)
    return ''.join(decoded)
def _parse_overview_fmt(lines):
    """Parse a list of string representing the response to LIST OVERVIEW.FMT
    and return a list of header/metadata names.
    Raises NNTPDataError if the response is not compliant
    (cf. RFC 3977, section 8.4)."""
    fmt = []
    for line in lines:
        if line[0] == ':':
            # Metadata item, e.g. ":bytes" -- keep the leading colon.
            name, _, suffix = line[1:].partition(':')
            name = ':' + name
        else:
            # Header name, e.g. "Subject:" or "Xref:full"; normalize case
            # and map known alternative spellings to the canonical form.
            name, _, suffix = line.partition(':')
            name = name.lower()
            name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
        # Should we do something with the suffix?
        fmt.append(name)
    required = _DEFAULT_OVERVIEW_FMT
    if len(fmt) < len(required):
        raise NNTPDataError("LIST OVERVIEW.FMT response too short")
    if fmt[:len(required)] != required:
        raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
    return fmt
def _parse_overview(lines, fmt, data_process_func=None):
    """Parse the response to a OVER or XOVER command according to the
    overview format `fmt`.

    Returns a list of (article_number, fields_dict) tuples.
    NOTE(review): `data_process_func` is accepted but never used here --
    confirm whether any caller relies on it before removing.
    """
    n_defaults = len(_DEFAULT_OVERVIEW_FMT)
    overview = []
    for line in lines:
        fields = {}
        # Each overview line is TAB-separated: article number first.
        article_number, *tokens = line.split('\t')
        article_number = int(article_number)
        for i, token in enumerate(tokens):
            if i >= len(fmt):
                # XXX should we raise an error? Some servers might not
                # support LIST OVERVIEW.FMT and still return additional
                # headers.
                continue
            field_name = fmt[i]
            is_metadata = field_name.startswith(':')
            if i >= n_defaults and not is_metadata:
                # Non-default header names are included in full in the response
                # (unless the field is totally empty)
                h = field_name + ": "
                if token and token[:len(h)].lower() != h:
                    raise NNTPDataError("OVER/XOVER response doesn't include "
                                        "names of additional headers")
                token = token[len(h):] if token else None
            fields[fmt[i]] = token
        overview.append((article_number, fields))
    return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
if _have_ssl:
    def _encrypt_on(sock, context, hostname):
        """Wrap a socket in SSL/TLS. Arguments:
        - sock: Socket to wrap
        - context: SSL context to use for the encrypted connection
        - hostname: server hostname, passed through for certificate matching
        Returns:
        - sock: New, encrypted socket.
        """
        # Generate a default SSL context if none was passed.
        if context is None:
            # NOTE(review): ssl._create_stdlib_context() is a private CPython
            # helper; its defaults can change between Python versions.
            context = ssl._create_stdlib_context()
        return context.wrap_socket(sock, server_hostname=hostname)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (and receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
    def __init__(self, file, host,
                 readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance.  Arguments:
        - file: file-like object (open for read/write in binary mode)
        - host: hostname of the server
        - readermode: if true, send 'mode reader' command after
                      connecting.
        - timeout: timeout (in seconds) used for socket connections

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.file = file
        self.debugging = 0
        # The server greets us immediately on connect; consume it now.
        self.welcome = self._getresp()
        # Inquire about capabilities (RFC 3977).
        self._caps = None
        self.getcapabilities()
        # 'MODE READER' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'MODE READER' and 'AUTHINFO' need to
        # arrive differs between some NNTP servers. If _setreadermode() fails
        # with an authorization failed error, it will set this to True;
        # the login() routine will interpret that as a request to try again
        # after performing its normal function.
        # Enable only if we're not already in READER mode anyway.
        self.readermode_afterauth = False
        if readermode and 'READER' not in self._caps:
            self._setreadermode()
            if not self.readermode_afterauth:
                # Capabilities might have changed after MODE READER
                self._caps = None
                self.getcapabilities()
        # RFC 4642 2.2.2: Both the client and the server MUST know if there is
        # a TLS session active.  A client MUST NOT attempt to start a TLS
        # session if a TLS session is already active.
        self.tls_on = False
        # Log in and encryption setup order is left to subclasses.
        self.authenticated = False
    def __enter__(self):
        # Support "with NNTP(...) as s:" usage.
        return self
    def __exit__(self, *args):
        # Quit politely if still connected; always drop the socket/file.
        is_connected = lambda: hasattr(self, "file")
        if is_connected():
            try:
                self.quit()
            except (OSError, EOFError):
                # Connection already gone; nothing more to say to the server.
                pass
            finally:
                if is_connected():
                    self._close()
    def getwelcome(self):
        """Get the welcome message from the server
        (this is read and squirreled away by __init__()).
        If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""
        if self.debugging: print('*welcome*', repr(self.welcome))
        return self.welcome
    def getcapabilities(self):
        """Get the server capabilities, as read by __init__().
        If the CAPABILITIES command is not supported, an empty dict is
        returned."""
        # Cached after the first (successful or failed) query.
        if self._caps is None:
            self.nntp_version = 1
            self.nntp_implementation = None
            try:
                resp, caps = self.capabilities()
            except (NNTPPermanentError, NNTPTemporaryError):
                # Server doesn't support capabilities
                self._caps = {}
            else:
                self._caps = caps
                if 'VERSION' in caps:
                    # The server can advertise several supported versions,
                    # choose the highest.
                    self.nntp_version = max(map(int, caps['VERSION']))
                if 'IMPLEMENTATION' in caps:
                    self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
        return self._caps
    def set_debuglevel(self, level):
        """Set the debugging level.  Argument 'level' means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF"""
        self.debugging = level
    # Backwards-compatible alias.
    debug = set_debuglevel
    def _putline(self, line):
        """Internal: send one line to the server, appending CRLF.
        The `line` must be a bytes-like object."""
        line = line + _CRLF
        if self.debugging > 1: print('*put*', repr(line))
        self.file.write(line)
        # Flush so the command actually reaches the server before we read.
        self.file.flush()
    def _putcmd(self, line):
        """Internal: send one command to the server (through _putline()).
        The `line` must be a unicode string."""
        if self.debugging: print('*cmd*', repr(line))
        # Commands are UTF-8 with surrogateescape (see class comment).
        line = line.encode(self.encoding, self.errors)
        self._putline(line)
    def _getline(self, strip_crlf=True):
        """Internal: return one line from the server, stripping _CRLF.
        Raise EOFError if the connection is closed.
        Returns a bytes object."""
        # Read one byte past the limit so an over-long line is detectable.
        line = self.file.readline(_MAXLINE +1)
        if len(line) > _MAXLINE:
            raise NNTPDataError('line too long')
        if self.debugging > 1:
            print('*get*', repr(line))
        if not line: raise EOFError
        if strip_crlf:
            # Accept CRLF, bare CR or bare LF as the terminator.
            if line[-2:] == _CRLF:
                line = line[:-2]
            elif line[-1:] in _CRLF:
                line = line[:-1]
        return line
    def _getresp(self):
        """Internal: get a response from the server.
        Raise various errors if the response indicates an error.
        Returns a unicode string."""
        resp = self._getline()
        if self.debugging: print('*resp*', repr(resp))
        resp = resp.decode(self.encoding, self.errors)
        # Map the status digit to the exception hierarchy (RFC 3977).
        c = resp[:1]
        if c == '4':
            raise NNTPTemporaryError(resp)
        if c == '5':
            raise NNTPPermanentError(resp)
        if c not in '123':
            raise NNTPProtocolError(resp)
        return resp
    def _getlongresp(self, file=None):
        """Internal: get a response plus following text from the server.
        Raise various errors if the response indicates an error.

        Returns a (response, lines) tuple where `response` is a unicode
        string and `lines` is a list of bytes objects.
        If `file` is a file-like object, it must be open in binary mode.
        """
        openedFile = None
        try:
            # If a string was passed then open a file with that name
            if isinstance(file, (str, bytes)):
                openedFile = file = open(file, "wb")
            resp = self._getresp()
            if resp[:3] not in _LONGRESP:
                raise NNTPReplyError(resp)
            lines = []
            if file is not None:
                # XXX lines = None instead?
                terminators = (b'.' + _CRLF, b'.\n')
                while 1:
                    line = self._getline(False)
                    if line in terminators:
                        break
                    # Undo RFC 3977 dot-stuffing: a leading ".." means ".".
                    if line.startswith(b'..'):
                        line = line[1:]
                    file.write(line)
            else:
                terminator = b'.'
                while 1:
                    line = self._getline()
                    if line == terminator:
                        break
                    # Undo RFC 3977 dot-stuffing: a leading ".." means ".".
                    if line.startswith(b'..'):
                        line = line[1:]
                    lines.append(line)
        finally:
            # If this method created the file, then it must close it
            if openedFile:
                openedFile.close()
        return resp, lines
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _getoverviewfmt(self):
    """Internal: get the overview format. Queries the server if not
    already done, else returns the cached value."""
    try:
        return self._cachedoverviewfmt
    except AttributeError:
        # First call on this connection; fall through and ask the server.
        pass
    try:
        resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
    except NNTPPermanentError:
        # Not supported by server?
        # Copy ([:]) so later mutation can never corrupt the shared default.
        fmt = _DEFAULT_OVERVIEW_FMT[:]
    else:
        fmt = _parse_overview_fmt(lines)
    self._cachedoverviewfmt = fmt
    return fmt
def _grouplist(self, lines):
    """Internal: parse "group last first flag" lines into GroupInfo tuples."""
    result = []
    for entry in lines:
        result.append(GroupInfo(*entry.split()))
    return result
def capabilities(self):
    """Process a CAPABILITIES command.  Not supported by all servers.

    Returns:
    - resp: server response if successful
    - caps: a dict mapping capability names to their (possibly empty)
      list of argument tokens, e.g.
      {'VERSION': ['2'], 'OVER': [], 'LIST': ['ACTIVE', 'HEADERS']}
    """
    resp, lines = self._longcmdstring("CAPABILITIES")
    caps = {}
    for entry in lines:
        fields = entry.split()
        caps[fields[0]] = fields[1:]
    return resp, caps
def newgroups(self, date, *, file=None):
    """Process a NEWGROUPS command.  Arguments:
    - date: a date or datetime object
    - file: filename string or file-like object to store the result in
    Return:
    - resp: server response if successful
    - list: list of newsgroup names (as GroupInfo tuples)
    """
    # Bug fix: the tuple used to list datetime.date twice; it should
    # accept both datetime.date and datetime.datetime (as upstream does).
    if not isinstance(date, (datetime.date, datetime.datetime)):
        raise TypeError(
            "the date parameter must be a date or datetime object, "
            "not '{:40}'".format(date.__class__.__name__))
    # Older servers (nntp_version < 2) need a two-digit year.
    date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
    cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
    resp, lines = self._longcmdstring(cmd, file)
    return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
    """Process a NEWNEWS command.  Arguments:
    - group: group name or '*'
    - date: a date or datetime object
    - file: filename string or file-like object to store the result in
    Return:
    - resp: server response if successful
    - list: list of message ids
    """
    # Bug fix: the tuple used to list datetime.date twice; it should
    # accept both datetime.date and datetime.datetime (as upstream does).
    if not isinstance(date, (datetime.date, datetime.datetime)):
        raise TypeError(
            "the date parameter must be a date or datetime object, "
            "not '{:40}'".format(date.__class__.__name__))
    date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
    cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
    return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
    """Process a LIST or LIST ACTIVE command.  Arguments:
    - group_pattern: a pattern indicating which groups to query
    - file: filename string or file object to store the result in
    Returns:
    - resp: server response if successful
    - list: list of (group, last, first, flag) string tuples
    """
    command = ('LIST' if group_pattern is None
               else 'LIST ACTIVE ' + group_pattern)
    resp, lines = self._longcmdstring(command, file)
    return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
    """Internal: fetch group descriptions matching *group_pattern*.

    When *return_all* is true, returns (resp, {name: description}).
    When false, returns only the FIRST matching description as a plain
    str, or '' if nothing matched (note the asymmetric return types).
    """
    line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
    # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
    resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
    if not resp.startswith('215'):
        # Now the deprecated XGTITLE.  This either raises an error
        # or succeeds with the same output structure as LIST
        # NEWSGROUPS.
        resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
    groups = {}
    for raw_line in lines:
        match = line_pat.search(raw_line.strip())
        if match:
            name, desc = match.group(1, 2)
            if not return_all:
                # Short-circuit: caller only wants the first hit.
                return desc
            groups[name] = desc
    if return_all:
        return resp, groups
    else:
        # Nothing found
        return ''
def description(self, group):
    """Return the description of the first group matching *group*.

    If *group* is a pattern matching several groups, the first match
    wins; if nothing matches, the empty string is returned.  The
    response code is elided since it can only be '215' or '285' (for
    xgtitle); use descriptions() when the code is needed.

    NOTE: neither a wildcard in *group* nor the group's existence is
    checked.
    """
    return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
    """Fetch descriptions for every group matching *group_pattern*;
    returns (resp, {name: description})."""
    return self._getdescriptions(group_pattern, True)
def group(self, name):
    """Process a GROUP command.  Argument:
    - name: the group name
    Returns:
    - resp: server response if successful
    - count: number of articles (int)
    - first: first article number (int)
    - last: last article number (int)
    - name: the group name (lowercased if echoed by the server)
    """
    resp = self._shortcmd('GROUP ' + name)
    if not resp.startswith('211'):
        raise NNTPReplyError(resp)
    fields = resp.split()
    # Missing fields default to 0 / the requested name.
    count = fields[1] if len(fields) > 1 else 0
    first = fields[2] if len(fields) > 2 else 0
    last = fields[3] if len(fields) > 3 else 0
    if len(fields) > 4:
        name = fields[4].lower()
    return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
    """Process a HELP command.  Argument:
    - file: filename string or file object to store the result in
    Returns:
    - resp: server response if successful
    - list: the HELP text lines returned by the server
    """
    return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
    """Process a STAT command.  Argument:
    - message_spec: article number or message id (the currently
      selected article if omitted)
    Returns:
    - resp: server response if successful
    - art_num: the article number
    - message_id: the message id
    """
    cmd = 'STAT {0}'.format(message_spec) if message_spec else 'STAT'
    return self._statcmd(cmd)
def next(self):
    """Process a NEXT command (advance to the next article).
    No arguments; same return values as stat()."""
    return self._statcmd('NEXT')
def last(self):
    """Process a LAST command (step back to the previous article).
    No arguments; same return values as stat()."""
    return self._statcmd('LAST')
def _artcmd(self, line, file=None):
    """Internal: run a HEAD, BODY or ARTICLE command and package the
    result as (resp, ArticleInfo(number, message_id, lines))."""
    resp, lines = self._longcmd(line, file)
    resp, art_num, message_id = self._statparse(resp)
    return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
    """Process a HEAD command.  Argument:
    - message_spec: article number or message id (current article if
      omitted)
    - file: filename string or file object to store the headers in
    Returns:
    - resp: server response if successful
    - ArticleInfo: (article number, message id, list of header lines)
    """
    cmd = 'HEAD' if message_spec is None else 'HEAD {0}'.format(message_spec)
    return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
    """Process a BODY command.  Argument:
    - message_spec: article number or message id (current article if
      omitted)
    - file: filename string or file object to store the body in
    Returns:
    - resp: server response if successful
    - ArticleInfo: (article number, message id, list of body lines)
    """
    cmd = 'BODY' if message_spec is None else 'BODY {0}'.format(message_spec)
    return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
    """Process an ARTICLE command.  Argument:
    - message_spec: article number or message id (current article if
      omitted)
    - file: filename string or file object to store the article in
    Returns:
    - resp: server response if successful
    - ArticleInfo: (article number, message id, list of article lines)
    """
    cmd = ('ARTICLE' if message_spec is None
           else 'ARTICLE {0}'.format(message_spec))
    return self._artcmd(cmd, file)
def slave(self):
    """Process a SLAVE command.  Returns:
    - resp: server response if successful
    """
    return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
    """Process an XHDR command (optional server extension).  Arguments:
    - hdr: the header type (e.g. 'subject')
    - str: an article nr, a message id, or a range nr1-nr2
    - file: filename string or file object to store the result in
    Returns:
    - resp: server response if successful
    - list: list of (nr, value) string pairs
    """
    pat = re.compile('^([0-9]+) ?(.*)\n?')
    resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
    results = []
    for entry in lines:
        m = pat.match(entry)
        # Lines that don't match the "nr value" shape pass through as-is.
        results.append(m.group(1, 2) if m else entry)
    return resp, results
def xover(self, start, end, *, file=None):
    """Process an XOVER command (optional server extension).  Arguments:
    - start: start of range
    - end: end of range
    - file: filename string or file object to store the result in
    Returns:
    - resp: server response if successful
    - list: list of dicts containing the response fields
    """
    cmd = 'XOVER {0}-{1}'.format(start, end)
    resp, lines = self._longcmdstring(cmd, file)
    return resp, _parse_overview(lines, self._getoverviewfmt())
def over(self, message_spec, *, file=None):
    """Process an OVER command. If the command isn't supported, fall
    back to XOVER. Arguments:
    - message_spec:
        - either a message id, indicating the article to fetch
          information about
        - or a (start, end) tuple, indicating a range of article numbers;
          if end is None, information up to the newest message will be
          retrieved
        - or None, indicating the current article number must be used
    - file: Filename string or file object to store the result in
    Returns:
    - resp: server response if successful
    - list: list of dicts containing the response fields
    NOTE: the "message id" form isn't supported by XOVER
    """
    # Prefer the standard OVER when the server advertised it.
    cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
    if isinstance(message_spec, (tuple, list)):
        start, end = message_spec
        # "start-" (open-ended range) when end is None/falsy.
        cmd += ' {0}-{1}'.format(start, end or '')
    elif message_spec is not None:
        cmd = cmd + ' ' + message_spec
    # message_spec None: send the bare command for the current article.
    resp, lines = self._longcmdstring(cmd, file)
    fmt = self._getoverviewfmt()
    return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
    """Process an XGTITLE command (optional, deprecated server
    extension).  Arguments:
    - group: group name wildcard (i.e. news.*)
    Returns:
    - resp: server response if successful
    - list: list of (name, title) string pairs
    """
    warnings.warn("The XGTITLE extension is not actively used, "
                  "use descriptions() instead",
                  DeprecationWarning, 2)
    line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
    resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
    matches = (line_pat.search(raw.strip()) for raw in raw_lines)
    lines = [m.group(1, 2) for m in matches if m]
    return resp, lines
def xpath(self, id):
    """Process an XPATH command (optional, deprecated server
    extension).  Arguments:
    - id: message id of the article
    Returns:
    - resp: server response if successful
    - path: directory path to the article
    """
    warnings.warn("The XPATH extension is not actively used",
                  DeprecationWarning, 2)
    resp = self._shortcmd('XPATH {0}'.format(id))
    if not resp.startswith('223'):
        raise NNTPReplyError(resp)
    try:
        resp_num, path = resp.split()
    except ValueError:
        raise NNTPReplyError(resp)
    return resp, path
def date(self):
    """Process the DATE command.
    Returns:
    - resp: server response if successful
    - date: datetime object for the server's clock
    """
    resp = self._shortcmd("DATE")
    if not resp.startswith('111'):
        raise NNTPReplyError(resp)
    parts = resp.split()
    if len(parts) != 2:
        raise NNTPDataError(resp)
    timestamp = parts[1]
    # Server time is a fixed-width yyyymmddhhmmss string.
    if len(timestamp) != 14:
        raise NNTPDataError(resp)
    return resp, _parse_datetime(timestamp, None)
def _post(self, command, f):
    """Internal: upload article data *f* (bytes or iterable of byte
    lines) after issuing *command* (POST or IHAVE ...); return the
    server's final response."""
    resp = self._shortcmd(command)
    # Raises a specific exception if posting is not allowed
    if not resp.startswith('3'):
        raise NNTPReplyError(resp)
    if isinstance(f, (bytes, bytearray)):
        f = f.splitlines()
    # We don't use _putline() because:
    # - we don't want additional CRLF if the file or iterable is already
    #   in the right format
    # - we don't want a spurious flush() after each line is written
    for line in f:
        # Normalize the terminator to CRLF without doubling it.
        if not line.endswith(_CRLF):
            line = line.rstrip(b"\r\n") + _CRLF
        # Dot-stuff lines starting with '.' (RFC 3977 multi-line data).
        if line.startswith(b'.'):
            line = b'.' + line
        self.file.write(line)
    # Lone dot terminates the article; flush once at the end.
    self.file.write(b".\r\n")
    self.file.flush()
    return self._getresp()
def post(self, data):
    """Process a POST command.  Arguments:
    - data: bytes object, iterable or file containing the article
    Returns:
    - resp: server response if successful
    """
    return self._post('POST', data)
def ihave(self, message_id, data):
    """Process an IHAVE command.  Arguments:
    - message_id: message-id of the article
    - data: file containing the article
    Returns:
    - resp: server response if successful
    Note that if the server refuses the article an exception is raised.
    """
    return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
    # Internal: close the connection's file object.  Deleting the
    # attribute makes any later use fail fast with AttributeError
    # rather than operating on a closed file.
    self.file.close()
    del self.file
def quit(self):
    """Process a QUIT command and close the connection.  Returns:
    - resp: server response if successful
    The connection is closed even if QUIT itself fails.
    """
    try:
        resp = self._shortcmd('QUIT')
    finally:
        self._close()
    return resp
def login(self, user=None, password=None, usenetrc=True):
    """Authenticate with AUTHINFO, optionally pulling credentials from
    ~/.netrc; refresh capabilities and reader mode afterwards.

    Raises ValueError if already authenticated or if neither a user
    nor netrc lookup was requested.
    """
    if self.authenticated:
        raise ValueError("Already logged in.")
    if not user and not usenetrc:
        raise ValueError(
            "At least one of `user` and `usenetrc` must be specified")
    # If no login/password was specified but netrc was requested,
    # try to get them from ~/.netrc
    # Presume that if .netrc has an entry, NNRP authentication is required.
    try:
        if usenetrc and not user:
            import netrc
            credentials = netrc.netrc()
            auth = credentials.authenticators(self.host)
            if auth:
                user = auth[0]
                password = auth[2]
    except OSError:
        # Missing/unreadable .netrc is not an error; just skip it.
        pass
    # Perform NNTP authentication if needed.
    if not user:
        return
    resp = self._shortcmd('authinfo user ' + user)
    if resp.startswith('381'):
        # 381: server wants a password to complete authentication.
        if not password:
            raise NNTPReplyError(resp)
        else:
            resp = self._shortcmd('authinfo pass ' + password)
            if not resp.startswith('281'):
                raise NNTPPermanentError(resp)
    # Capabilities might have changed after login
    self._caps = None
    self.getcapabilities()
    # Attempt to send mode reader if it was requested after login.
    # Only do so if we're not in reader mode already.
    if self.readermode_afterauth and 'READER' not in self._caps:
        self._setreadermode()
        # Capabilities might have changed after MODE READER
        self._caps = None
        self.getcapabilities()
def _setreadermode(self):
    """Internal: send 'mode reader', tolerating servers that lack the
    command or that require authentication first."""
    try:
        self.welcome = self._shortcmd('mode reader')
    except NNTPPermanentError:
        # Error 5xx, probably 'not implemented'
        pass
    except NNTPTemporaryError as e:
        if e.response.startswith('480'):
            # Need authorization before 'mode reader'
            # Defer: login() retries after authentication succeeds.
            self.readermode_afterauth = True
        else:
            raise
if _have_ssl:
    def starttls(self, context=None):
        """Process a STARTTLS command. Arguments:
        - context: SSL context to use for the encrypted connection
        """
        # Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
        # a TLS session already exists.
        if self.tls_on:
            raise ValueError("TLS is already enabled.")
        if self.authenticated:
            raise ValueError("TLS cannot be started after authentication.")
        resp = self._shortcmd('STARTTLS')
        if resp.startswith('382'):
            # Replace the plain-text socket/file pair with TLS-wrapped ones.
            self.file.close()
            self.sock = _encrypt_on(self.sock, context, self.host)
            self.file = self.sock.makefile("rwb")
            self.tls_on = True
            # Capabilities may change after TLS starts up, so ask for them
            # again.
            self._caps = None
            self.getcapabilities()
        else:
            raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
    """NNTP client over a plain TCP socket."""

    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=False,
                 timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.
        - usenetrc: allow loading username and password from ~/.netrc file
                    if not specified explicitly
        - timeout: timeout (in seconds) used for socket connections

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port), timeout)
        file = None
        try:
            file = self.sock.makefile("rwb")
            _NNTPBase.__init__(self, file, host,
                               readermode, timeout)
            if user or usenetrc:
                self.login(user, password, usenetrc)
        except:
            # Bare except is deliberate: on *any* failure (including
            # BaseException) close the file/socket, then re-raise.
            if file:
                file.close()
            self.sock.close()
            raise

    def _close(self):
        try:
            _NNTPBase._close(self)
        finally:
            # Close the socket even if closing the file object failed.
            self.sock.close()
if _have_ssl:
    class NNTP_SSL(_NNTPBase):
        """NNTP client over an implicitly TLS-wrapped socket (NNTPS)."""

        def __init__(self, host, port=NNTP_SSL_PORT,
                     user=None, password=None, ssl_context=None,
                     readermode=None, usenetrc=False,
                     timeout=_GLOBAL_DEFAULT_TIMEOUT):
            """This works identically to NNTP.__init__, except for the change
            in default port and the `ssl_context` argument for SSL connections.
            """
            self.sock = socket.create_connection((host, port), timeout)
            file = None
            try:
                # Wrap the socket in TLS before any protocol traffic.
                self.sock = _encrypt_on(self.sock, ssl_context, host)
                file = self.sock.makefile("rwb")
                _NNTPBase.__init__(self, file, host,
                                   readermode=readermode, timeout=timeout)
                if user or usenetrc:
                    self.login(user, password, usenetrc)
            except:
                # Clean up both resources on any failure, then re-raise.
                if file:
                    file.close()
                self.sock.close()
                raise

        def _close(self):
            try:
                _NNTPBase._close(self)
            finally:
                self.sock.close()

    # Only exported when the ssl module is available.
    __all__.append("NNTP_SSL")
# Test retrieval when run as a script.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description="""\
        nntplib built-in demo - display the latest articles in a newsgroup""")
    parser.add_argument('-g', '--group', default='gmane.comp.python.general',
                        help='group to fetch messages from (default: %(default)s)')
    parser.add_argument('-s', '--server', default='news.gmane.org',
                        help='NNTP server hostname (default: %(default)s)')
    parser.add_argument('-p', '--port', default=-1, type=int,
                        help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
    parser.add_argument('-n', '--nb-articles', default=10, type=int,
                        help='number of articles to fetch (default: %(default)s)')
    parser.add_argument('-S', '--ssl', action='store_true', default=False,
                        help='use NNTP over SSL')
    args = parser.parse_args()

    # -1 means "no port given": pick the default for the chosen transport.
    port = args.port
    if not args.ssl:
        if port == -1:
            port = NNTP_PORT
        s = NNTP(host=args.server, port=port)
    else:
        if port == -1:
            port = NNTP_SSL_PORT
        s = NNTP_SSL(host=args.server, port=port)

    # Opportunistically upgrade plain connections to TLS.
    caps = s.getcapabilities()
    if 'STARTTLS' in caps:
        s.starttls()

    resp, count, first, last, name = s.group(args.group)
    print('Group', name, 'has', count, 'articles, range', first, 'to', last)

    def cut(s, lim):
        # Truncate string s to at most lim chars, with a "..." ellipsis.
        if len(s) > lim:
            s = s[:lim - 4] + "..."
        return s

    # Fetch overview data for the last nb_articles articles.
    first = str(int(last) - args.nb_articles + 1)
    resp, overviews = s.xover(first, last)
    for artnum, over in overviews:
        author = decode_header(over['from']).split('<', 1)[0]
        subject = decode_header(over['subject'])
        lines = int(over[':lines'])
        print("{:7} {:20} {:42} ({})".format(
            artnum, cut(author, 20), cut(subject, 42), lines)
        )

    s.quit()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.5.0/Lib/nntplib.py | Python | mit | 43,081 |
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import io
import requests
import subprocess
import sys
# Endpoints of the public global-signet faucet.
DEFAULT_GLOBAL_FAUCET = 'https://signetfaucet.com/claim'
DEFAULT_GLOBAL_CAPTCHA = 'https://signetfaucet.com/captcha'
# Hash of block 1 on the global signet chain, used to detect custom signets.
GLOBAL_FIRST_BLOCK_HASH = '00000086d6b2636cb2a392d45edc4ec544a10024d30141c9adf4bfd9de533b53'

# braille unicode block
BASE = 0x2800
# Bit set in a braille codepoint for each dot, indexed [row][column].
BIT_PER_PIXEL = [
    [0x01, 0x08],
    [0x02, 0x10],
    [0x04, 0x20],
    [0x40, 0x80],
]
# A braille cell covers BW x BH pixels (2 wide, 4 tall).
BW = 2
BH = 4

# imagemagick or compatible fork (used for converting SVG)
CONVERT = 'convert'
class PPMImage:
    """Minimal loader for binary PPM (P6) images with a Pillow-ish API:
    a ``size`` attribute and a ``getpixel((x, y))`` method."""

    def __init__(self, f):
        """Parse a P6 PPM with 8-bit depth from binary file object *f*."""
        if f.readline() != b'P6\n':
            raise ValueError('Invalid ppm format: header')
        dimensions = f.readline().rstrip().split(b' ')
        width, height = (int(v) for v in dimensions)
        if f.readline() != b'255\n':
            raise ValueError('Invalid ppm format: color depth')
        raw = f.read(width * height * 3)
        stride = width * 3
        self.size = (width, height)
        # Row-major grid of (r, g, b) byte tuples.
        self._grid = []
        for y in range(height):
            offset = stride * y
            row = [tuple(raw[offset + 3 * x:offset + 3 * (x + 1)])
                   for x in range(width)]
            self._grid.append(row)

    def getpixel(self, pos):
        """Return the (r, g, b) tuple at ``pos = (x, y)``."""
        return self._grid[pos[1]][pos[0]]
def print_image(img, threshold=128):
    """Print black-and-white image to terminal in braille unicode
    characters.

    A pixel is considered "on" when its red channel is below
    *threshold*; pixels outside the image (at the ragged right/bottom
    edges) are left blank.
    """
    x_blocks = (img.size[0] + BW - 1) // BW
    y_blocks = (img.size[1] + BH - 1) // BH
    for yb in range(y_blocks):
        chars = []
        for xb in range(x_blocks):
            code = BASE
            for y in range(BH):
                for x in range(BW):
                    try:
                        pixel = img.getpixel((xb * BW + x, yb * BH + y))
                    except IndexError:
                        continue
                    if pixel[0] < threshold:
                        code |= BIT_PER_PIXEL[y][x]
            chars.append(chr(code))
        print(''.join(chars))
# Command-line interface; any extra positional arguments are forwarded
# verbatim to bitcoin-cli (defaulting to -signet).
parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing bitcoin-cli arguments.')
parser.add_argument('-c', '--cmd', dest='cmd', default='bitcoin-cli', help='bitcoin-cli command to use')
parser.add_argument('-f', '--faucet', dest='faucet', default=DEFAULT_GLOBAL_FAUCET, help='URL of the faucet')
parser.add_argument('-g', '--captcha', dest='captcha', default=DEFAULT_GLOBAL_CAPTCHA, help='URL of the faucet captcha, or empty if no captcha is needed')
parser.add_argument('-a', '--addr', dest='addr', default='', help='Bitcoin address to which the faucet should send')
parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any')
parser.add_argument('-n', '--amount', dest='amount', default='0.001', help='Amount to request (0.001-0.1, default is 0.001)')
parser.add_argument('-i', '--imagemagick', dest='imagemagick', default=CONVERT, help='Path to imagemagick convert utility')
parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to bitcoin-cli (default: -signet)')

args = parser.parse_args()

# Default network selection when no bitcoin-cli args were supplied.
if args.bitcoin_cli_args == []:
    args.bitcoin_cli_args = ['-signet']
def bitcoin_cli(rpc_command_and_params):
    """Run bitcoin-cli (as configured in the module-level *args*) with the
    given RPC command + parameters and return its stripped stdout.

    Exits the process with status 1 if the binary is missing or the
    call fails.
    """
    argv = [args.cmd] + args.bitcoin_cli_args + rpc_command_and_params
    try:
        output = subprocess.check_output(argv)
    except FileNotFoundError:
        print('The binary', args.cmd, 'could not be found.')
        exit(1)
    except subprocess.CalledProcessError:
        cmdline = ' '.join(argv)
        print(f'-----\nError while calling "{cmdline}" (see output above).')
        exit(1)
    return output.strip().decode()
if args.faucet.lower() == DEFAULT_GLOBAL_FAUCET:
    # Get the hash of the block at height 1 of the currently active signet chain
    curr_signet_hash = bitcoin_cli(['getblockhash', '1'])
    if curr_signet_hash != GLOBAL_FIRST_BLOCK_HASH:
        print('The global faucet cannot be used with a custom Signet network. Please use the global signet or setup your custom faucet to use this functionality.\n')
        exit(1)
else:
    # For custom faucets, don't request captcha by default.
    if args.captcha == DEFAULT_GLOBAL_CAPTCHA:
        args.captcha = ''

if args.addr == '':
    # get address for receiving coins
    args.addr = bitcoin_cli(['getnewaddress', 'faucet', 'bech32'])

data = {'address': args.addr, 'password': args.password, 'amount': args.amount}

# Store cookies
# for debugging: print(session.cookies.get_dict())
session = requests.Session()

if args.captcha != '':  # Retrieve a captcha
    try:
        res = session.get(args.captcha)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt -- confirm intended.
        print('Unexpected error when contacting faucet:', sys.exc_info()[0])
        exit(1)

    # Convert SVG image to PPM, and load it
    try:
        rv = subprocess.run([args.imagemagick, '-', '-depth', '8', 'ppm:-'], input=res.content, check=True, capture_output=True)
    except FileNotFoundError:
        print('The binary', args.imagemagick, 'could not be found. Please make sure ImageMagick (or a compatible fork) is installed and that the correct path is specified.')
        exit(1)

    img = PPMImage(io.BytesIO(rv.stdout))

    # Terminal interaction
    print_image(img)
    print('Enter captcha: ', end='')
    data['captcha'] = input()

try:
    res = session.post(args.faucet, data=data)
except:
    print('Unexpected error when contacting faucet:', sys.exc_info()[0])
    exit(1)

# Display the output as per the returned status code
if res:
    # When the return code is in between 200 and 400 i.e. successful
    print(res.text)
elif res.status_code == 404:
    print('The specified faucet URL does not exist. Please check for any server issues/typo.')
elif res.status_code == 429:
    print('The script does not allow for repeated transactions as the global faucet is rate-limitied to 1 request/IP/day. You can access the faucet website to get more coins manually')
else:
    print(f'Returned Error Code {res.status_code}\n{res.text}\n')
    print('Please check the provided arguments for their validity and/or any possible typo.')
| jamesob/bitcoin | contrib/signet/getcoins.py | Python | mit | 6,271 |
#
# DBus interface for the interactive partitioning module
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.interface import dbus_interface
from pyanaconda.modules.common.constants.objects import INTERACTIVE_PARTITIONING
from pyanaconda.modules.storage.partitioning.base_interface import PartitioningInterface
@dbus_interface(INTERACTIVE_PARTITIONING.interface_name)
class InteractivePartitioningInterface(PartitioningInterface):
    """DBus interface for the interactive partitioning module.

    Published under the INTERACTIVE_PARTITIONING interface name; all
    methods are inherited unchanged from PartitioningInterface.
    """
| jkonecny12/anaconda | pyanaconda/modules/storage/partitioning/interactive/interactive_interface.py | Python | gpl-2.0 | 1,424 |
from fail2ban.server.action import ActionBase
class TestAction(ActionBase):
    """Test fixture action whose every lifecycle operation raises, used
    to exercise the server's action error handling."""

    def __init__(self, jail, name):
        # Explicit two-argument super() keeps Python 2 compatibility.
        super(TestAction, self).__init__(jail, name)

    def start(self):
        """Always fails."""
        raise Exception()

    def stop(self):
        """Always fails."""
        raise Exception()

    def ban(self):
        """Always fails."""
        raise Exception()

    def unban(self):
        """Always fails."""
        raise Exception()


# Entry point name expected by the fail2ban action loader.
Action = TestAction
| jakesyl/fail2ban | fail2ban/tests/files/action.d/action_errors.py | Python | gpl-2.0 | 378 |
### Copyright (C) 2005 Thomas M. Hinkle
### Copyright (C) 2009 Rolf Leggewie
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import gtk
from gettext import gettext as _
import gourmet.convert as convert
import validatingEntry
# Milliseconds -- presumably how long a validation warning stays readable;
# not referenced elsewhere in this chunk (TODO confirm consumer).
TIME_TO_READ = 1000
class TimeEntry (validatingEntry.ValidatingEntry):
    """Entry widget that validates free-form time strings (e.g. "1 hour",
    "30 minutes") using a gourmet convert converter."""

    __gtype_name__ = 'TimeEntry'

    def __init__ (self, conv=None):
        # conv: a convert converter instance; the shared default is used
        # when none is given.
        if not conv:
            self.conv = convert.get_converter()
        else:
            self.conv = conv
        validatingEntry.ValidatingEntry.__init__(self)
        # Mirror get/set_value onto the wrapped entry widget.
        self.entry.get_value = self.get_value
        self.entry.set_value = self.set_value

    def find_errors_in_progress (self, txt):
        """Validate *txt* while the user is still typing.

        Returns None when the text is empty, parses as a time, or could
        still become valid (e.g. a partially typed unit name); otherwise
        returns an error message string.
        """
        if (not txt) or self.conv.timestring_to_seconds(txt):
            return None
        elif not convert.NUMBER_MATCHER.match(txt.split()[0]):
            return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).')
        else:
            words = txt.split()
            if convert.NUMBER_MATCHER.match(words[-1]):
                # Trailing number: the user may still be typing a unit.
                return None
            else:
                partial_unit = words[-1]
                # Accept any prefix of a known unit name as "in progress".
                for u in self.conv.unit_to_seconds.keys():
                    if u.lower().find(partial_unit.lower()) == 0:
                        return None
                # Bug fix: removed an unreachable self._show_warning() call
                # that followed this return.
                return _('Invalid input.') + \
                       _('Time must be expressed in hours, minutes, seconds, etc.')

    def find_completed_errors (self, *args):
        """Validate the final text once editing is finished, flagging a
        warning on the widget when it cannot be parsed."""
        txt = self.entry.get_text()
        if txt and not self.conv.timestring_to_seconds(txt):
            return _('Invalid input.') + \
                   _('Time must be expressed in hours, minutes, seconds, etc.')
        words = txt.split()
        # Bug fix: was `== 1`, which made empty text fall through to
        # words[-1] below and raise IndexError.
        if len(words) <= 1:
            self._hide_warning_slowly()
            return
        elif convert.NUMBER_MATCHER.match(words[-1]):
            return
        else:
            partial_unit = words[-1]
            for u in self.conv.unit_to_seconds.keys():
                if u.lower().find(partial_unit.lower()) == 0:
                    self._hide_warning_slowly()
                    return
            self.valid = False
            self.warn = True
            self.set_warning_text('Invalid input.' + 'Time must be expressed in hours, minutes, seconds, etc.')
            self._show_warning()

    def set_value (self, seconds):
        """Set the entry text from a number of *seconds*."""
        self.entry.set_text(
            convert.seconds_to_timestring(seconds,
                                          fractions=convert.FRACTIONS_ASCII)
            )

    def get_value (self):
        """Return the entry's current value as parsed by
        timestring_to_seconds (falsy when unparsable)."""
        return self.conv.timestring_to_seconds(self.entry.get_text())
def make_time_entry():
    """Factory: return a new TimeEntry that has already been show()n."""
    widget = TimeEntry()
    widget.show()
    return widget
if __name__ == '__main__':
    # Interactive demo: a window holding a labelled TimeEntry plus a Quit
    # button; every edit echoes the parsed value (seconds) to stderr.
    w = gtk.Window()
    vb = gtk.VBox()
    hb = gtk.HBox()
    l = gtk.Label('_Label')
    l.set_use_underline(True)
    l.set_alignment(0, 0.5)
    hb.pack_start(l)
    te = TimeEntry()
    import sys
    te.connect('changed', lambda w: sys.stderr.write('Time value: %s' % w.get_value()))
    l.set_mnemonic_widget(te)
    hb.pack_start(te, expand=False, fill=False)
    vb.add(hb)
    qb = gtk.Button(stock=gtk.STOCK_QUIT)
    vb.add(qb)
    l.show()
    hb.show()
    qb.show()
    te.show()
    vb.show()
    qb.connect('clicked', lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit())
    w.add(vb)
    w.show()
    w.connect('delete_event', gtk.main_quit)
    gtk.main()
#!/usr/bin/python
import re
# Cache of already-read source files: path -> list of lines (a sentinel ''
# line is appended by get_file so look-ahead never runs off the end).
source_files = {}

# Matches a trailing translator comment of the form "  # T: some hint".
comment_re = re.compile(r'\s+#\s+T:\s+(.+)\s*$')
def get_file(file):
    """Return the cached list of lines of *file*, appending a sentinel
    empty line so forward scans never run off the end."""
    if file not in source_files:
        lines = open(file).readlines()
        lines.append('')
        source_files[file] = lines
    return source_files[file]
def extract_comment(file, line):
    """Return the translator comment ("# T: ...") attached to the gettext
    call at 1-based *line* in *file*, or None if there is none.

    The comment may sit on the same line or on any following line up to
    (but not including) the next gettext call.
    """
    lines = get_file(file)
    line -= 1  # convert to 0-based index
    match = comment_re.search(lines[line])
    if match:
        # Comment on the same line as the gettext call.
        return match.group(1)
    # Otherwise scan forward until the next gettext call.
    i = line + 1
    while i < len(lines):
        if '_(' in lines[i] or 'gettext(' in lines[i]:
            return None
        match = comment_re.search(lines[i])
        if match:
            return match.group(1)
        i += 1
    return None
def extract_comments(sources):
    # Collect the distinct translator comments for a list of "file:line"
    # source references and join them into "#. " comment lines; returns
    # '' (after printing a warning) when none were found.
    # NOTE: Python 2 file (print statements below).
    sources = [s.split(':') for s in sources]
    comments = []
    for file, line in sources:
        comment = extract_comment(file, int(line))
        if comment and comment not in comments:
            comments.append(comment)
    if comments:
        return ' | \n'.join(['#. '+c for c in comments])+'\n'
    else:
        print 'No translator comment for:'
        for file, line in sources:
            print '\t%s line %s' % (file, line)
        return ''
def add_comments(file):
    """Rewrite the po/pot *file* in place, inserting "#. " translator
    comments (extracted from the referenced source files) above each
    "#: " reference block and dropping any stale "#. " lines."""
    messages = open(file).readlines()
    fh = open(file, 'w')
    while messages:
        line = messages.pop(0)
        if line.startswith('#: '):
            lines = [line]
            sources = line[3:].strip().split()
            # Collect all consecutive "#: " lines for this message.
            # Bug fix: guard against the file ending with a "#: " line,
            # which previously raised IndexError on messages[0].
            while messages and messages[0].startswith('#: '):
                line = messages.pop(0)
                lines.append(line)
                sources += line[3:].strip().split()
            fh.write(extract_comments(sources))
            fh.writelines(lines)
        elif line.startswith('#. '):
            # Drop previously generated comments; they are regenerated above.
            pass
        else:
            fh.write(line)
if __name__ == '__main__':
    # Regenerate translator comments in the master translation template.
    add_comments('translations/zim.pot')
| tmhorne/simplewiki | tools/extract_translator_comments.py | Python | gpl-2.0 | 1,702 |
from pele.concurrent import BasinhoppingWorker
from start_server import create_system, get_server_uri
def main():
    """Create the demo system, connect a basinhopping worker to the shared
    server, and run it for 1000 steps."""
    system = create_system()
    server_uri = get_server_uri()
    BasinhoppingWorker(server_uri, system=system).run(1000)

if __name__ == "__main__":
    main()
| smcantab/pele | examples/parallel_pele/start_basinhopping_worker.py | Python | gpl-3.0 | 287 |
# Shed Skin type-inference test case (Python 2 syntax). The trailing "# [...]"
# comments appear to be part of the test fixture -- presumably the types the
# compiler is expected to infer for each expression -- so they must not be
# edited or removed.
def propagate(la): # la: [list(int)]
    print la, la # [str], [str]
propagate([1]) # []
propagate([2]) # []
| shedskin/shedskin | tests/28.py | Python | gpl-3.0 | 210 |
#!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, vocoder, blocks
class test_g723_24_vocoder (gr_unittest.TestCase):
    # QA smoke test for the G.723-24 vocoder encode/decode blocks.
    def setUp(self):
        # Fresh top block (flowgraph) for each test.
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    def test001_module_load(self):
        # Round-trip the sample vector through encode -> decode and expect it
        # back unchanged. NOTE(review): ADPCM-style coding is lossy in
        # general; presumably this particular vector was chosen to survive
        # the round trip exactly -- confirm before extending the data set.
        data = (0, 16, 36, 40, 68, 104, 148, 220, 320, 512)
        src = blocks.vector_source_s(data)
        enc = vocoder.g723_24_encode_sb()
        dec = vocoder.g723_24_decode_bs()
        snk = blocks.vector_sink_s()
        self.tb.connect(src, enc, dec, snk)
        self.tb.run()
        actual_result = snk.data()
        self.assertEqual(list(data), actual_result)
if __name__ == '__main__':
    gr_unittest.run(test_g723_24_vocoder)
| mrjacobagilbert/gnuradio | gr-vocoder/python/vocoder/qa_g723_24_vocoder.py | Python | gpl-3.0 | 858 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe, random, erpnext
from frappe.desk import query_report
from erpnext.stock.stock_ledger import NegativeStockError
from erpnext.stock.doctype.serial_no.serial_no import SerialNoRequiredError, SerialNoQtyError
from erpnext.stock.doctype.batch.batch import UnableToSelectBatchError
from erpnext.stock.doctype.delivery_note.delivery_note import make_sales_return
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_return
def work():
	"""Simulate one day of demo stock activity as the manufacturing user:
	receipts, deliveries, reconciliations, draft submissions and returns."""
	frappe.set_user(frappe.db.get_global('demo_manufacturing_user'))
	make_purchase_receipt()
	make_delivery_note()
	make_stock_reconciliation()
	submit_draft_stock_entries()
	make_sales_return_records()
	make_purchase_return_records()
def make_purchase_receipt():
	# On ~60% of demo days, receive goods against up to 10 open purchase orders.
	if random.random() < 0.6:
		from erpnext.buying.doctype.purchase_order.purchase_order import make_purchase_receipt
		report = "Purchase Order Items To Be Received"
		# Distinct PO names from the report, skipping the quoted 'Total' row.
		po_list =list(set([r[0] for r in query_report.run(report)["result"] if r[0]!="'Total'"]))[:random.randint(1, 10)]
		for po in po_list:
			pr = frappe.get_doc(make_purchase_receipt(po))
			if pr.is_subcontracted=="Yes":
				pr.supplier_warehouse = "Supplier - WPL"
			pr.posting_date = frappe.flags.current_date
			pr.insert()
			try:
				pr.submit()
			except NegativeStockError:
				# Best-effort demo data: leave the receipt as a draft.
				print('Negative stock for {0}'.format(po))
				pass
			frappe.db.commit()
def make_delivery_note():
	# make purchase requests
	# make delivery notes (if possible)
	# On ~60% of demo days, deliver against up to 3 open sales orders.
	if random.random() < 0.6:
		from erpnext.selling.doctype.sales_order.sales_order import make_delivery_note
		report = "Ordered Items To Be Delivered"
		for so in list(set([r[0] for r in query_report.run(report)["result"]
			if r[0]!="'Total'"]))[:random.randint(1, 3)]:
			dn = frappe.get_doc(make_delivery_note(so))
			dn.posting_date = frappe.flags.current_date
			for d in dn.get("items"):
				if not d.expense_account:
					# Default the expense account to the company's COGS account.
					d.expense_account = ("Cost of Goods Sold - {0}".format(
						frappe.get_cached_value('Company', dn.company, 'abbr')))
			try:
				dn.insert()
				dn.submit()
				frappe.db.commit()
			except (NegativeStockError, SerialNoRequiredError, SerialNoQtyError, UnableToSelectBatchError):
				# Demo data may not satisfy stock/serial/batch constraints;
				# discard the whole draft.
				frappe.db.rollback()
def make_stock_reconciliation():
	# random set some items as damaged
	from erpnext.stock.doctype.stock_reconciliation.stock_reconciliation \
		import OpeningEntryAccountError, EmptyStockReconciliationItemsError
	# On ~40% of demo days, write stock in "Stores - WP" down by a random
	# quantity per item.
	if random.random() < 0.4:
		stock_reco = frappe.new_doc("Stock Reconciliation")
		stock_reco.posting_date = frappe.flags.current_date
		stock_reco.company = erpnext.get_default_company()
		stock_reco.get_items_for("Stores - WP")
		if stock_reco.items:
			for item in stock_reco.items:
				if item.qty:
					# NOTE(review): round() on randint's int result is a no-op;
					# presumably left over from an earlier float-based draw.
					item.qty = item.qty - round(random.randint(1, item.qty))
			try:
				stock_reco.insert()
				stock_reco.submit()
				frappe.db.commit()
			except OpeningEntryAccountError:
				frappe.db.rollback()
			except EmptyStockReconciliationItemsError:
				frappe.db.rollback()
def submit_draft_stock_entries():
	"""Try to submit any stock entries still in draft, rolling back each
	one that fails with a known stock/manufacturing error."""
	from erpnext.stock.doctype.stock_entry.stock_entry import IncorrectValuationRateError, \
		DuplicateEntryForWorkOrderError, OperationsNotCompleteError
	# try posting older drafts (if exists)
	frappe.db.commit()
	for st in frappe.db.get_values("Stock Entry", {"docstatus":0}, "name"):
		try:
			ste = frappe.get_doc("Stock Entry", st[0])
			ste.posting_date = frappe.flags.current_date
			ste.save()
			ste.submit()
			frappe.db.commit()
		except (NegativeStockError, IncorrectValuationRateError, DuplicateEntryForWorkOrderError,
			OperationsNotCompleteError):
			frappe.db.rollback()
def make_sales_return_records():
	"""On a small fraction of demo days, return a few submitted delivery notes."""
	if random.random() >= 0.1:
		return
	for data in frappe.get_all('Delivery Note', fields=["name"], filters={"docstatus": 1}):
		if random.random() >= 0.1:
			continue
		try:
			return_doc = make_sales_return(data.name)
			return_doc.insert()
			return_doc.submit()
			frappe.db.commit()
		except Exception:
			# Best-effort demo data: discard returns that fail validation.
			frappe.db.rollback()
def make_purchase_return_records():
	"""On a small fraction of demo days, return a few submitted purchase receipts."""
	if random.random() >= 0.1:
		return
	for data in frappe.get_all('Purchase Receipt', fields=["name"], filters={"docstatus": 1}):
		if random.random() >= 0.1:
			continue
		try:
			return_doc = make_purchase_return(data.name)
			return_doc.insert()
			return_doc.submit()
			frappe.db.commit()
		except Exception:
			# Best-effort demo data: discard returns that fail validation.
			frappe.db.rollback()
| ovresko/erpnext | erpnext/demo/user/stock.py | Python | gpl-3.0 | 4,466 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015 Etherios, Inc. All rights reserved.
# Etherios, Inc. is a Division of Digi International.
| brucetsao/python-devicecloud | devicecloud/test/unit/__init__.py | Python | mpl-2.0 | 312 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Common codegen classes.
from collections import defaultdict
from itertools import groupby
import operator
import os
import re
import string
import textwrap
import functools
from WebIDL import (
BuiltinTypes,
IDLBuiltinType,
IDLNullValue,
IDLNullableType,
IDLObject,
IDLType,
IDLInterfaceMember,
IDLUndefinedValue,
IDLWrapperType,
)
from Configuration import (
MakeNativeName,
MemberIsUnforgeable,
getModuleFromObject,
getTypesFromCallback,
getTypesFromDescriptor,
getTypesFromDictionary,
iteratorNativeType
)
# Banner emitted at the top of every generated binding file.
AUTOGENERATED_WARNING_COMMENT = \
    "/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n\n"
# Names used for the generated finalize/trace/constructor/hasInstance hook
# functions (referenced elsewhere in this code generator).
FINALIZE_HOOK_NAME = '_finalize'
TRACE_HOOK_NAME = '_trace'
CONSTRUCT_HOOK_NAME = '_constructor'
HASINSTANCE_HOOK_NAME = '_hasInstance'
# Rust keywords and reserved words. NOTE(review): presumably used so that
# generated identifiers colliding with these get escaped/renamed -- the usage
# sites are outside this chunk.
RUST_KEYWORDS = {"abstract", "alignof", "as", "become", "box", "break", "const", "continue",
                 "else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in",
                 "let", "loop", "macro", "match", "mod", "move", "mut", "offsetof", "override",
                 "priv", "proc", "pub", "pure", "ref", "return", "static", "self", "sizeof",
                 "struct", "super", "true", "trait", "type", "typeof", "unsafe", "unsized",
                 "use", "virtual", "where", "while", "yield"}
def replaceFileIfChanged(filename, newContents):
    """
    Write `newContents` to `filename` unconditionally and return True.

    Despite the name, no comparison against the old contents is performed:
    skipping unchanged files interacts badly with make (XXXjdm), because
    edits to CodegenRust.py itself would then keep regenerating bindings
    perpetually until the output actually differed. The file is therefore
    always rewritten and the function always reports an update.
    """
    with open(filename, 'wb') as output:
        output.write(newContents)
    return True
def toStringBool(arg):
    """Return the Rust/JS source spelling ("true"/"false") of `arg`'s truthiness."""
    return "true" if arg else "false"
def toBindingNamespace(arg):
    # Insert "Binding" into the camel-cased name, placing it before a trailing
    # "_workers" suffix when present (e.g. "Foo_workers" -> "FooBinding_workers").
    # NOTE(review): on Python >= 3.7, re.sub also substitutes the empty match
    # that follows a non-empty "_workers" match, which would double the suffix
    # ("FooBinding_workersBinding") -- confirm if this ever runs on Python 3.
    return re.sub("((_workers)?$)", "Binding\\1", MakeNativeName(arg))
def stripTrailingWhitespace(text):
    """Strip trailing whitespace from every line of `text`, preserving a
    final newline when the input ends with one."""
    # splitlines() swallows the trailing line break, so remember whether to
    # restore it after rejoining.
    tail = '\n' if text.endswith('\n') else ''
    # Idiomatic comprehension instead of mutating the list by index.
    return '\n'.join(line.rstrip() for line in text.splitlines()) + tail
def innerContainerType(type):
    """Return the element type of the sequence/MozMap `type`, looking through
    an outer nullable wrapper if present."""
    assert type.isSequence() or type.isMozMap()
    # For a nullable T?, .inner is the container itself; its .inner is the
    # element type.
    return type.inner.inner if type.nullable() else type.inner
def wrapInNativeContainerType(type, inner):
    """
    Wrap the CGThing `inner` in the Rust container ("Vec<...>" or
    "MozMap<...>") corresponding to the IDL container `type`.
    Raises TypeError for any other type.
    """
    if type.isSequence():
        containerType = "Vec"
    elif type.isMozMap():
        containerType = "MozMap"
    else:
        # Format the message eagerly: the original passed (fmt, arg) as two
        # exception arguments, so "%s" was never interpolated.
        raise TypeError("Unexpected container type %s" % type)
    return CGWrapper(inner, pre=containerType + "<", post=">")
# Map from IDL builtin type tags to the Rust types used to represent them.
# Restricted "float"/"double" map to Finite<f32>/Finite<f64> (presumably
# encoding the WebIDL no-NaN/no-infinity restriction in the type), while the
# "unrestricted" variants use the bare primitives.
builtinNames = {
    IDLType.Tags.bool: 'bool',
    IDLType.Tags.int8: 'i8',
    IDLType.Tags.int16: 'i16',
    IDLType.Tags.int32: 'i32',
    IDLType.Tags.int64: 'i64',
    IDLType.Tags.uint8: 'u8',
    IDLType.Tags.uint16: 'u16',
    IDLType.Tags.uint32: 'u32',
    IDLType.Tags.uint64: 'u64',
    IDLType.Tags.unrestricted_float: 'f32',
    IDLType.Tags.float: 'Finite<f32>',
    IDLType.Tags.unrestricted_double: 'f64',
    IDLType.Tags.double: 'Finite<f64>'
}
# Tags of the numeric IDL types: all integers plus the unrestricted floating
# point variants. The restricted (Finite<>-wrapped) float/double tags are
# deliberately absent.
numericTags = [
    IDLType.Tags.int8, IDLType.Tags.uint8,
    IDLType.Tags.int16, IDLType.Tags.uint16,
    IDLType.Tags.int32, IDLType.Tags.uint32,
    IDLType.Tags.int64, IDLType.Tags.uint64,
    IDLType.Tags.unrestricted_float,
    IDLType.Tags.unrestricted_double
]
# Matches the start of any line whose first character is neither a newline nor
# '#': exactly the lines that should receive indentation (empty lines and
# preprocessor-style lines are left alone).
lineStartDetector = re.compile("^(?=[^\n#])", re.MULTILINE)


def indent(s, indentLevel=2):
    """
    Indent C++ code by `indentLevel` spaces.

    Weird secret feature: lines that are empty or start with '#' (such as
    #include lines or #ifdef/#endif) are not indented.
    """
    if not s:
        return s
    return lineStartDetector.sub(indentLevel * " ", s)
# dedent() and fill() are often called on the same string multiple
# times. We want to memoize their return values so we don't keep
# recomputing them all the time.
def memoize(fn):
    """
    Decorator to memoize a function of one argument. The cache just
    grows without bound.
    """
    cache = {}

    @functools.wraps(fn)
    def wrapper(arg):
        # Use a membership test instead of cache.get(arg): the get()-based
        # version treated a legitimately-None return value as "not cached"
        # and recomputed it on every call.
        if arg not in cache:
            cache[arg] = fn(arg)
        return cache[arg]
    return wrapper
@memoize
def dedent(s):
    """
    Remove all leading whitespace from s, and remove a blank line
    at the beginning.
    """
    # Strip a single leading newline, then let textwrap do the dedenting.
    return textwrap.dedent(s[1:] if s.startswith('\n') else s)
# This works by transforming the fill()-template to an equivalent
# string.Template.
# Matches an optionally indented "$*{name}" multi-line substitution marker,
# capturing (indentation, name, trailing-newline-or-None).
fill_multiline_substitution_re = re.compile(r"( *)\$\*{(\w+)}(\n)?")
@memoize
def compile_fill_template(template):
    """
    Helper function for fill(). Given the template string passed to fill(),
    do the reusable part of template processing and return a pair (t,
    argModList) that can be used every time fill() is called with that
    template argument.

    argModList is a list of tuples describing the modifications to be made
    to fill()'s args. Each modification has, in order: i) the arg name,
    ii) the modified (depth-suffixed) name, iii) the indent depth.
    """
    t = dedent(template)
    # fill() templates are either a whole block ending in '\n' or a single line.
    assert t.endswith("\n") or "\n" not in t
    argModList = []
    def replace(match):
        """
        Replaces a line like '    $*{xyz}\n' with '${xyz_n}',
        where n is the indent depth, and adds a corresponding entry to
        argModList.

        Note that this needs to close over argModList, so it has to be
        defined inside compile_fill_template().
        """
        indentation, name, nl = match.groups()
        depth = len(indentation)
        # Check that $*{xyz} appears by itself on a line.
        prev = match.string[:match.start()]
        if (prev and not prev.endswith("\n")) or nl is None:
            raise ValueError("Invalid fill() template: $*{%s} must appear by itself on a line" % name)
        # Now replace this whole line of template with the indented equivalent.
        modified_name = name + "_" + str(depth)
        argModList.append((name, modified_name, depth))
        return "${" + modified_name + "}"
    t = re.sub(fill_multiline_substitution_re, replace, t)
    return (string.Template(t), argModList)
def fill(template, **args):
    """
    Convenience wrapper around string.Template for multiline templates.

    `fill(template, name1=v1, name2=v2)` behaves much like
    `string.Template(template).substitute({"name1": v1, "name2": v2})`,
    with a few extras that make Python multiline strings pleasant:

    * the template is dedent()ed, so it can be indented alongside the code;
    * a single leading blank line is stripped;
    * a `$*{name}` substitution (which must appear by itself on a line,
      preceded only by spaces) splices in a multi-line value, indented to
      match the marker's position; an empty value produces no output at
      all, not even a blank line.
    """
    t, argModList = compile_fill_template(template)
    # compile_fill_template rewrote each $*{name} marker to ${name_depth};
    # supply the correspondingly indented values under those names.
    for name, modified_name, depth in argModList:
        value = args[name]
        if value != "" and not value.endswith("\n"):
            raise ValueError("Argument %s with value %r is missing a newline" % (name, value))
        args[modified_name] = indent(value, depth)
    return t.substitute(args)
class CGThing():
    """
    Abstract base class for things that spit out code.
    Subclasses override define() to return the Rust source they represent.
    """
    def __init__(self):
        pass  # Nothing for now
    def define(self):
        """Produce code for a Rust file."""
        raise NotImplementedError  # Override me!
class CGMethodCall(CGThing):
    """
    A class to generate selection of a method signature from a set of
    signatures and generation of a call to that signature.
    """
    def __init__(self, argsPre, nativeMethodName, static, descriptor, method):
        CGThing.__init__(self)
        # Pre-escaped for embedding inside a generated Rust string literal.
        methodName = '\\"%s.%s\\"' % (descriptor.interface.identifier.name, method.identifier.name)
        # Count of arguments that must be present: everything before the
        # trailing run of optional arguments.
        def requiredArgCount(signature):
            arguments = signature[1]
            if len(arguments) == 0:
                return 0
            requiredArgs = len(arguments)
            while requiredArgs and arguments[requiredArgs - 1].optional:
                requiredArgs -= 1
            return requiredArgs
        signatures = method.signatures()
        # Build the per-signature converter-and-call; each overload's native
        # method name is disambiguated with '_' suffixes by signature index.
        def getPerSignatureCall(signature, argConversionStartsAt=0):
            signatureIndex = signatures.index(signature)
            return CGPerSignatureCall(signature[0], argsPre, signature[1],
                                      nativeMethodName + '_' * signatureIndex,
                                      static, descriptor,
                                      method, argConversionStartsAt)
        if len(signatures) == 1:
            # Special case: we can just do a per-signature method call
            # here for our one signature and not worry about switching
            # on anything.
            signature = signatures[0]
            self.cgRoot = CGList([getPerSignatureCall(signature)])
            requiredArgs = requiredArgCount(signature)
            if requiredArgs > 0:
                code = (
                    "if argc < %d {\n"
                    "    throw_type_error(cx, \"Not enough arguments to %s.\");\n"
                    "    return false;\n"
                    "}" % (requiredArgs, methodName))
                self.cgRoot.prepend(
                    CGWrapper(CGGeneric(code), pre="\n", post="\n"))
            return
        # Need to find the right overload
        maxArgCount = method.maxArgCount
        allowedArgCounts = method.allowedArgCounts
        argCountCases = []
        for argCount in allowedArgCounts:
            possibleSignatures = method.signaturesForArgCount(argCount)
            if len(possibleSignatures) == 1:
                # easy case!
                signature = possibleSignatures[0]
                argCountCases.append(CGCase(str(argCount), getPerSignatureCall(signature)))
                continue
            distinguishingIndex = method.distinguishingIndexForArgCount(argCount)
            # We can't handle unions at the distinguishing index.
            for (returnType, args) in possibleSignatures:
                if args[distinguishingIndex].type.isUnion():
                    raise TypeError("No support for unions as distinguishing "
                                    "arguments yet: %s",
                                    args[distinguishingIndex].location)
            # Convert all our arguments up to the distinguishing index.
            # Doesn't matter which of the possible signatures we use, since
            # they all have the same types up to that point; just use
            # possibleSignatures[0]
            caseBody = [
                CGArgumentConverter(possibleSignatures[0][1][i],
                                    i, "args", "argc", descriptor)
                for i in range(0, distinguishingIndex)]
            # Select the right overload from our set.
            distinguishingArg = "args.get(%d)" % distinguishingIndex
            # Emit the (at most one) signature accepted by filterLambda,
            # guarded by `condition` when given. Returns whether one matched.
            def pickFirstSignature(condition, filterLambda):
                # NOTE(review): relies on Python 2 filter() returning a list;
                # on Python 3, len() of the filter object would raise.
                sigs = filter(filterLambda, possibleSignatures)
                assert len(sigs) < 2
                if len(sigs) > 0:
                    call = getPerSignatureCall(sigs[0], distinguishingIndex)
                    if condition is None:
                        caseBody.append(call)
                    else:
                        caseBody.append(CGGeneric("if " + condition + " {"))
                        caseBody.append(CGIndenter(call))
                        caseBody.append(CGGeneric("}"))
                    return True
                return False
            # First check for null or undefined
            pickFirstSignature("%s.get().is_null_or_undefined()" % distinguishingArg,
                               lambda s: (s[1][distinguishingIndex].type.nullable() or
                                          s[1][distinguishingIndex].type.isDictionary()))
            # Now check for distinguishingArg being an object that implements a
            # non-callback interface. That includes typed arrays and
            # arraybuffers.
            interfacesSigs = [
                s for s in possibleSignatures
                if (s[1][distinguishingIndex].type.isObject() or
                    s[1][distinguishingIndex].type.isNonCallbackInterface())]
            # There might be more than one of these; we need to check
            # which ones we unwrap to.
            if len(interfacesSigs) > 0:
                # The spec says that we should check for "platform objects
                # implementing an interface", but it's enough to guard on these
                # being an object. The code for unwrapping non-callback
                # interfaces and typed arrays will just bail out and move on to
                # the next overload if the object fails to unwrap correctly. We
                # could even not do the isObject() check up front here, but in
                # cases where we have multiple object overloads it makes sense
                # to do it only once instead of for each overload. That will
                # also allow the unwrapping test to skip having to do codegen
                # for the null-or-undefined case, which we already handled
                # above.
                caseBody.append(CGGeneric("if %s.get().is_object() {" %
                                          (distinguishingArg)))
                for idx, sig in enumerate(interfacesSigs):
                    caseBody.append(CGIndenter(CGGeneric("loop {")))
                    type = sig[1][distinguishingIndex].type
                    # The argument at index distinguishingIndex can't possibly
                    # be unset here, because we've already checked that argc is
                    # large enough that we can examine this argument.
                    info = getJSToNativeConversionInfo(
                        type, descriptor, failureCode="break;", isDefinitelyObject=True)
                    template = info.template
                    declType = info.declType
                    testCode = instantiateJSToNativeConversionTemplate(
                        template,
                        {"val": distinguishingArg},
                        declType,
                        "arg%d" % distinguishingIndex)
                    # Indent by 4, since we need to indent further than our "do" statement
                    caseBody.append(CGIndenter(testCode, 4))
                    # If we got this far, we know we unwrapped to the right
                    # interface, so just do the call. Start conversion with
                    # distinguishingIndex + 1, since we already converted
                    # distinguishingIndex.
                    caseBody.append(CGIndenter(
                        getPerSignatureCall(sig, distinguishingIndex + 1), 4))
                    caseBody.append(CGIndenter(CGGeneric("}")))
                caseBody.append(CGGeneric("}"))
            # XXXbz Now we're supposed to check for distinguishingArg being
            # an array or a platform object that supports indexed
            # properties... skip that last for now. It's a bit of a pain.
            pickFirstSignature("%s.get().is_object() && is_array_like(cx, %s)" %
                               (distinguishingArg, distinguishingArg),
                               lambda s:
                               (s[1][distinguishingIndex].type.isSequence() or
                                s[1][distinguishingIndex].type.isObject()))
            # Check for Date objects
            # XXXbz Do we need to worry about security wrappers around the Date?
            pickFirstSignature("%s.get().is_object() && "
                               "{ rooted!(in(cx) let obj = %s.get().to_object()); "
                               "let mut is_date = false; "
                               "assert!(JS_ObjectIsDate(cx, obj.handle(), &mut is_date)); "
                               "is_date }" %
                               (distinguishingArg, distinguishingArg),
                               lambda s: (s[1][distinguishingIndex].type.isDate() or
                                          s[1][distinguishingIndex].type.isObject()))
            # Check for vanilla JS objects
            # XXXbz Do we need to worry about security wrappers?
            pickFirstSignature("%s.get().is_object() && !is_platform_object(%s.get().to_object())" %
                               (distinguishingArg, distinguishingArg),
                               lambda s: (s[1][distinguishingIndex].type.isCallback() or
                                          s[1][distinguishingIndex].type.isCallbackInterface() or
                                          s[1][distinguishingIndex].type.isDictionary() or
                                          s[1][distinguishingIndex].type.isObject()))
            # The remaining cases are mutually exclusive. The
            # pickFirstSignature calls are what change caseBody
            # Check for strings or enums
            if pickFirstSignature(None,
                                  lambda s: (s[1][distinguishingIndex].type.isString() or
                                             s[1][distinguishingIndex].type.isEnum())):
                pass
            # Check for primitives
            elif pickFirstSignature(None,
                                    lambda s: s[1][distinguishingIndex].type.isPrimitive()):
                pass
            # Check for "any"
            elif pickFirstSignature(None,
                                    lambda s: s[1][distinguishingIndex].type.isAny()):
                pass
            else:
                # Just throw; we have no idea what we're supposed to
                # do with this.
                caseBody.append(CGGeneric("return Throw(cx, NS_ERROR_XPC_BAD_CONVERT_JS);"))
            argCountCases.append(CGCase(str(argCount),
                                        CGList(caseBody, "\n")))
        overloadCGThings = []
        overloadCGThings.append(
            CGGeneric("let argcount = cmp::min(argc, %d);" %
                      maxArgCount))
        overloadCGThings.append(
            CGSwitch("argcount",
                     argCountCases,
                     CGGeneric("throw_type_error(cx, \"Not enough arguments to %s.\");\n"
                               "return false;" % methodName)))
        # XXXjdm Avoid unreachable statement warnings
        # overloadCGThings.append(
        #     CGGeneric('panic!("We have an always-returning default case");\n'
        #               'return false;'))
        self.cgRoot = CGWrapper(CGList(overloadCGThings, "\n"),
                                pre="\n")
    def define(self):
        """Return the generated overload-selection code."""
        return self.cgRoot.define()
def dictionaryHasSequenceMember(dictionary):
    """True(ish) if `dictionary` or any of its ancestors has a member whose
    type is, or transitively contains, a sequence."""
    if any(typeIsSequenceOrHasSequenceMember(member.type)
           for member in dictionary.members):
        return True
    return (dictionary.parent and
            dictionaryHasSequenceMember(dictionary.parent))


def typeIsSequenceOrHasSequenceMember(type):
    """True(ish) if `type` is a sequence, or a dictionary/union that
    transitively contains one; an outer nullable wrapper is looked through."""
    inner = type.inner if type.nullable() else type
    if inner.isSequence():
        return True
    if inner.isDictionary():
        return dictionaryHasSequenceMember(inner.inner)
    if inner.isUnion():
        return any(typeIsSequenceOrHasSequenceMember(member.type)
                   for member in inner.flatMemberTypes)
    return False
def union_native_type(t):
    """Return the Rust type path for the union type `t`, e.g. "UnionTypes::StringOrLong"."""
    return 'UnionTypes::%s' % t.unroll().name
class JSToNativeConversionInfo():
    """
    The result of computing a JS-value-to-native conversion.

    Attributes:
      template: string of conversion code; "${val}" is substituted with a
                handle to the JS::Value being converted.
      default:  Rust source for the default value, or None if there is none.
      declType: CGThing naming the native type being converted to, or None
                when the conversion code is to be used as-is.
    """
    def __init__(self, template, default=None, declType=None):
        assert isinstance(template, str)
        assert declType is None or isinstance(declType, CGThing)
        self.template = template
        self.default = default
        self.declType = declType
def getJSToNativeConversionInfo(type, descriptorProvider, failureCode=None,
isDefinitelyObject=False,
isMember=False,
isArgument=False,
invalidEnumValueFatal=True,
defaultValue=None,
treatNullAs="Default",
isEnforceRange=False,
isClamp=False,
exceptionCode=None,
allowTreatNonObjectAsNull=False,
isCallbackReturnValue=False,
sourceDescription="value"):
"""
Get a template for converting a JS value to a native object based on the
given type and descriptor. If failureCode is given, then we're actually
testing whether we can convert the argument to the desired type. That
means that failures to convert due to the JS value being the wrong type of
value need to use failureCode instead of throwing exceptions. Failures to
convert that are due to JS exceptions (from toString or valueOf methods) or
out of memory conditions need to throw exceptions no matter what
failureCode is.
If isDefinitelyObject is True, that means we know the value
isObject() and we have no need to recheck that.
isMember is `False`, "Dictionary", "Union" or "Variadic", and affects
whether this function returns code suitable for an on-stack rooted binding
or suitable for storing in an appropriate larger structure.
invalidEnumValueFatal controls whether an invalid enum value conversion
attempt will throw (if true) or simply return without doing anything (if
false).
If defaultValue is not None, it's the IDL default value for this conversion
If isEnforceRange is true, we're converting an integer and throwing if the
value is out of range.
If isClamp is true, we're converting an integer and clamping if the
value is out of range.
If allowTreatNonObjectAsNull is true, then [TreatNonObjectAsNull]
extended attributes on nullable callback functions will be honored.
The return value from this function is an object of JSToNativeConversionInfo consisting of four things:
1) A string representing the conversion code. This will have template
substitution performed on it as follows:
${val} replaced by an expression for the JS::Value in question
2) A string or None representing Rust code for the default value (if any).
3) A CGThing representing the native C++ type we're converting to
(declType). This is allowed to be None if the conversion code is
supposed to be used as-is.
4) A boolean indicating whether the caller has to root the result.
"""
# We should not have a defaultValue if we know we're an object
assert not isDefinitelyObject or defaultValue is None
# If exceptionCode is not set, we'll just rethrow the exception we got.
# Note that we can't just set failureCode to exceptionCode, because setting
# failureCode will prevent pending exceptions from being set in cases when
# they really should be!
if exceptionCode is None:
exceptionCode = "return false;\n"
if failureCode is None:
failOrPropagate = "throw_type_error(cx, &error);\n%s" % exceptionCode
else:
failOrPropagate = failureCode
def handleOptional(template, declType, default):
assert (defaultValue is None) == (default is None)
return JSToNativeConversionInfo(template, default, declType)
# Unfortunately, .capitalize() on a string will lowercase things inside the
# string, which we do not want.
def firstCap(string):
return string[0].upper() + string[1:]
# Helper functions for dealing with failures due to the JS value being the
# wrong type of value.
def onFailureNotAnObject(failureCode):
return CGWrapper(
CGGeneric(
failureCode or
('throw_type_error(cx, "%s is not an object.");\n'
'%s' % (firstCap(sourceDescription), exceptionCode))),
post="\n")
def onFailureInvalidEnumValue(failureCode, passedVarName):
return CGGeneric(
failureCode or
('throw_type_error(cx, &format!("\'{}\' is not a valid enum value for enumeration \'%s\'.", %s)); %s'
% (type.name, passedVarName, exceptionCode)))
def onFailureNotCallable(failureCode):
return CGGeneric(
failureCode or
('throw_type_error(cx, \"%s is not callable.\");\n'
'%s' % (firstCap(sourceDescription), exceptionCode)))
# A helper function for handling null default values. Checks that the
# default value, if it exists, is null.
def handleDefaultNull(nullValue):
if defaultValue is None:
return None
if not isinstance(defaultValue, IDLNullValue):
raise TypeError("Can't handle non-null default value here")
assert type.nullable() or type.isDictionary()
return nullValue
# A helper function for wrapping up the template body for
# possibly-nullable objecty stuff
def wrapObjectTemplate(templateBody, nullValue, isDefinitelyObject, type,
failureCode=None):
if not isDefinitelyObject:
# Handle the non-object cases by wrapping up the whole
# thing in an if cascade.
templateBody = (
"if ${val}.get().is_object() {\n" +
CGIndenter(CGGeneric(templateBody)).define() + "\n")
if type.nullable():
templateBody += (
"} else if ${val}.get().is_null_or_undefined() {\n"
" %s\n") % nullValue
templateBody += (
"} else {\n" +
CGIndenter(onFailureNotAnObject(failureCode)).define() +
"}")
return templateBody
assert not (isEnforceRange and isClamp) # These are mutually exclusive
if type.isSequence() or type.isMozMap():
innerInfo = getJSToNativeConversionInfo(innerContainerType(type),
descriptorProvider,
isMember=isMember)
declType = wrapInNativeContainerType(type, innerInfo.declType)
config = getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs)
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=" >")
templateBody = ("match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(ConversionResult::Success(value)) => value,\n"
" Ok(ConversionResult::Failure(error)) => {\n"
"%s\n"
" }\n"
" _ => { %s },\n"
"}" % (config, indent(failOrPropagate, 8), exceptionCode))
return handleOptional(templateBody, declType, handleDefaultNull("None"))
if type.isUnion():
declType = CGGeneric(union_native_type(type))
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=" >")
if isMember != "Dictionary" and type_needs_tracing(type):
declType = CGTemplatedType("RootedTraceableBox", declType)
templateBody = ("match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(ConversionResult::Success(value)) => value,\n"
" Ok(ConversionResult::Failure(error)) => {\n"
"%s\n"
" }\n"
" _ => { %s },\n"
"}" % (indent(failOrPropagate, 8), exceptionCode))
dictionaries = [
memberType
for memberType in type.unroll().flatMemberTypes
if memberType.isDictionary()
]
if dictionaries:
if defaultValue:
assert isinstance(defaultValue, IDLNullValue)
dictionary, = dictionaries
default = "%s::%s(%s::%s::empty(cx))" % (
union_native_type(type),
dictionary.name,
CGDictionary.makeModuleName(dictionary.inner),
CGDictionary.makeDictionaryName(dictionary.inner))
else:
default = None
else:
default = handleDefaultNull("None")
return handleOptional(templateBody, declType, default)
if type.isGeckoInterface():
assert not isEnforceRange and not isClamp
descriptor = descriptorProvider.getDescriptor(
type.unroll().inner.identifier.name)
if descriptor.interface.isCallback():
name = descriptor.nativeType
declType = CGWrapper(CGGeneric(name), pre="Rc<", post=">")
template = "%s::new(cx, ${val}.get().to_object())" % name
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
template = wrapObjectTemplate("Some(%s)" % template, "None",
isDefinitelyObject, type,
failureCode)
return handleOptional(template, declType, handleDefaultNull("None"))
conversionFunction = "root_from_handlevalue"
descriptorType = descriptor.returnType
if isMember == "Variadic":
conversionFunction = "native_from_handlevalue"
descriptorType = descriptor.nativeType
elif isArgument:
descriptorType = descriptor.argumentType
templateBody = ""
isPromise = descriptor.interface.identifier.name == "Promise"
if isPromise:
# Per spec, what we're supposed to do is take the original
# Promise.resolve and call it with the original Promise as this
# value to make a Promise out of whatever value we actually have
# here. The question is which global we should use. There are
# a couple cases to consider:
#
# 1) Normal call to API with a Promise argument. This is a case the
# spec covers, and we should be using the current Realm's
# Promise. That means the current compartment.
# 2) Promise return value from a callback or callback interface.
# This is in theory a case the spec covers but in practice it
# really doesn't define behavior here because it doesn't define
# what Realm we're in after the callback returns, which is when
# the argument conversion happens. We will use the current
# compartment, which is the compartment of the callable (which
# may itself be a cross-compartment wrapper itself), which makes
# as much sense as anything else. In practice, such an API would
# once again be providing a Promise to signal completion of an
# operation, which would then not be exposed to anyone other than
# our own implementation code.
templateBody = fill(
"""
{ // Scope for our JSAutoCompartment.
rooted!(in(cx) let globalObj = CurrentGlobalOrNull(cx));
let promiseGlobal = GlobalScope::from_object_maybe_wrapped(globalObj.handle().get());
rooted!(in(cx) let mut valueToResolve = $${val}.get());
if !JS_WrapValue(cx, valueToResolve.handle_mut()) {
$*{exceptionCode}
}
match Promise::Resolve(&promiseGlobal, cx, valueToResolve.handle()) {
Ok(value) => value,
Err(error) => {
throw_dom_exception(cx, &promiseGlobal, error);
$*{exceptionCode}
}
}
}
""",
exceptionCode=exceptionCode)
else:
if descriptor.interface.isConsequential():
raise TypeError("Consequential interface %s being used as an "
"argument" % descriptor.interface.identifier.name)
if failureCode is None:
substitutions = {
"sourceDescription": sourceDescription,
"interface": descriptor.interface.identifier.name,
"exceptionCode": exceptionCode,
}
unwrapFailureCode = string.Template(
'throw_type_error(cx, "${sourceDescription} does not '
'implement interface ${interface}.");\n'
'${exceptionCode}').substitute(substitutions)
else:
unwrapFailureCode = failureCode
templateBody = fill(
"""
match ${function}($${val}) {
Ok(val) => val,
Err(()) => {
$*{failureCode}
}
}
""",
failureCode=unwrapFailureCode + "\n",
function=conversionFunction)
declType = CGGeneric(descriptorType)
if type.nullable():
templateBody = "Some(%s)" % templateBody
declType = CGWrapper(declType, pre="Option<", post=">")
templateBody = wrapObjectTemplate(templateBody, "None",
isDefinitelyObject, type, failureCode)
return handleOptional(templateBody, declType, handleDefaultNull("None"))
if type.isSpiderMonkeyInterface():
raise TypeError("Can't handle SpiderMonkey interface arguments yet")
if type.isDOMString():
nullBehavior = getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs)
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(ConversionResult::Success(strval)) => strval,\n"
" Ok(ConversionResult::Failure(error)) => {\n"
"%s\n"
" }\n"
" _ => { %s },\n"
"}" % (nullBehavior, indent(failOrPropagate, 8), exceptionCode))
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
assert type.nullable()
default = "None"
else:
assert defaultValue.type.tag() == IDLType.Tags.domstring
default = 'DOMString::from("%s")' % defaultValue.value
if type.nullable():
default = "Some(%s)" % default
declType = "DOMString"
if type.nullable():
declType = "Option<%s>" % declType
return handleOptional(conversionCode, CGGeneric(declType), default)
if type.isUSVString():
assert not isEnforceRange and not isClamp
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(ConversionResult::Success(strval)) => strval,\n"
" Ok(ConversionResult::Failure(error)) => {\n"
"%s\n"
" }\n"
" _ => { %s },\n"
"}" % (indent(failOrPropagate, 8), exceptionCode))
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
assert type.nullable()
default = "None"
else:
assert defaultValue.type.tag() in (IDLType.Tags.domstring, IDLType.Tags.usvstring)
default = 'USVString("%s".to_owned())' % defaultValue.value
if type.nullable():
default = "Some(%s)" % default
declType = "USVString"
if type.nullable():
declType = "Option<%s>" % declType
return handleOptional(conversionCode, CGGeneric(declType), default)
if type.isByteString():
assert not isEnforceRange and not isClamp
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(ConversionResult::Success(strval)) => strval,\n"
" Ok(ConversionResult::Failure(error)) => {\n"
"%s\n"
" }\n"
" _ => { %s },\n"
"}" % (indent(failOrPropagate, 8), exceptionCode))
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
assert type.nullable()
default = "None"
else:
assert defaultValue.type.tag() in (IDLType.Tags.domstring, IDLType.Tags.bytestring)
default = 'ByteString::new(b"%s".to_vec())' % defaultValue.value
if type.nullable():
default = "Some(%s)" % default
declType = "ByteString"
if type.nullable():
declType = "Option<%s>" % declType
return handleOptional(conversionCode, CGGeneric(declType), default)
if type.isEnum():
assert not isEnforceRange and not isClamp
if type.nullable():
raise TypeError("We don't support nullable enumerated arguments "
"yet")
enum = type.inner.identifier.name
if invalidEnumValueFatal:
handleInvalidEnumValueCode = onFailureInvalidEnumValue(failureCode, 'search').define()
else:
handleInvalidEnumValueCode = "return true;"
template = (
"match find_enum_value(cx, ${val}, %(pairs)s) {\n"
" Err(_) => { %(exceptionCode)s },\n"
" Ok((None, search)) => { %(handleInvalidEnumValueCode)s },\n"
" Ok((Some(&value), _)) => value,\n"
"}" % {"pairs": enum + "Values::pairs",
"exceptionCode": exceptionCode,
"handleInvalidEnumValueCode": handleInvalidEnumValueCode})
if defaultValue is not None:
assert defaultValue.type.tag() == IDLType.Tags.domstring
default = "%s::%s" % (enum, getEnumValueName(defaultValue.value))
else:
default = None
return handleOptional(template, CGGeneric(enum), default)
if type.isCallback():
assert not isEnforceRange and not isClamp
assert not type.treatNonCallableAsNull()
assert not type.treatNonObjectAsNull() or type.nullable()
assert not type.treatNonObjectAsNull() or not type.treatNonCallableAsNull()
callback = type.unroll().callback
declType = CGGeneric(callback.identifier.name)
finalDeclType = CGTemplatedType("Rc", declType)
conversion = CGCallbackTempRoot(declType.define())
if type.nullable():
declType = CGTemplatedType("Option", declType)
finalDeclType = CGTemplatedType("Option", finalDeclType)
conversion = CGWrapper(conversion, pre="Some(", post=")")
if allowTreatNonObjectAsNull and type.treatNonObjectAsNull():
if not isDefinitelyObject:
haveObject = "${val}.get().is_object()"
template = CGIfElseWrapper(haveObject,
conversion,
CGGeneric("None")).define()
else:
template = conversion
else:
template = CGIfElseWrapper("IsCallable(${val}.get().to_object())",
conversion,
onFailureNotCallable(failureCode)).define()
template = wrapObjectTemplate(
template,
"None",
isDefinitelyObject,
type,
failureCode)
if defaultValue is not None:
assert allowTreatNonObjectAsNull
assert type.treatNonObjectAsNull()
assert type.nullable()
assert isinstance(defaultValue, IDLNullValue)
default = "None"
else:
default = None
return JSToNativeConversionInfo(template, default, finalDeclType)
if type.isAny():
assert not isEnforceRange and not isClamp
assert isMember != "Union"
if isMember == "Dictionary":
# TODO: Need to properly root dictionaries
# https://github.com/servo/servo/issues/6381
declType = CGGeneric("Heap<JSVal>")
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
default = "Heap::new(NullValue())"
elif isinstance(defaultValue, IDLUndefinedValue):
default = "Heap::new(UndefinedValue())"
else:
raise TypeError("Can't handle non-null, non-undefined default value here")
return handleOptional("Heap::new(${val}.get())", declType, default)
declType = CGGeneric("HandleValue")
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
default = "HandleValue::null()"
elif isinstance(defaultValue, IDLUndefinedValue):
default = "HandleValue::undefined()"
else:
raise TypeError("Can't handle non-null, non-undefined default value here")
return handleOptional("${val}", declType, default)
if type.isObject():
assert not isEnforceRange and not isClamp
# TODO: Need to root somehow
# https://github.com/servo/servo/issues/6382
default = "ptr::null_mut()"
templateBody = wrapObjectTemplate("${val}.get().to_object()",
default,
isDefinitelyObject, type, failureCode)
if isMember in ("Dictionary", "Union"):
declType = CGGeneric("Heap<*mut JSObject>")
templateBody = "Heap::new(%s)" % templateBody
default = "Heap::new(%s)" % default
else:
# TODO: Need to root somehow
# https://github.com/servo/servo/issues/6382
declType = CGGeneric("*mut JSObject")
return handleOptional(templateBody, declType,
handleDefaultNull(default))
if type.isDictionary():
# There are no nullable dictionaries
assert not type.nullable()
typeName = "%s::%s" % (CGDictionary.makeModuleName(type.inner),
CGDictionary.makeDictionaryName(type.inner))
declType = CGGeneric(typeName)
empty = "%s::empty(cx)" % typeName
if isMember != "Dictionary" and type_needs_tracing(type):
declType = CGTemplatedType("RootedTraceableBox", declType)
empty = "RootedTraceableBox::new(%s)" % empty
template = ("match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(ConversionResult::Success(dictionary)) => dictionary,\n"
" Ok(ConversionResult::Failure(error)) => {\n"
"%s\n"
" }\n"
" _ => { %s },\n"
"}" % (indent(failOrPropagate, 8), exceptionCode))
return handleOptional(template, declType, handleDefaultNull(empty))
if type.isVoid():
# This one only happens for return values, and its easy: Just
# ignore the jsval.
return JSToNativeConversionInfo("", None, None)
if not type.isPrimitive():
raise TypeError("Need conversion for argument type '%s'" % str(type))
conversionBehavior = getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs)
if failureCode is None:
failureCode = 'return false'
declType = CGGeneric(builtinNames[type.tag()])
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
template = (
"match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(ConversionResult::Success(v)) => v,\n"
" Ok(ConversionResult::Failure(error)) => {\n"
"%s\n"
" }\n"
" _ => { %s }\n"
"}" % (conversionBehavior, indent(failOrPropagate, 8), exceptionCode))
if defaultValue is not None:
if isinstance(defaultValue, IDLNullValue):
assert type.nullable()
defaultStr = "None"
else:
tag = defaultValue.type.tag()
if tag in [IDLType.Tags.float, IDLType.Tags.double]:
defaultStr = "Finite::wrap(%s)" % defaultValue.value
elif tag in numericTags:
defaultStr = str(defaultValue.value)
else:
assert tag == IDLType.Tags.bool
defaultStr = toStringBool(defaultValue.value)
if type.nullable():
defaultStr = "Some(%s)" % defaultStr
else:
defaultStr = None
return handleOptional(template, declType, defaultStr)
def instantiateJSToNativeConversionTemplate(templateBody, replacements,
                                            declType, declName):
    """
    Take the templateBody and declType as returned by
    getJSToNativeConversionInfo, a set of replacements as required by the
    strings in such a templateBody, and a declName, and generate code to
    convert into a stack Rust binding with that name.
    """
    conversion = CGGeneric(string.Template(templateBody).substitute(replacements))

    if declType is None:
        # No declared type: emit the bare conversion expression.
        pieces = [conversion]
    else:
        # Bind the converted value: `let <declName>: <declType> = <conversion>;`
        pieces = [CGList([
            CGGeneric("let "),
            CGGeneric(declName),
            CGGeneric(": "),
            declType,
            CGGeneric(" = "),
            conversion,
            CGGeneric(";"),
        ])]

    # Trailing empty CGGeneric produces an extra newline after the argument
    # conversion.
    pieces.append(CGGeneric(""))
    return CGList(pieces, "\n")
def convertConstIDLValueToJSVal(value):
    """
    Map an IDL constant value onto the Rust `ConstantVal` variant used in the
    generated `ConstantSpec` tables.

    Raises TypeError for constant types we do not know how to represent.
    """
    if isinstance(value, IDLNullValue):
        return "ConstantVal::NullVal"
    tag = value.type.tag()
    if tag in [IDLType.Tags.int8, IDLType.Tags.uint8, IDLType.Tags.int16,
               IDLType.Tags.uint16, IDLType.Tags.int32]:
        return "ConstantVal::IntVal(%s)" % (value.value)
    if tag == IDLType.Tags.uint32:
        return "ConstantVal::UintVal(%s)" % (value.value)
    # 64-bit integers don't fit in a JS int; expose them as doubles.
    if tag in [IDLType.Tags.int64, IDLType.Tags.uint64]:
        return "ConstantVal::DoubleVal(%s)" % (value.value)
    if tag == IDLType.Tags.bool:
        return "ConstantVal::BoolVal(true)" if value.value else "ConstantVal::BoolVal(false)"
    if tag in [IDLType.Tags.unrestricted_float, IDLType.Tags.float,
               IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
        return "ConstantVal::DoubleVal(%s)" % (value.value)
    # Bug fix: the original `"..." + value.type` concatenated a str with a
    # non-str type object, which itself raises a TypeError with a useless
    # message; format the type explicitly instead.
    raise TypeError("Const value of unhandled type: %s" % value.type)
class CGArgumentConverter(CGThing):
    """
    A class that takes an IDL argument object, its index in the
    argument list, and the argv and argc strings and generates code to
    unwrap the argument to the right native type.
    """
    def __init__(self, argument, index, args, argc, descriptorProvider,
                 invalidEnumValueFatal=True):
        CGThing.__init__(self)
        # Only optional arguments may carry a default value.
        assert not argument.defaultValue or argument.optional

        replacer = {
            "index": index,
            "argc": argc,
            "args": args
        }
        # `val` is the JS value slot this argument is read from.
        replacementVariables = {
            "val": string.Template("${args}.get(${index})").substitute(replacer),
        }

        info = getJSToNativeConversionInfo(
            argument.type,
            descriptorProvider,
            invalidEnumValueFatal=invalidEnumValueFatal,
            defaultValue=argument.defaultValue,
            treatNullAs=argument.treatNullAs,
            isEnforceRange=argument.enforceRange,
            isClamp=argument.clamp,
            isMember="Variadic" if argument.variadic else False,
            allowTreatNonObjectAsNull=argument.allowTreatNonCallableAsNull())
        template = info.template
        default = info.default
        declType = info.declType

        if not argument.variadic:
            if argument.optional:
                # Optional argument: absent (undefined) slots either take the
                # IDL default value or become None via an Option<> wrapper.
                condition = "{args}.get({index}).is_undefined()".format(**replacer)
                if argument.defaultValue:
                    assert default
                    template = CGIfElseWrapper(condition,
                                               CGGeneric(default),
                                               CGGeneric(template)).define()
                else:
                    assert not default
                    declType = CGWrapper(declType, pre="Option<", post=">")
                    template = CGIfElseWrapper(condition,
                                               CGGeneric("None"),
                                               CGGeneric("Some(%s)" % template)).define()
            else:
                assert not default

            self.converter = instantiateJSToNativeConversionTemplate(
                template, replacementVariables, declType, "arg%d" % index)
        else:
            # Variadic argument: loop over the trailing JS arguments,
            # converting each into `slot` and pushing it onto a vector.
            assert argument.optional
            variadicConversion = {
                "val": string.Template("${args}.get(variadicArg)").substitute(replacer),
            }
            innerConverter = [instantiateJSToNativeConversionTemplate(
                template, variadicConversion, declType, "slot")]

            arg = "arg%d" % index
            if argument.type.isGeckoInterface():
                # DOM objects must be kept in a rooted vector while unrooted
                # on the stack.
                init = "rooted_vec!(let mut %s)" % arg
                innerConverter.append(CGGeneric("%s.push(JS::from_ref(&*slot));" % arg))
            else:
                init = "let mut %s = vec![]" % arg
                innerConverter.append(CGGeneric("%s.push(slot);" % arg))
            inner = CGIndenter(CGList(innerConverter, "\n"), 8).define()

            self.converter = CGGeneric("""\
%(init)s;
if %(argc)s > %(index)s {
    %(arg)s.reserve(%(argc)s as usize - %(index)s);
    for variadicArg in %(index)s..%(argc)s {
%(inner)s
    }
}""" % {'arg': arg, 'argc': argc, 'index': index, 'inner': inner, 'init': init})

    def define(self):
        # Emit the generated conversion code.
        return self.converter.define()
def wrapForType(jsvalRef, result='result', successCode='return true;', pre=''):
    """
    Reflect a Rust value into JS.

    * 'jsvalRef': a MutableHandleValue in which to store the result
      of the conversion;
    * 'result': the name of the variable in which the Rust value is stored;
    * 'successCode': the code to run once we have done the conversion.
    * 'pre': code to run before the conversion if rooting is necessary
    """
    parts = ["%s\n(%s).to_jsval(cx, %s);" % (pre, result, jsvalRef)]
    if successCode:
        parts.append(successCode)
    return "\n".join(parts)
def typeNeedsCx(type, retVal=False):
    """
    Return True if converting a value of `type` requires an explicit
    JSContext (i.e. for `any`/`object` values, SpiderMonkey interfaces in
    return position, or unions containing such types).
    """
    if type is None:
        return False
    # Strip at most one nullable layer, then at most one sequence layer.
    unwrapped = type.inner if type.nullable() else type
    if unwrapped.isSequence():
        unwrapped = unwrapped.inner
    if unwrapped.isUnion():
        return any(typeNeedsCx(member)
                   for member in unwrapped.unroll().flatMemberTypes)
    if retVal and unwrapped.isSpiderMonkeyInterface():
        return True
    return unwrapped.isAny() or unwrapped.isObject()
def getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs):
    """
    Return the Rust conversion-behavior expression passed to
    FromJSValConvertible::from_jsval for `type`.

    Raises TypeError for unsupported [TreatNullAs] values on DOMString.
    """
    # Containers convert element-wise: recurse on the inner type.
    if type.isSequence() or type.isMozMap():
        return getConversionConfigForType(innerContainerType(type),
                                          isEnforceRange, isClamp, treatNullAs)

    if type.isDOMString():
        assert not isEnforceRange and not isClamp
        behaviors = {
            "Default": "StringificationBehavior::Default",
            "EmptyString": "StringificationBehavior::Empty",
        }
        if treatNullAs not in behaviors:
            raise TypeError("We don't support [TreatNullAs=%s]" % treatNullAs)
        # Note: the actual behavior passed here doesn't matter for nullable
        # strings.
        if type.nullable():
            return "StringificationBehavior::Default"
        return behaviors[treatNullAs]

    if type.isPrimitive() and type.isInteger():
        if isEnforceRange:
            return "ConversionBehavior::EnforceRange"
        if isClamp:
            return "ConversionBehavior::Clamp"
        return "ConversionBehavior::Default"

    assert not isEnforceRange and not isClamp
    return "()"
def getRetvalDeclarationForType(returnType, descriptorProvider):
    """
    Return a CGThing naming the Rust type used to hold a return value of
    `returnType`, consulting `descriptorProvider` for interface types.

    Raises TypeError for return types we do not know how to declare.
    """
    def maybeOption(result):
        # Nullable return types are wrapped in Option<...>.
        if returnType.nullable():
            return CGWrapper(result, pre="Option<", post=">")
        return result

    if returnType is None or returnType.isVoid():
        # Nothing to declare
        return CGGeneric("()")
    if returnType.isPrimitive() and returnType.tag() in builtinNames:
        return maybeOption(CGGeneric(builtinNames[returnType.tag()]))
    if returnType.isDOMString():
        return maybeOption(CGGeneric("DOMString"))
    if returnType.isUSVString():
        return maybeOption(CGGeneric("USVString"))
    if returnType.isByteString():
        return maybeOption(CGGeneric("ByteString"))
    if returnType.isEnum():
        return maybeOption(CGGeneric(returnType.unroll().inner.identifier.name))
    if returnType.isGeckoInterface():
        descriptor = descriptorProvider.getDescriptor(
            returnType.unroll().inner.identifier.name)
        return maybeOption(CGGeneric(descriptor.returnType))
    if returnType.isCallback():
        callback = returnType.unroll().callback
        return maybeOption(CGGeneric(
            'Rc<%s::%s>' % (getModuleFromObject(callback), callback.identifier.name)))
    if returnType.isUnion():
        return maybeOption(CGGeneric(union_native_type(returnType)))
    # TODO: Return the value through a MutableHandleValue outparam
    # https://github.com/servo/servo/issues/6307
    if returnType.isAny():
        return CGGeneric("JSVal")
    if returnType.isObject() or returnType.isSpiderMonkeyInterface():
        return maybeOption(CGGeneric("NonZero<*mut JSObject>"))
    if returnType.isSequence() or returnType.isMozMap():
        # Declare the container of the inner type's declaration.
        inner = getRetvalDeclarationForType(innerContainerType(returnType),
                                            descriptorProvider)
        return maybeOption(wrapInNativeContainerType(returnType, inner))
    if returnType.isDictionary():
        nullable = returnType.nullable()
        dictName = returnType.inner.name if nullable else returnType.name
        return maybeOption(CGGeneric(dictName))

    raise TypeError("Don't know how to declare return value for %s" %
                    returnType)
def MemberCondition(pref, func):
    """
    Build the Rust `Condition` expression guarding whether a member is
    actually exposed.

    Any of the arguments can be None. If not None, they should have the
    following types:

    pref: The name of the preference.

    func: The name of the function.
    """
    assert pref is None or isinstance(pref, str)
    assert func is None or isinstance(func, str)
    assert func is None or pref is None
    for template, value in (('Condition::Pref("%s")', pref),
                            ('Condition::Func(%s)', func)):
        if value:
            return template % value
    return "Condition::Satisfied"
class PropertyDefiner:
    """
    A common superclass for defining things on prototype objects.

    Subclasses should implement generateArray to generate the actual arrays of
    things we're defining. They should also set self.regular to the list of
    things exposed to web pages.
    """
    def __init__(self, descriptor, name):
        self.descriptor = descriptor
        self.name = name

    def variableName(self):
        # Name of the generated Rust static holding this property array.
        return "s" + self.name

    def length(self):
        return len(self.regular)

    def __str__(self):
        # We only need to generate id arrays for things that will end
        # up used via ResolveProperty or EnumerateProperties.
        return self.generateArray(self.regular, self.variableName())

    @staticmethod
    def getStringAttr(member, name):
        """
        Return the single string value of extended attribute `name` on
        `member`, or None if the attribute is absent.
        """
        attr = member.getExtendedAttribute(name)
        if attr is None:
            return None
        # It's a list of strings
        assert len(attr) == 1
        assert attr[0] is not None
        return attr[0]

    @staticmethod
    def getControllingCondition(interfaceMember, descriptor):
        # Derive the exposure condition from the [Pref]/[Func] extended
        # attributes on the member; Condition::Satisfied if neither is set.
        return MemberCondition(
            PropertyDefiner.getStringAttr(interfaceMember,
                                          "Pref"),
            PropertyDefiner.getStringAttr(interfaceMember,
                                          "Func"))

    def generateGuardedArray(self, array, name, specTemplate, specTerminator,
                             specType, getCondition, getDataTuple):
        """
        This method generates our various arrays.

        array is an array of interface members as passed to generateArray

        name is the name as passed to generateArray

        specTemplate is a template for each entry of the spec array

        specTerminator is a terminator for the spec array (inserted at the end
        of the array), or None

        specType is the actual typename of our spec

        getCondition is a callback taking (member, descriptor) and returning
        the Condition expression that guards that member's exposure

        getDataTuple is a callback function that takes an array entry and
        returns a tuple suitable for substitution into specTemplate.
        """
        # We generate an all-encompassing list of lists of specs, with each sublist
        # representing a group of members that share a common pref name. That will
        # make sure the order of the properties as exposed on the interface and
        # interface prototype objects does not change when pref control is added to
        # members while still allowing us to define all the members in the smallest
        # number of JSAPI calls.
        assert len(array) != 0

        specs = []
        prefableSpecs = []
        prefableTemplate = '    Guard::new(%s, %s[%d])'

        # NOTE: groupby only merges *adjacent* members with equal conditions,
        # which preserves declaration order as required above.
        for cond, members in groupby(array, lambda m: getCondition(m, self.descriptor)):
            currentSpecs = [specTemplate % getDataTuple(m) for m in members]
            if specTerminator:
                currentSpecs.append(specTerminator)
            specs.append("&[\n" + ",\n".join(currentSpecs) + "]\n")
            prefableSpecs.append(
                prefableTemplate % (cond, name + "_specs", len(specs) - 1))

        specsArray = ("const %s_specs: &'static [&'static[%s]] = &[\n" +
                      ",\n".join(specs) + "\n" +
                      "];\n") % (name, specType)

        prefArray = ("const %s: &'static [Guard<&'static [%s]>] = &[\n" +
                     ",\n".join(prefableSpecs) + "\n" +
                     "];\n") % (name, specType)
        return specsArray + prefArray
def methodLength(method):
    """
    Return the WebIDL `length` of a method: the minimum, over all of its
    overload signatures, of the number of required (non-optional,
    non-variadic) arguments.
    """
    def requiredArgCount(arguments):
        return sum(1 for arg in arguments
                   if not arg.optional and not arg.variadic)

    return min(requiredArgCount(arguments)
               for _, arguments in method.signatures())
class MethodDefiner(PropertyDefiner):
    """
    A class for defining methods on a prototype object.
    """
    def __init__(self, descriptor, name, static, unforgeable):
        assert not (static and unforgeable)
        PropertyDefiner.__init__(self, descriptor, name)

        # FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=772822
        #       We should be able to check for special operations without an
        #       identifier. For now we check if the name starts with __

        # Ignore non-static methods for callback interfaces
        if not descriptor.interface.isCallback() or static:
            methods = [m for m in descriptor.interface.members if
                       m.isMethod() and m.isStatic() == static and
                       not m.isIdentifierLess() and
                       MemberIsUnforgeable(m, descriptor) == unforgeable]
        else:
            methods = []
        self.regular = [{"name": m.identifier.name,
                         "methodInfo": not m.isStatic(),
                         "length": methodLength(m),
                         "condition": PropertyDefiner.getControllingCondition(m, descriptor)}
                        for m in methods]

        # FIXME Check for an existing iterator on the interface first.
        if any(m.isGetter() and m.isIndexed() for m in methods):
            self.regular.append({"name": '@@iterator',
                                 "methodInfo": False,
                                 "selfHostedName": "ArrayValues",
                                 "length": 0,
                                 "condition": "Condition::Satisfied"})

        # Generate the keys/values/entries/forEach aliases for value iterables.
        maplikeOrSetlikeOrIterable = descriptor.interface.maplikeOrSetlikeOrIterable
        if (not static and not unforgeable and
            (maplikeOrSetlikeOrIterable and
             maplikeOrSetlikeOrIterable.isIterable() and
             maplikeOrSetlikeOrIterable.isValueIterator())):
            # Bug fix: the original code computed the condition from `m`, the
            # leaked list-comprehension variable (i.e. the *last* method in
            # `methods`), which is wrong under Python 2 and a NameError under
            # Python 3. The condition must come from the iterable member
            # itself.
            condition = PropertyDefiner.getControllingCondition(
                maplikeOrSetlikeOrIterable, descriptor)
            # All four aliases are backed by the self-hosted Array intrinsics.
            for alias, selfHostedName, length in [("keys", "ArrayKeys", 0),
                                                  ("values", "ArrayValues", 0),
                                                  ("entries", "ArrayEntries", 0),
                                                  ("forEach", "ArrayForEach", 1)]:
                self.regular.append({
                    "name": alias,
                    "methodInfo": False,
                    "selfHostedName": selfHostedName,
                    "length": length,
                    "condition": condition,
                })

        isUnforgeableInterface = bool(descriptor.interface.getExtendedAttribute("Unforgeable"))
        if not static and unforgeable == isUnforgeableInterface:
            stringifier = descriptor.operations['Stringifier']
            if stringifier:
                self.regular.append({
                    "name": "toString",
                    "nativeName": stringifier.identifier.name,
                    "length": 0,
                    "condition": PropertyDefiner.getControllingCondition(stringifier, descriptor)
                })
        self.unforgeable = unforgeable

    def generateArray(self, array, name):
        """
        Emit the JSFunctionSpec arrays (plus their pref guards) for `array`.
        """
        if len(array) == 0:
            return ""

        def condition(m, d):
            # Conditions were precomputed in __init__.
            return m["condition"]

        flags = "JSPROP_ENUMERATE"
        if self.unforgeable:
            flags += " | JSPROP_PERMANENT | JSPROP_READONLY"

        def specData(m):
            # TODO: Use something like JS_FNSPEC
            # https://github.com/servo/servo/issues/6391
            if "selfHostedName" in m:
                selfHostedName = '%s as *const u8 as *const libc::c_char' % str_to_const_array(m["selfHostedName"])
                assert not m.get("methodInfo", True)
                accessor = "None"
                jitinfo = "0 as *const JSJitInfo"
            else:
                selfHostedName = "0 as *const libc::c_char"
                if m.get("methodInfo", True):
                    identifier = m.get("nativeName", m["name"])
                    # Go through an intermediate type here, because it's not
                    # easy to tell whether the methodinfo is a JSJitInfo or
                    # a JSTypedMethodJitInfo here. The compiler knows, though,
                    # so let it do the work.
                    jitinfo = "&%s_methodinfo as *const _ as *const JSJitInfo" % identifier
                    accessor = "Some(generic_method)"
                else:
                    jitinfo = "0 as *const JSJitInfo"
                    accessor = 'Some(%s)' % m.get("nativeName", m["name"])
            if m["name"].startswith("@@"):
                # Well-known-symbol-named method: encode the symbol id.
                return ('(SymbolCode::%s as i32 + 1)'
                        % m["name"][2:], accessor, jitinfo, m["length"], flags, selfHostedName)
            return (str_to_const_array(m["name"]), accessor, jitinfo, m["length"], flags, selfHostedName)

        return self.generateGuardedArray(
            array, name,
            '    JSFunctionSpec {\n'
            '        name: %s as *const u8 as *const libc::c_char,\n'
            '        call: JSNativeWrapper { op: %s, info: %s },\n'
            '        nargs: %s,\n'
            '        flags: (%s) as u16,\n'
            '        selfHostedName: %s\n'
            '    }',
            '    JSFunctionSpec {\n'
            '        name: 0 as *const libc::c_char,\n'
            '        call: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo },\n'
            '        nargs: 0,\n'
            '        flags: 0,\n'
            '        selfHostedName: 0 as *const libc::c_char\n'
            '    }',
            'JSFunctionSpec',
            condition, specData)
class AttrDefiner(PropertyDefiner):
    """
    A class for defining attributes (JSPropertySpec entries) on a prototype
    object.
    """
    def __init__(self, descriptor, name, static, unforgeable):
        assert not (static and unforgeable)
        PropertyDefiner.__init__(self, descriptor, name)
        self.name = name
        self.descriptor = descriptor
        # Attributes matching the requested static/unforgeable combination.
        self.regular = [
            m
            for m in descriptor.interface.members if
            m.isAttr() and m.isStatic() == static and
            MemberIsUnforgeable(m, descriptor) == unforgeable
        ]
        self.static = static
        self.unforgeable = unforgeable

    def generateArray(self, array, name):
        """
        Emit the JSPropertySpec arrays (plus their pref guards) for `array`.
        """
        if len(array) == 0:
            return ""

        flags = "JSPROP_ENUMERATE | JSPROP_SHARED"
        if self.unforgeable:
            flags += " | JSPROP_PERMANENT"

        def getter(attr):
            # Static attributes call the generated native accessor directly;
            # instance attributes dispatch through the generic (possibly
            # lenient-this) getter via the per-attribute JSJitInfo.
            if self.static:
                accessor = 'get_' + self.descriptor.internalNameFor(attr.identifier.name)
                jitinfo = "0 as *const JSJitInfo"
            else:
                if attr.hasLenientThis():
                    accessor = "generic_lenient_getter"
                else:
                    accessor = "generic_getter"
                jitinfo = "&%s_getterinfo" % self.descriptor.internalNameFor(attr.identifier.name)

            return ("JSNativeWrapper { op: Some(%(native)s), info: %(info)s }"
                    % {"info": jitinfo,
                       "native": accessor})

        def setter(attr):
            # Readonly attributes without [PutForwards]/[Replaceable] get a
            # null setter.
            if (attr.readonly and not attr.getExtendedAttribute("PutForwards")
                    and not attr.getExtendedAttribute("Replaceable")):
                return "JSNativeWrapper { op: None, info: 0 as *const JSJitInfo }"

            if self.static:
                accessor = 'set_' + self.descriptor.internalNameFor(attr.identifier.name)
                jitinfo = "0 as *const JSJitInfo"
            else:
                if attr.hasLenientThis():
                    accessor = "generic_lenient_setter"
                else:
                    accessor = "generic_setter"
                jitinfo = "&%s_setterinfo" % self.descriptor.internalNameFor(attr.identifier.name)

            return ("JSNativeWrapper { op: Some(%(native)s), info: %(info)s }"
                    % {"info": jitinfo,
                       "native": accessor})

        def specData(attr):
            return (str_to_const_array(attr.identifier.name), flags, getter(attr),
                    setter(attr))

        return self.generateGuardedArray(
            array, name,
            '    JSPropertySpec {\n'
            '        name: %s as *const u8 as *const libc::c_char,\n'
            '        flags: (%s) as u8,\n'
            '        getter: %s,\n'
            '        setter: %s\n'
            '    }',
            '    JSPropertySpec {\n'
            '        name: 0 as *const libc::c_char,\n'
            '        flags: 0,\n'
            '        getter: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo },\n'
            '        setter: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo }\n'
            '    }',
            'JSPropertySpec',
            PropertyDefiner.getControllingCondition, specData)
class ConstDefiner(PropertyDefiner):
    """
    A class for defining constants on the interface object.
    """
    def __init__(self, descriptor, name):
        PropertyDefiner.__init__(self, descriptor, name)
        self.name = name
        self.regular = [member for member in descriptor.interface.members
                        if member.isConst()]

    def generateArray(self, array, name):
        """
        Emit the ConstantSpec arrays (plus their pref guards) for `array`.
        """
        if not array:
            return ""

        def specData(const):
            return (str_to_const_array(const.identifier.name),
                    convertConstIDLValueToJSVal(const.value))

        return self.generateGuardedArray(
            array, name,
            '    ConstantSpec { name: %s, value: %s }',
            None,
            'ConstantSpec',
            PropertyDefiner.getControllingCondition, specData)
# Matches the start of every line that contains at least one non-newline
# character, so that indentation can be inserted at line starts without
# padding blank lines with trailing whitespace.
lineStartDetector = re.compile(r"^(?=[^\n])", re.MULTILINE)
class CGIndenter(CGThing):
    """
    A CGThing wrapper that prefixes every non-empty line of its child's
    output with a fixed amount of indentation (four spaces by default).
    """
    def __init__(self, child, indentLevel=4):
        CGThing.__init__(self)
        self.child = child
        self.indent = " " * indentLevel

    def define(self):
        text = self.child.define()
        # Leave empty output untouched; lineStartDetector skips blank lines,
        # so no trailing whitespace is introduced.
        if not text:
            return text
        return re.sub(lineStartDetector, self.indent, text)
class CGWrapper(CGThing):
    """
    Generic CGThing that sandwiches another CGThing between prefix and
    suffix text.
    """
    def __init__(self, child, pre="", post="", reindent=False):
        CGThing.__init__(self)
        self.child = child
        self.pre = pre
        self.post = post
        self.reindent = reindent

    def define(self):
        body = self.child.define()
        if self.reindent:
            # Indent every continuation line by the width of the prefix so
            # the wrapped text lines up under it; the first line is left
            # alone (we don't use lineStartDetector for that reason).
            pad = " " * len(self.pre)
            body = stripTrailingWhitespace(body.replace("\n", "\n" + pad))
        return self.pre + body + self.post
class CGImports(CGWrapper):
    """
    Generates the appropriate import/use statements.
    """
    def __init__(self, child, descriptors, callbacks, dictionaries, enums, imports, config, ignored_warnings=None):
        """
        Adds a set of imports.

        The emitted `use` statements are derived from the types referenced by
        the given descriptors, callbacks and dictionaries, merged with the
        explicit `imports` list; `ignored_warnings` become a module-level
        #![allow(...)] attribute.
        """
        if ignored_warnings is None:
            # Generated code is not expected to be lint-clean; silence the
            # usual offenders by default.
            ignored_warnings = [
                'non_camel_case_types',
                'non_upper_case_globals',
                'unused_imports',
                'unused_variables',
                'unused_assignments',
                'unused_mut',
            ]

        def componentTypes(type):
            """Return the list of leaf types that `type` is built from."""
            if type.isType() and type.nullable():
                type = type.unroll()
            if type.isUnion():
                return type.flatMemberTypes
            if type.isDictionary():
                return [type] + getTypesFromDictionary(type)
            if type.isSequence():
                return componentTypes(type.inner)
            return [type]

        def isImportable(type):
            """Whether a `use` statement can be generated for this type."""
            if not type.isType():
                assert (type.isInterface() or type.isDictionary() or
                        type.isEnum() or type.isNamespace())
                return True
            # Builtins, sequences and unions have no importable module path.
            return not (type.builtin or type.isSequence() or type.isUnion())

        def relatedTypesForSignatures(method):
            """Collect the component types of every signature of `method`."""
            types = []
            for (returnType, arguments) in method.signatures():
                types += componentTypes(returnType)
                for arg in arguments:
                    types += componentTypes(arg.type)
            return types

        def getIdentifier(t):
            """Return the IDL identifier object for a type or WebIDL object."""
            if t.isType():
                if t.nullable():
                    t = t.inner
                if t.isCallback():
                    return t.callback.identifier
                return t.identifier
            assert t.isInterface() or t.isDictionary() or t.isEnum() or t.isNamespace()
            return t.identifier

        def removeWrapperAndNullableTypes(types):
            """Strip nullable/wrapper layers and drop non-importable types."""
            normalized = []
            for t in types:
                while (t.isType() and t.nullable()) or isinstance(t, IDLWrapperType):
                    t = t.inner
                if isImportable(t):
                    normalized += [t]
            return normalized

        types = []
        for d in descriptors:
            if not d.interface.isCallback():
                types += [d.interface]
            if d.interface.isIteratorInterface():
                types += [d.interface.iterableInterface]
            members = d.interface.members + d.interface.namedConstructors
            constructor = d.interface.ctor()
            if constructor:
                members += [constructor]
            if d.proxy:
                # Proxy descriptors also reference their special operations
                # (indexed/named getters and friends).
                members += [o for o in d.operations.values() if o]
            for m in members:
                if m.isMethod():
                    types += relatedTypesForSignatures(m)
                elif m.isAttr():
                    types += componentTypes(m.type)

        # Import the type names used in the callbacks that are being defined.
        for c in callbacks:
            types += relatedTypesForSignatures(c)

        # Import the type names used in the dictionaries that are being defined.
        for d in dictionaries:
            types += componentTypes(d)

        # Normalize the types we've collected and remove any ones which can't be imported.
        types = removeWrapperAndNullableTypes(types)

        descriptorProvider = config.getDescriptorProvider()
        extras = []
        for t in types:
            # Importing these types in the same module that defines them is an error.
            if t in dictionaries or t in enums:
                continue
            if t.isInterface() or t.isNamespace():
                name = getIdentifier(t).name
                descriptor = descriptorProvider.getDescriptor(name)
                if name != 'GlobalScope':
                    extras += [descriptor.path]
                parentName = descriptor.getParentName()
                if parentName:
                    # Parent interfaces are needed for upcasts and for the
                    # parent's binding module.
                    descriptor = descriptorProvider.getDescriptor(parentName)
                    extras += [descriptor.path, descriptor.bindingPath]
            elif t.isType() and t.isMozMap():
                extras += ['dom::bindings::mozmap::MozMap']
            else:
                if t.isEnum():
                    # Enums also expose a companion `FooValues` item.
                    extras += [getModuleFromObject(t) + '::' + getIdentifier(t).name + 'Values']
                extras += [getModuleFromObject(t) + '::' + getIdentifier(t).name]

        statements = []
        if len(ignored_warnings) > 0:
            statements.append('#![allow(%s)]' % ','.join(ignored_warnings))
        # Deduplicate and sort so the generated import block is stable.
        statements.extend('use %s;' % i for i in sorted(set(imports + extras)))

        CGWrapper.__init__(self, child,
                           pre='\n'.join(statements) + '\n\n')
class CGIfWrapper(CGWrapper):
    """Wrap a CGThing in a Rust `if <condition> { ... }` block."""
    def __init__(self, condition, child):
        # Build the "if <condition> {" header, re-indenting multi-line
        # conditions so they line up after the "if ".
        header = CGWrapper(CGGeneric(condition), pre="if ", post=" {\n",
                           reindent=True).define()
        CGWrapper.__init__(self, CGIndenter(child), pre=header, post="\n}")
class CGTemplatedType(CGWrapper):
    """Wrap a CGThing in a generic type instantiation: `Name<child>`."""
    def __init__(self, templateName, child):
        CGWrapper.__init__(self, child, pre="%s<" % templateName, post=">")
class CGNamespace(CGWrapper):
    """Wrap a CGThing in a Rust `mod` (optionally `pub mod`) block."""
    def __init__(self, namespace, child, public=False):
        visibility = "pub " if public else ""
        CGWrapper.__init__(self, child,
                           pre="%smod %s {\n" % (visibility, namespace),
                           post="} // mod %s" % namespace)

    @staticmethod
    def build(namespaces, child, public=False):
        """
        Static helper method to build multiple wrapped namespaces.
        """
        # Wrap from the innermost name outwards so that namespaces[0]
        # ends up as the outermost module.
        for name in reversed(namespaces):
            child = CGNamespace(name, child, public=public)
        return child
def DOMClassTypeId(desc):
    """Return the Rust `TopTypeId` initializer expression for `desc`."""
    path = "::dom::bindings::codegen::InheritTypes"
    chain = desc.prototypeChain
    if desc.hasDescendants():
        if desc.interface.getExtendedAttribute("Abstract"):
            return "%s::TopTypeId { abstract_: () }" % path
        name = desc.interface.identifier.name
        variant = "(%s::%sTypeId::%s)" % (path, name, name)
    elif len(chain) == 1:
        # A leaf type with no descendants has no per-type variant.
        return "%s::TopTypeId { alone: () }" % path
    else:
        variant = ""
    # Wrap each ancestor's TypeId enum around the variant built so far,
    # walking from the most-derived type up towards the root.
    bottom_up = list(reversed(chain))
    for (child, parent) in zip(bottom_up, bottom_up[1:]):
        variant = "(%s::%sTypeId::%s%s)" % (path, parent, child, variant)
    return "%s::TopTypeId { %s: %s }" % (path, chain[0].lower(), variant)
def DOMClass(descriptor):
    """Return the Rust initializer expression for this interface's DOMClass."""
    # Pad the chain out with ID::Last so every interface_chain array has the
    # same length. ID::Last is never the ID of any prototype, so it's safe
    # padding.
    chain = ['PrototypeList::ID::' + proto for proto in descriptor.prototypeChain]
    padding = descriptor.config.maxProtoChainLength - len(chain)
    chain.extend(['PrototypeList::ID::Last'] * padding)
    heap_size_fn = 'heap_size_of_raw_self_and_children::<%s>' % descriptor.concreteType
    globals_ = camel_to_upper_snake(descriptor.name) if descriptor.isGlobal() else 'EMPTY'
    return """\
DOMClass {
    interface_chain: [ %s ],
    type_id: %s,
    heap_size_of: %s as unsafe fn(_) -> _,
    global: InterfaceObjectMap::%s,
}""" % (', '.join(chain), DOMClassTypeId(descriptor), heap_size_fn, globals_)
class CGDOMJSClass(CGThing):
    """
    Generate a DOMJSClass for a given descriptor
    """
    def __init__(self, descriptor):
        CGThing.__init__(self)
        self.descriptor = descriptor

    def define(self):
        # Defaults for a plain DOM object: one reserved slot (holding the
        # native pointer) and no enumerate/resolve hooks.
        args = {
            "domClass": DOMClass(self.descriptor),
            "enumerateHook": "None",
            "finalizeHook": FINALIZE_HOOK_NAME,
            "flags": "0",
            "name": str_to_const_array(self.descriptor.interface.identifier.name),
            "resolveHook": "None",
            "slots": "1",
            "traceHook": TRACE_HOOK_NAME,
        }
        if self.descriptor.isGlobal():
            assert not self.descriptor.weakReferenceable
            # Globals lazily resolve/enumerate their members and use the
            # engine-provided global trace hook.
            args["enumerateHook"] = "Some(enumerate_global)"
            args["flags"] = "JSCLASS_IS_GLOBAL | JSCLASS_DOM_GLOBAL"
            args["slots"] = "JSCLASS_GLOBAL_SLOT_COUNT + 1"
            args["resolveHook"] = "Some(resolve_global)"
            args["traceHook"] = "js::jsapi::JS_GlobalObjectTraceHook"
        elif self.descriptor.weakReferenceable:
            # Weak-referenceable objects get a second reserved slot.
            args["slots"] = "2"
        return """\
static CLASS_OPS: js::jsapi::JSClassOps = js::jsapi::JSClassOps {
    addProperty: None,
    delProperty: None,
    getProperty: None,
    setProperty: None,
    enumerate: %(enumerateHook)s,
    resolve: %(resolveHook)s,
    mayResolve: None,
    finalize: Some(%(finalizeHook)s),
    call: None,
    hasInstance: None,
    construct: None,
    trace: Some(%(traceHook)s),
};
static Class: DOMJSClass = DOMJSClass {
    base: js::jsapi::JSClass {
        name: %(name)s as *const u8 as *const libc::c_char,
        flags: JSCLASS_IS_DOMJSCLASS | %(flags)s |
               (((%(slots)s) & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT)
               /* JSCLASS_HAS_RESERVED_SLOTS(%(slots)s) */,
        cOps: &CLASS_OPS,
        reserved: [0 as *mut _; 3],
    },
    dom_class: %(domClass)s
};""" % args
def str_to_const_array(s):
    """Render `s` as a NUL-terminated Rust byte-string literal."""
    return 'b"%s\\0"' % s
class CGPrototypeJSClass(CGThing):
    """
    Generates the static JSClass used for an interface's prototype object.
    """
    def __init__(self, descriptor):
        CGThing.__init__(self)
        self.descriptor = descriptor

    def define(self):
        # Interfaces with unforgeable members reserve one slot on the
        # prototype (it holds the unforgeable holder object).
        slot_count = 1 if self.descriptor.hasUnforgeableMembers else 0
        proto_name = str_to_const_array(self.descriptor.interface.identifier.name + "Prototype")
        return """\
static PrototypeClass: JSClass = JSClass {
    name: %(name)s as *const u8 as *const libc::c_char,
    flags:
        // JSCLASS_HAS_RESERVED_SLOTS(%(slotCount)s)
        (%(slotCount)s & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT,
    cOps: 0 as *const _,
    reserved: [0 as *mut os::raw::c_void; 3]
};
""" % {'name': proto_name, 'slotCount': slot_count}
class CGInterfaceObjectJSClass(CGThing):
    """
    Generates the static class object used for an interface object or
    namespace object.
    """
    def __init__(self, descriptor):
        assert descriptor.interface.hasInterfaceObject() and not descriptor.interface.isCallback()
        CGThing.__init__(self)
        self.descriptor = descriptor

    def define(self):
        if self.descriptor.interface.isNamespace():
            # Namespaces use a plain namespace object class; [ClassString]
            # overrides the default class name of "Object".
            class_string = self.descriptor.interface.getExtendedAttribute("ClassString")
            class_string = class_string[0] if class_string else "Object"
            return """\
static NAMESPACE_OBJECT_CLASS: NamespaceObjectClass = unsafe {
    NamespaceObjectClass::new(%s)
};
""" % str_to_const_array(class_string)
        # Interfaces with a [Constructor] get a callable class hook;
        # everything else throws when called.
        if self.descriptor.interface.ctor():
            constructor_behavior = "InterfaceConstructorBehavior::call(%s)" % CONSTRUCT_HOOK_NAME
        else:
            constructor_behavior = "InterfaceConstructorBehavior::throw()"
        name = self.descriptor.interface.identifier.name
        return """\
static INTERFACE_OBJECT_CLASS: NonCallbackInterfaceObjectClass =
    NonCallbackInterfaceObjectClass::new(
        &%(constructorBehavior)s,
        %(representation)s,
        PrototypeList::ID::%(id)s,
        %(depth)s);
""" % {
            "constructorBehavior": constructor_behavior,
            "id": name,
            "representation": 'b"function %s() {\\n [native code]\\n}"' % name,
            "depth": self.descriptor.prototypeDepth,
        }
class CGList(CGThing):
    """
    Concatenates the output of a list of CGThings, inserting an optional
    joiner string ("\n" is a common choice) between non-empty pieces.
    """
    def __init__(self, children, joiner=""):
        CGThing.__init__(self)
        # Materialize generators up front so define() stays repeatable and
        # append()/prepend() work as expected.
        self.children = list(children)
        self.joiner = joiner

    def append(self, child):
        self.children.append(child)

    def prepend(self, child):
        self.children.insert(0, child)

    def join(self, iterable):
        # Skip empty pieces so the joiner never appears twice in a row.
        return self.joiner.join(piece for piece in iterable if len(piece) > 0)

    def define(self):
        return self.join(kid.define() for kid in self.children if kid is not None)

    def __len__(self):
        return len(self.children)
class CGIfElseWrapper(CGList):
    """Wrap two CGThings in an `if ... { ... } else { ... }` construct."""
    def __init__(self, condition, ifTrue, ifFalse):
        elseBranch = CGWrapper(CGIndenter(ifFalse), pre=" else {\n", post="\n}")
        CGList.__init__(self, [CGIfWrapper(condition, ifTrue), elseBranch])
class CGGeneric(CGThing):
    """
    A CGThing that emits a fixed string verbatim.
    """
    def __init__(self, text):
        # The literal text returned by define(); read directly as an
        # attribute by some callers, so keep the name stable.
        self.text = text

    def define(self):
        return self.text
class CGCallbackTempRoot(CGGeneric):
    """
    Emits the expression `Name::new(cx, ${val}.get().to_object())`, wrapping
    the JS value in ${val} into the named callback type.
    """
    def __init__(self, name):
        expr = "%s::new(cx, ${val}.get().to_object())" % name
        CGGeneric.__init__(self, expr)
def getAllTypes(descriptors, dictionaries, callbacks, typedefs):
    """
    Yield a (type, descriptor, dictionary) tuple for every type we deal
    with. descriptor and dictionary are None when the type does not come
    from one; they are never both non-None.
    """
    for descriptor in descriptors:
        for ty in getTypesFromDescriptor(descriptor):
            yield (ty, descriptor, None)
    for dictionary in dictionaries:
        for ty in getTypesFromDictionary(dictionary):
            yield (ty, None, dictionary)
    for callback in callbacks:
        for ty in getTypesFromCallback(callback):
            yield (ty, None, None)
    for typedef in typedefs:
        yield (typedef.innerType, None, None)
def UnionTypes(descriptors, dictionaries, callbacks, typedefs, config):
    """
    Returns a CGList containing CGUnionStructs for every union.
    """
    # Imports needed by every generated union module.
    imports = [
        'dom',
        'dom::bindings::codegen::PrototypeList',
        'dom::bindings::conversions::ConversionResult',
        'dom::bindings::conversions::FromJSValConvertible',
        'dom::bindings::conversions::ToJSValConvertible',
        'dom::bindings::conversions::ConversionBehavior',
        'dom::bindings::conversions::StringificationBehavior',
        'dom::bindings::conversions::root_from_handlevalue',
        'dom::bindings::error::throw_not_in_union',
        'dom::bindings::js::Root',
        'dom::bindings::mozmap::MozMap',
        'dom::bindings::str::ByteString',
        'dom::bindings::str::DOMString',
        'dom::bindings::str::USVString',
        'dom::types::*',
        'js::error::throw_type_error',
        'js::jsapi::HandleValue',
        'js::jsapi::Heap',
        'js::jsapi::JSContext',
        'js::jsapi::JSObject',
        'js::jsapi::MutableHandleValue',
        'js::jsval::JSVal',
    ]

    # Now find all the things we'll need as arguments and return values because
    # we need to wrap or unwrap them.
    unionStructs = dict()
    for (t, descriptor, dictionary) in getAllTypes(descriptors, dictionaries, callbacks, typedefs):
        if dictionary:
            imports.append("%s::%s" % (CGDictionary.makeModuleName(dictionary),
                                       CGDictionary.makeDictionaryName(dictionary)))
        t = t.unroll()
        if not t.isUnion():
            continue
        # Key on the stringified type so each union is only generated once,
        # no matter how many signatures mention it.
        name = str(t)
        if name not in unionStructs:
            provider = descriptor or config.getDescriptorProvider()
            unionStructs[name] = CGList([
                CGUnionStruct(t, provider),
                CGUnionConversionStruct(t, provider)
            ])

    # Sort unionStructs by key, retrieve value
    unionStructs = (i[1] for i in sorted(unionStructs.items(), key=operator.itemgetter(0)))

    return CGImports(CGList(unionStructs, "\n\n"),
                     descriptors=[],
                     callbacks=[],
                     dictionaries=[],
                     enums=[],
                     imports=imports,
                     config=config,
                     ignored_warnings=[])
class Argument():
    """
    A class for outputting the type and name of an argument
    """
    def __init__(self, argType, name, default=None, mutable=False):
        self.argType = argType
        self.name = name
        self.default = default
        self.mutable = mutable

    def declare(self):
        """Render the argument as it appears in a Rust fn signature."""
        decl = ('mut ' if self.mutable else '') + self.name
        if self.argType:
            decl += ': ' + self.argType
        # XXXjdm Support default arguments somehow :/
        # if self.default is not None:
        #     decl += " = " + self.default
        return decl

    def define(self):
        return self.argType + ' ' + self.name
class CGAbstractMethod(CGThing):
    """
    An abstract class for generating code for a method. Subclasses
    should override definition_body to create the actual code.

    descriptor is the descriptor for the interface the method is associated with

    name is the name of the method as a string

    returnType is the IDLType of the return value

    args is a list of Argument objects

    inline should be True to generate an inline method, whose body is
    part of the declaration.

    alwaysInline should be True to generate an inline method annotated with
    MOZ_ALWAYS_INLINE.

    If templateArgs is not None it should be a list of strings containing
    template arguments, and the function will be templatized using those
    arguments.

    docs is None or documentation for the method in a string.

    unsafe is used to add the decorator 'unsafe' to a function, giving as a result
    an 'unsafe fn()' declaration.
    """
    def __init__(self, descriptor, name, returnType, args, inline=False,
                 alwaysInline=False, extern=False, unsafe=False, pub=False,
                 templateArgs=None, docs=None, doesNotPanic=False):
        CGThing.__init__(self)
        self.descriptor = descriptor
        self.name = name
        self.returnType = returnType
        self.args = args
        self.alwaysInline = alwaysInline
        self.extern = extern
        # extern fns are always emitted as unsafe.
        self.unsafe = extern or unsafe
        self.templateArgs = templateArgs
        self.pub = pub
        self.docs = docs
        # extern fns get wrapped in wrap_panic unless doesNotPanic is set,
        # so panics don't unwind across the FFI boundary.
        self.catchPanic = self.extern and not doesNotPanic

    def _argstring(self):
        # Comma-separated argument declarations for the fn signature.
        return ', '.join([a.declare() for a in self.args])

    def _template(self):
        if self.templateArgs is None:
            return ''
        return '<%s>\n' % ', '.join(self.templateArgs)

    def _docs(self):
        # Turn the docs string into `///` doc-comment lines.
        if self.docs is None:
            return ''

        lines = self.docs.splitlines()
        return ''.join('/// %s\n' % line for line in lines)

    def _decorators(self):
        # Decorators in declaration order: #[inline] pub unsafe extern.
        decorators = []
        if self.alwaysInline:
            decorators.append('#[inline]')

        if self.pub:
            decorators.append('pub')

        if self.unsafe:
            decorators.append('unsafe')

        if self.extern:
            decorators.append('extern')

        if not decorators:
            return ''
        return ' '.join(decorators) + ' '

    def _returnType(self):
        return (" -> %s" % self.returnType) if self.returnType != "void" else ""

    def define(self):
        body = self.definition_body()

        if self.catchPanic:
            # wrap_panic needs a fallback value for the early-return case:
            # unit for void fns, false otherwise.
            body = CGWrapper(CGIndenter(body),
                             pre="return wrap_panic(panic::AssertUnwindSafe(|| {\n",
                             post=("""\n}), %s);""" % ("()" if self.returnType == "void" else "false")))

        return CGWrapper(CGIndenter(body),
                         pre=self.definition_prologue(),
                         post=self.definition_epilogue()).define()

    def definition_prologue(self):
        return "%s%sfn %s%s(%s)%s {\n" % (self._docs(), self._decorators(),
                                          self.name, self._template(),
                                          self._argstring(), self._returnType())

    def definition_epilogue(self):
        return "\n}\n"

    def definition_body(self):
        raise NotImplementedError  # Override me!
class CGConstructorEnabled(CGAbstractMethod):
    """
    A method for testing whether we should be exposing this interface object.
    This can perform various tests depending on what conditions are specified
    on the interface.
    """
    def __init__(self, descriptor):
        CGAbstractMethod.__init__(self, descriptor,
                                  'ConstructorEnabled', 'bool',
                                  [Argument("*mut JSContext", "aCx"),
                                   Argument("HandleObject", "aObj")],
                                  unsafe=True)

    def definition_body(self):
        iface = self.descriptor.interface
        # Every interface is gated on its exposure set at minimum.
        exposure_bits = " | ".join(sorted(
            "InterfaceObjectMap::" + camel_to_upper_snake(i) for i in iface.exposureSet
        ))
        checks = ["is_exposed_in(aObj, %s)" % exposure_bits]
        # [Pref="..."] gates the interface on a boolean preference.
        pref = iface.getExtendedAttribute("Pref")
        if pref:
            assert isinstance(pref, list) and len(pref) == 1
            checks.append('PREFS.get("%s").as_boolean().unwrap_or(false)' % pref[0])
        # [Func="..."] delegates the decision to a native function.
        func = iface.getExtendedAttribute("Func")
        if func:
            assert isinstance(func, list) and len(func) == 1
            checks.append("%s(aCx, aObj)" % func[0])
        return CGList((CGGeneric(check) for check in checks), " &&\n")
def CreateBindingJSObject(descriptor, parent=None):
    """
    Return the Rust code that allocates the JS reflector object for a
    non-global interface and stores the native pointer in it.

    `parent` names the in-scope variable used as the proxy's parent object;
    it is only used for proxy descriptors.
    """
    assert not descriptor.isGlobal()
    # Keep the native alive across the allocation via RootedTraceable.
    create = "let raw = Box::into_raw(object);\nlet _rt = RootedTraceable::new(&*raw);\n"
    if descriptor.proxy:
        # Proxies store the native pointer in their private value rather
        # than in a reserved slot.
        create += """
let handler = RegisterBindings::PROXY_HANDLERS[PrototypeList::Proxies::%s as usize];
rooted!(in(cx) let private = PrivateValue(raw as *const libc::c_void));
let obj = NewProxyObject(cx, handler,
                         private.handle(),
                         proto.get(), %s.get(),
                         ptr::null_mut(), ptr::null_mut());
assert!(!obj.is_null());
rooted!(in(cx) let obj = obj);\
""" % (descriptor.name, parent)
    else:
        create += ("rooted!(in(cx) let obj = JS_NewObjectWithGivenProto(\n"
                   "    cx, &Class.base as *const JSClass, proto.handle()));\n"
                   "assert!(!obj.is_null());\n"
                   "\n"
                   "JS_SetReservedSlot(obj.get(), DOM_OBJECT_SLOT,\n"
                   "                   PrivateValue(raw as *const libc::c_void));")
    if descriptor.weakReferenceable:
        # Initialize the weak-reference slot to a null pointer.
        create += """
JS_SetReservedSlot(obj.get(), DOM_WEAK_SLOT, PrivateValue(ptr::null()));"""
    return create
def InitUnforgeablePropertiesOnHolder(descriptor, properties):
    """
    Define the unforgeable properties on the unforgeable holder for
    the interface represented by descriptor.

    properties is a PropertyArrays instance.
    """
    defines = []
    members = [
        ("define_guarded_properties(cx, unforgeable_holder.handle(), %s);",
         properties.unforgeable_attrs),
        ("define_guarded_methods(cx, unforgeable_holder.handle(), %s);",
         properties.unforgeable_methods),
    ]
    for (template, array) in members:
        # Only emit a define call when the array is non-empty.
        if array.length() > 0:
            defines.append(CGGeneric(template % array.variableName()))
    return CGList(defines, "\n")
def CopyUnforgeablePropertiesToInstance(descriptor):
    """
    Return the Rust code that copies the unforgeable properties from the
    unforgeable holder for this interface onto the instance object we have.
    Returns "" when the interface has no unforgeable members.
    """
    if not descriptor.hasUnforgeableMembers:
        return ""
    pieces = []
    # For proxies, we want to define on the expando object, not directly on
    # the reflector, so we can make sure we don't get confused by named
    # getters.
    if descriptor.proxy:
        pieces.append("""\
rooted!(in(cx) let mut expando = ptr::null_mut());
ensure_expando_object(cx, obj.handle(), expando.handle_mut());
""")
        target = "expando"
    else:
        target = "obj"
    # We can't do the fast copy for globals, because we can't allocate the
    # unforgeable holder for those with the right JSClass. Luckily, there
    # aren't too many globals being created.
    if descriptor.isGlobal():
        copy_fn = "JS_CopyPropertiesFrom"
    else:
        copy_fn = "JS_InitializePropertiesFromCompatibleNativeObject"
    pieces.append("""\
rooted!(in(cx) let mut unforgeable_holder = ptr::null_mut());
unforgeable_holder.handle_mut().set(
    JS_GetReservedSlot(proto.get(), DOM_PROTO_UNFORGEABLE_HOLDER_SLOT).to_object());
assert!(%(copyFunc)s(cx, %(obj)s.handle(), unforgeable_holder.handle()));
""" % {'copyFunc': copy_fn, 'obj': target})
    return "".join(pieces)
class CGWrapMethod(CGAbstractMethod):
    """
    Class that generates the FooBinding::Wrap function for non-callback
    interfaces.
    """
    def __init__(self, descriptor):
        assert not descriptor.interface.isCallback()
        assert not descriptor.isGlobal()
        args = [Argument('*mut JSContext', 'cx'),
                Argument('&GlobalScope', 'scope'),
                Argument("Box<%s>" % descriptor.concreteType, 'object')]
        CGAbstractMethod.__init__(self, descriptor, 'Wrap',
                                  'Root<%s>' % descriptor.concreteType, args,
                                  pub=True, unsafe=True)

    def definition_body(self):
        substitutions = {
            'createObject': CreateBindingJSObject(self.descriptor, "scope"),
            'copyUnforgeable': CopyUnforgeablePropertiesToInstance(self.descriptor),
        }
        return CGGeneric("""\
let scope = scope.reflector().get_jsobject();
assert!(!scope.get().is_null());
assert!(((*get_object_class(scope.get())).flags & JSCLASS_IS_GLOBAL) != 0);
rooted!(in(cx) let mut proto = ptr::null_mut());
let _ac = JSAutoCompartment::new(cx, scope.get());
GetProtoObject(cx, scope, proto.handle_mut());
assert!(!proto.is_null());
%(createObject)s
%(copyUnforgeable)s
(*raw).init_reflector(obj.get());
Root::from_ref(&*raw)""" % substitutions)
class CGWrapGlobalMethod(CGAbstractMethod):
    """
    Class that generates the FooBinding::Wrap function for global interfaces.
    """
    def __init__(self, descriptor, properties):
        assert not descriptor.interface.isCallback()
        assert descriptor.isGlobal()
        # Globals take no `scope` argument: the object being created is
        # itself the scope.
        args = [Argument('*mut JSContext', 'cx'),
                Argument("Box<%s>" % descriptor.concreteType, 'object')]
        retval = 'Root<%s>' % descriptor.concreteType
        CGAbstractMethod.__init__(self, descriptor, 'Wrap', retval, args,
                                  pub=True, unsafe=True)
        # properties: a PropertyArrays instance for this interface.
        self.properties = properties

    def definition_body(self):
        values = {
            "unforgeable": CopyUnforgeablePropertiesToInstance(self.descriptor)
        }

        pairs = [
            ("define_guarded_properties", self.properties.attrs),
            ("define_guarded_methods", self.properties.methods),
            ("define_guarded_constants", self.properties.consts)
        ]
        # Globals define their members directly on the instance object,
        # skipping empty arrays.
        members = ["%s(cx, obj.handle(), %s);" % (function, array.variableName())
                   for (function, array) in pairs if array.length() > 0]
        values["members"] = "\n".join(members)

        return CGGeneric("""\
let raw = Box::into_raw(object);
let _rt = RootedTraceable::new(&*raw);
rooted!(in(cx) let mut obj = ptr::null_mut());
create_global_object(
    cx,
    &Class.base,
    raw as *const libc::c_void,
    _trace,
    obj.handle_mut());
assert!(!obj.is_null());
(*raw).init_reflector(obj.get());
let _ac = JSAutoCompartment::new(cx, obj.get());
rooted!(in(cx) let mut proto = ptr::null_mut());
GetProtoObject(cx, obj.handle(), proto.handle_mut());
assert!(JS_SplicePrototype(cx, obj.handle(), proto.handle()));
let mut immutable = false;
assert!(JS_SetImmutablePrototype(cx, obj.handle(), &mut immutable));
assert!(immutable);
%(members)s
%(unforgeable)s
Root::from_ref(&*raw)\
""" % values)
class CGIDLInterface(CGThing):
    """
    Class for codegen of an implementation of the IDLInterface trait.
    """
    def __init__(self, descriptor):
        CGThing.__init__(self)
        self.descriptor = descriptor

    def define(self):
        interface = self.descriptor.interface
        name = self.descriptor.concreteType
        has_descendants = (interface.getUserData("hasConcreteDescendant", False) or
                           interface.getUserData("hasProxyDescendant", False))
        if has_descendants:
            # Types with descendants match any DOMClass whose interface
            # chain contains this type's ID at the right depth.
            depth = self.descriptor.prototypeDepth
            check = "class.interface_chain[%s] == PrototypeList::ID::%s" % (depth, name)
        elif self.descriptor.proxy:
            check = "class as *const _ == &Class as *const _"
        else:
            check = "class as *const _ == &Class.dom_class as *const _"
        return """\
impl IDLInterface for %(name)s {
    #[inline]
    fn derives(class: &'static DOMClass) -> bool {
        %(check)s
    }
}
impl PartialEq for %(name)s {
    fn eq(&self, other: &%(name)s) -> bool {
        self as *const %(name)s == &*other
    }
}
""" % {'check': check, 'name': name}
class CGAbstractExternMethod(CGAbstractMethod):
    """
    Abstract base class for codegen of implementation-only (no
    declaration) static methods.
    """
    def __init__(self, descriptor, name, returnType, args, doesNotPanic=False):
        CGAbstractMethod.__init__(self, descriptor, name, returnType, args,
                                  inline=False, extern=True,
                                  doesNotPanic=doesNotPanic)
class PropertyArrays():
    """
    Aggregates the property-definition arrays (methods, attributes and
    constants, in their static/unforgeable variants) for one interface.
    """
    def __init__(self, descriptor):
        self.static_methods = MethodDefiner(descriptor, "StaticMethods",
                                            static=True, unforgeable=False)
        self.static_attrs = AttrDefiner(descriptor, "StaticAttributes",
                                        static=True, unforgeable=False)
        self.methods = MethodDefiner(descriptor, "Methods", static=False, unforgeable=False)
        self.unforgeable_methods = MethodDefiner(descriptor, "UnforgeableMethods",
                                                 static=False, unforgeable=True)
        self.attrs = AttrDefiner(descriptor, "Attributes", static=False, unforgeable=False)
        self.unforgeable_attrs = AttrDefiner(descriptor, "UnforgeableAttributes",
                                             static=False, unforgeable=True)
        self.consts = ConstDefiner(descriptor, "Constants")
        # NOTE: a stray no-op `pass` statement was removed here.

    @staticmethod
    def arrayNames():
        """Names of the array attributes, in the order they are emitted."""
        return [
            "static_methods",
            "static_attrs",
            "methods",
            "unforgeable_methods",
            "attrs",
            "unforgeable_attrs",
            "consts",
        ]

    def variableNames(self):
        """Map each array name to its generated Rust variable name."""
        return {name: getattr(self, name).variableName()
                for name in self.arrayNames()}

    def __str__(self):
        # Concatenate the stringified arrays in emission order.
        return "".join(str(getattr(self, name)) for name in self.arrayNames())
class CGCreateInterfaceObjectsMethod(CGAbstractMethod):
"""
Generate the CreateInterfaceObjects method for an interface descriptor.
properties should be a PropertyArrays instance.
"""
    def __init__(self, descriptor, properties, haveUnscopables):
        # properties: a PropertyArrays instance for this interface.
        # haveUnscopables: whether an `unscopable_names` slice is in scope
        # for the generated code to reference.
        args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'global'),
                Argument('*mut ProtoOrIfaceArray', 'cache')]
        CGAbstractMethod.__init__(self, descriptor, 'CreateInterfaceObjects', 'void', args,
                                  unsafe=True)
        self.properties = properties
        self.haveUnscopables = haveUnscopables
def definition_body(self):
name = self.descriptor.interface.identifier.name
if self.descriptor.interface.isNamespace():
if self.descriptor.interface.getExtendedAttribute("ProtoObjectHack"):
proto = "JS_GetObjectPrototype(cx, global)"
else:
proto = "JS_NewPlainObject(cx)"
if self.properties.static_methods.length():
methods = self.properties.static_methods.variableName()
else:
methods = "&[]"
return CGGeneric("""\
rooted!(in(cx) let proto = %(proto)s);
assert!(!proto.is_null());
rooted!(in(cx) let mut namespace = ptr::null_mut());
create_namespace_object(cx, global, proto.handle(), &NAMESPACE_OBJECT_CLASS,
%(methods)s, %(name)s, namespace.handle_mut());
assert!(!namespace.is_null());
assert!((*cache)[PrototypeList::Constructor::%(id)s as usize].is_null());
(*cache)[PrototypeList::Constructor::%(id)s as usize] = namespace.get();
<*mut JSObject>::post_barrier((*cache).as_mut_ptr().offset(PrototypeList::Constructor::%(id)s as isize),
ptr::null_mut(),
namespace.get());
""" % {"id": MakeNativeName(name), "methods": methods, "name": str_to_const_array(name), "proto": proto})
if self.descriptor.interface.isCallback():
assert not self.descriptor.interface.ctor() and self.descriptor.interface.hasConstants()
return CGGeneric("""\
rooted!(in(cx) let mut interface = ptr::null_mut());
create_callback_interface_object(cx, global, sConstants, %(name)s, interface.handle_mut());
assert!(!interface.is_null());
assert!((*cache)[PrototypeList::Constructor::%(id)s as usize].is_null());
(*cache)[PrototypeList::Constructor::%(id)s as usize] = interface.get();
<*mut JSObject>::post_barrier((*cache).as_mut_ptr().offset(PrototypeList::Constructor::%(id)s as isize),
ptr::null_mut(),
interface.get());
""" % {"id": name, "name": str_to_const_array(name)})
parentName = self.descriptor.getParentName()
if not parentName:
if self.descriptor.interface.getExtendedAttribute("ExceptionClass"):
getPrototypeProto = "prototype_proto.set(JS_GetErrorPrototype(cx))"
elif self.descriptor.interface.isIteratorInterface():
getPrototypeProto = "prototype_proto.set(JS_GetIteratorPrototype(cx))"
else:
getPrototypeProto = "prototype_proto.set(JS_GetObjectPrototype(cx, global))"
else:
getPrototypeProto = ("%s::GetProtoObject(cx, global, prototype_proto.handle_mut())" %
toBindingNamespace(parentName))
code = [CGGeneric("""\
rooted!(in(cx) let mut prototype_proto = ptr::null_mut());
%s;
assert!(!prototype_proto.is_null());""" % getPrototypeProto)]
properties = {
"id": name,
"unscopables": "unscopable_names" if self.haveUnscopables else "&[]"
}
for arrayName in self.properties.arrayNames():
array = getattr(self.properties, arrayName)
if array.length():
properties[arrayName] = array.variableName()
else:
properties[arrayName] = "&[]"
if self.descriptor.isGlobal():
assert not self.haveUnscopables
proto_properties = {
"attrs": "&[]",
"consts": "&[]",
"id": name,
"methods": "&[]",
"unscopables": "&[]",
}
else:
proto_properties = properties
code.append(CGGeneric("""
rooted!(in(cx) let mut prototype = ptr::null_mut());
create_interface_prototype_object(cx,
prototype_proto.handle(),
&PrototypeClass,
%(methods)s,
%(attrs)s,
%(consts)s,
%(unscopables)s,
prototype.handle_mut());
assert!(!prototype.is_null());
assert!((*cache)[PrototypeList::ID::%(id)s as usize].is_null());
(*cache)[PrototypeList::ID::%(id)s as usize] = prototype.get();
<*mut JSObject>::post_barrier((*cache).as_mut_ptr().offset(PrototypeList::ID::%(id)s as isize),
ptr::null_mut(),
prototype.get());
""" % proto_properties))
if self.descriptor.interface.hasInterfaceObject():
properties["name"] = str_to_const_array(name)
if self.descriptor.interface.ctor():
properties["length"] = methodLength(self.descriptor.interface.ctor())
else:
properties["length"] = 0
parentName = self.descriptor.getParentName()
if parentName:
parentName = toBindingNamespace(parentName)
code.append(CGGeneric("""
rooted!(in(cx) let mut interface_proto = ptr::null_mut());
%s::GetConstructorObject(cx, global, interface_proto.handle_mut());""" % parentName))
else:
code.append(CGGeneric("""
rooted!(in(cx) let interface_proto = JS_GetFunctionPrototype(cx, global));"""))
code.append(CGGeneric("""\
assert!(!interface_proto.is_null());
rooted!(in(cx) let mut interface = ptr::null_mut());
create_noncallback_interface_object(cx,
global,
interface_proto.handle(),
&INTERFACE_OBJECT_CLASS,
%(static_methods)s,
%(static_attrs)s,
%(consts)s,
prototype.handle(),
%(name)s,
%(length)s,
interface.handle_mut());
assert!(!interface.is_null());""" % properties))
if self.descriptor.hasDescendants():
code.append(CGGeneric("""\
assert!((*cache)[PrototypeList::Constructor::%(id)s as usize].is_null());
(*cache)[PrototypeList::Constructor::%(id)s as usize] = interface.get();
<*mut JSObject>::post_barrier((*cache).as_mut_ptr().offset(PrototypeList::Constructor::%(id)s as isize),
ptr::null_mut(),
interface.get());
""" % properties))
aliasedMembers = [m for m in self.descriptor.interface.members if m.isMethod() and m.aliases]
if aliasedMembers:
def defineAlias(alias):
if alias == "@@iterator":
symbolJSID = "RUST_SYMBOL_TO_JSID(GetWellKnownSymbol(cx, SymbolCode::iterator))"
getSymbolJSID = CGGeneric(fill("rooted!(in(cx) let iteratorId = ${symbolJSID});",
symbolJSID=symbolJSID))
defineFn = "JS_DefinePropertyById2"
prop = "iteratorId.handle()"
elif alias.startswith("@@"):
raise TypeError("Can't handle any well-known Symbol other than @@iterator")
else:
getSymbolJSID = None
defineFn = "JS_DefineProperty"
prop = '"%s"' % alias
return CGList([
getSymbolJSID,
# XXX If we ever create non-enumerable properties that can
# be aliased, we should consider making the aliases
# match the enumerability of the property being aliased.
CGGeneric(fill(
"""
assert!(${defineFn}(cx, prototype.handle(), ${prop}, aliasedVal.handle(),
JSPROP_ENUMERATE, None, None));
""",
defineFn=defineFn,
prop=prop))
], "\n")
def defineAliasesFor(m):
return CGList([
CGGeneric(fill(
"""
assert!(JS_GetProperty(cx, prototype.handle(),
${prop} as *const u8 as *const _,
aliasedVal.handle_mut()));
""",
prop=str_to_const_array(m.identifier.name)))
] + [defineAlias(alias) for alias in sorted(m.aliases)])
defineAliases = CGList([
CGGeneric(fill("""
// Set up aliases on the interface prototype object we just created.
""")),
CGGeneric("rooted!(in(cx) let mut aliasedVal = UndefinedValue());\n\n")
] + [defineAliasesFor(m) for m in sorted(aliasedMembers)])
code.append(defineAliases)
constructors = self.descriptor.interface.namedConstructors
if constructors:
decl = "let named_constructors: [(ConstructorClassHook, &'static [u8], u32); %d]" % len(constructors)
specs = []
for constructor in constructors:
hook = CONSTRUCT_HOOK_NAME + "_" + constructor.identifier.name
name = str_to_const_array(constructor.identifier.name)
length = methodLength(constructor)
specs.append(CGGeneric("(%s as ConstructorClassHook, %s, %d)" % (hook, name, length)))
values = CGIndenter(CGList(specs, "\n"), 4)
code.append(CGWrapper(values, pre="%s = [\n" % decl, post="\n];"))
code.append(CGGeneric("create_named_constructors(cx, global, &named_constructors, prototype.handle());"))
if self.descriptor.hasUnforgeableMembers:
# We want to use the same JSClass and prototype as the object we'll
# end up defining the unforgeable properties on in the end, so that
# we can use JS_InitializePropertiesFromCompatibleNativeObject to do
# a fast copy. In the case of proxies that's null, because the
# expando object is a vanilla object, but in the case of other DOM
# objects it's whatever our class is.
#
# Also, for a global we can't use the global's class; just use
# nullpr and when we do the copy off the holder we'll take a slower
# path. This also means that we don't need to worry about matching
# the prototype.
if self.descriptor.proxy or self.descriptor.isGlobal():
holderClass = "ptr::null()"
holderProto = "HandleObject::null()"
else:
holderClass = "&Class.base as *const JSClass"
holderProto = "prototype.handle()"
code.append(CGGeneric("""
rooted!(in(cx) let mut unforgeable_holder = ptr::null_mut());
unforgeable_holder.handle_mut().set(
JS_NewObjectWithoutMetadata(cx, %(holderClass)s, %(holderProto)s));
assert!(!unforgeable_holder.is_null());
""" % {'holderClass': holderClass, 'holderProto': holderProto}))
code.append(InitUnforgeablePropertiesOnHolder(self.descriptor, self.properties))
code.append(CGGeneric("""\
JS_SetReservedSlot(prototype.get(), DOM_PROTO_UNFORGEABLE_HOLDER_SLOT,
ObjectValue(unforgeable_holder.get()))"""))
return CGList(code, "\n")
class CGGetPerInterfaceObject(CGAbstractMethod):
    """
    A method for getting a per-interface object (a prototype object or interface
    constructor object).

    Emits an unsafe `fn(cx, global, rval)` that reads the global's
    proto-or-iface cache and lazily populates it on a miss via
    CreateInterfaceObjects. The result is returned through `rval`.
    """
    def __init__(self, descriptor, name, idPrefix="", pub=False):
        args = [Argument('*mut JSContext', 'cx'),
                Argument('HandleObject', 'global'),
                Argument('MutableHandleObject', 'rval')]
        CGAbstractMethod.__init__(self, descriptor, name,
                                  'void', args, pub=pub, unsafe=True)
        # Index into the per-global cache array, e.g. "PrototypeList::ID::Foo"
        # or "PrototypeList::Constructor::Foo" depending on idPrefix.
        self.id = idPrefix + "::" + MakeNativeName(self.descriptor.name)

    def definition_body(self):
        return CGGeneric("""
assert!(((*get_object_class(global.get())).flags & JSCLASS_DOM_GLOBAL) != 0);

/* Check to see whether the interface objects are already installed */
let proto_or_iface_array = get_proto_or_iface_array(global.get());
rval.set((*proto_or_iface_array)[%(id)s as usize]);
if !rval.get().is_null() {
    return;
}

CreateInterfaceObjects(cx, global, proto_or_iface_array);
rval.set((*proto_or_iface_array)[%(id)s as usize]);
assert!(!rval.get().is_null());
""" % {"id": self.id})
class CGGetProtoObjectMethod(CGGetPerInterfaceObject):
    """
    A method for getting the interface prototype object.
    """
    def __init__(self, descriptor):
        CGGetPerInterfaceObject.__init__(self, descriptor, "GetProtoObject",
                                         "PrototypeList::ID", pub=True)

    def definition_body(self):
        # Prepend an explanatory comment to the shared lazy-lookup body.
        header = CGGeneric("""\
/* Get the interface prototype object for this class. This will create the
object as needed. */""")
        body = CGGetPerInterfaceObject.definition_body(self)
        return CGList([header, body])
class CGGetConstructorObjectMethod(CGGetPerInterfaceObject):
    """
    A method for getting the interface constructor object.
    """
    def __init__(self, descriptor):
        CGGetPerInterfaceObject.__init__(self, descriptor, "GetConstructorObject",
                                         "PrototypeList::Constructor",
                                         pub=True)

    def definition_body(self):
        # Prepend an explanatory comment to the shared lazy-lookup body.
        header = CGGeneric("""\
/* Get the interface object for this class. This will create the object as
needed. */""")
        return CGList([header, CGGetPerInterfaceObject.definition_body(self)])
class CGDefineProxyHandler(CGAbstractMethod):
    """
    A method to create and cache the proxy trap for a given interface.

    Emits `DefineProxyHandler() -> *const libc::c_void`, which fills in a
    ProxyTraps table and passes it to CreateProxyHandler together with the
    interface's Class data.
    """
    def __init__(self, descriptor):
        # Only proxy-based interfaces (those with indexed/named operations)
        # get a proxy handler.
        assert descriptor.proxy
        CGAbstractMethod.__init__(self, descriptor, 'DefineProxyHandler',
                                  '*const libc::c_void', [],
                                  pub=True, unsafe=True)

    def define(self):
        return CGAbstractMethod.define(self)

    def definition_body(self):
        # Use the generic proxyhandler traps unless the interface declares
        # special operations that need interface-specific implementations.
        customDefineProperty = 'proxyhandler::define_property'
        if self.descriptor.operations['IndexedSetter'] or self.descriptor.operations['NamedSetter']:
            customDefineProperty = 'defineProperty'

        customDelete = 'proxyhandler::delete'
        if self.descriptor.operations['NamedDeleter']:
            customDelete = 'delete'

        getOwnEnumerablePropertyKeys = "own_property_keys"
        if self.descriptor.interface.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
            # Named properties must be excluded from enumeration, so a custom
            # trap is emitted for that case.
            getOwnEnumerablePropertyKeys = "getOwnEnumerablePropertyKeys"

        args = {
            "defineProperty": customDefineProperty,
            "delete": customDelete,
            "getOwnEnumerablePropertyKeys": getOwnEnumerablePropertyKeys,
            "trace": TRACE_HOOK_NAME,
            "finalize": FINALIZE_HOOK_NAME,
        }

        return CGGeneric("""\
let traps = ProxyTraps {
    enter: None,
    getOwnPropertyDescriptor: Some(getOwnPropertyDescriptor),
    defineProperty: Some(%(defineProperty)s),
    ownPropertyKeys: Some(own_property_keys),
    delete_: Some(%(delete)s),
    enumerate: None,
    getPrototypeIfOrdinary: Some(proxyhandler::get_prototype_if_ordinary),
    preventExtensions: Some(proxyhandler::prevent_extensions),
    isExtensible: Some(proxyhandler::is_extensible),
    has: None,
    get: Some(get),
    set: None,
    call: None,
    construct: None,
    getPropertyDescriptor: Some(get_property_descriptor),
    hasOwn: Some(hasOwn),
    getOwnEnumerablePropertyKeys: Some(%(getOwnEnumerablePropertyKeys)s),
    nativeCall: None,
    hasInstance: None,
    objectClassIs: None,
    className: Some(className),
    fun_toString: None,
    boxedValue_unbox: None,
    defaultValue: None,
    trace: Some(%(trace)s),
    finalize: Some(%(finalize)s),
    objectMoved: None,
    isCallable: None,
    isConstructor: None,
};

CreateProxyHandler(&traps, Class.as_void_ptr())\
""" % args)
class CGDefineDOMInterfaceMethod(CGAbstractMethod):
    """
    A method for resolve hooks to try to lazily define the interface object for
    a given interface.
    """
    def __init__(self, descriptor):
        assert descriptor.interface.hasInterfaceObject()
        args = [
            Argument('*mut JSContext', 'cx'),
            Argument('HandleObject', 'global'),
        ]
        CGAbstractMethod.__init__(self, descriptor, 'DefineDOMInterface',
                                  'void', args, pub=True, unsafe=True)

    def define(self):
        return CGAbstractMethod.define(self)

    def definition_body(self):
        # Callback interfaces and namespaces have no prototype object, so
        # force creation through the constructor object instead.
        if self.descriptor.interface.isCallback() or self.descriptor.interface.isNamespace():
            function = "GetConstructorObject"
        else:
            function = "GetProtoObject"
        return CGGeneric("""\
assert!(!global.get().is_null());

if !ConstructorEnabled(cx, global) {
    return;
}

rooted!(in(cx) let mut proto = ptr::null_mut());
%s(cx, global, proto.handle_mut());
assert!(!proto.is_null());""" % (function,))
def needCx(returnType, arguments, considerTypes):
    """
    Return True when the generated call must receive the JSContext: type
    considerations are enabled and either the return type or some argument
    type requires a cx.
    """
    if not considerTypes:
        return False
    if typeNeedsCx(returnType, True):
        return True
    return any(typeNeedsCx(arg.type) for arg in arguments)
class CGCallGenerator(CGThing):
    """
    A class to generate an actual call to a C++ object. Assumes that the C++
    object is stored in a variable whose name is given by the |object| argument.

    errorResult should be a string for the value to return in case of an
    exception from the native code, or None if no error reporting is needed.
    """
    def __init__(self, errorResult, arguments, argsPre, returnType,
                 extendedAttributes, descriptor, nativeMethodName,
                 static, object="this"):
        CGThing.__init__(self)

        assert errorResult is None or isinstance(errorResult, str)

        # Fallible calls return Result<T, Error> and need unwrapping below.
        isFallible = errorResult is not None

        result = getRetvalDeclarationForType(returnType, descriptor)
        if isFallible:
            result = CGWrapper(result, pre="Result<", post=", Error>")

        args = CGList([CGGeneric(arg) for arg in argsPre], ", ")
        for (a, name) in arguments:
            # XXXjdm Perhaps we should pass all nontrivial types by borrowed pointer
            if a.type.isDictionary() and not type_needs_tracing(a.type):
                name = "&" + name
            args.append(CGGeneric(name))

        needsCx = needCx(returnType, (a for (a, _) in arguments), True)

        # Pass cx first unless the caller already supplies it in argsPre.
        if "cx" not in argsPre and needsCx:
            args.prepend(CGGeneric("cx"))

        # Build up our actual call
        self.cgRoot = CGList([], "\n")

        # Static methods are invoked as Interface::method(...), instance
        # methods as this.method(...).
        call = CGGeneric(nativeMethodName)
        if static:
            call = CGWrapper(call, pre="%s::" % MakeNativeName(descriptor.interface.identifier.name))
        else:
            call = CGWrapper(call, pre="%s." % object)
        call = CGList([call, CGWrapper(args, pre="(", post=")")])

        self.cgRoot.append(CGList([
            CGGeneric("let result: "),
            result,
            CGGeneric(" = "),
            call,
            CGGeneric(";"),
        ]))

        if isFallible:
            # On Err, throw a DOM exception on the context and bail out with
            # the caller-provided errorResult value.
            if static:
                glob = "global.upcast::<GlobalScope>()"
            else:
                glob = "&this.global()"

            self.cgRoot.append(CGGeneric(
                "let result = match result {\n"
                "    Ok(result) => result,\n"
                "    Err(e) => {\n"
                "        throw_dom_exception(cx, %s, e);\n"
                "        return%s;\n"
                "    },\n"
                "};" % (glob, errorResult)))

    def define(self):
        return self.cgRoot.define()
class CGPerSignatureCall(CGThing):
    """
    This class handles the guts of generating code for a particular
    call signature. A call signature consists of four things:

    1) A return type, which can be None to indicate that there is no
       actual return value (e.g. this is an attribute setter) or an
       IDLType if there's an IDL type involved (including |void|).
    2) An argument list, which is allowed to be empty.
    3) A name of a native method to call.
    4) Whether or not this method is static.

    We also need to know whether this is a method or a getter/setter
    to do error reporting correctly.

    The idlNode parameter can be either a method or an attr. We can query
    |idlNode.identifier| in both cases, so we can be agnostic between the two.
    """
    # XXXbz For now each entry in the argument list is either an
    # IDLArgument or a FakeArgument, but longer-term we may want to
    # have ways of flagging things like JSContext* or optional_argc in
    # there.

    def __init__(self, returnType, argsPre, arguments, nativeMethodName, static,
                 descriptor, idlNode, argConversionStartsAt=0,
                 getter=False, setter=False):
        CGThing.__init__(self)
        self.returnType = returnType
        self.descriptor = descriptor
        self.idlNode = idlNode
        self.extendedAttributes = descriptor.getExtendedAttributes(idlNode,
                                                                   getter=getter,
                                                                   setter=setter)
        self.argsPre = argsPre
        self.arguments = arguments
        self.argCount = len(arguments)
        cgThings = []
        # Convert each JS argument (from argConversionStartsAt onwards) into
        # the native value the implementation expects.
        cgThings.extend([CGArgumentConverter(arguments[i], i, self.getArgs(),
                                             self.getArgc(), self.descriptor,
                                             invalidEnumValueFatal=not setter) for
                         i in range(argConversionStartsAt, self.argCount)])

        errorResult = None
        if self.isFallible():
            errorResult = " false"

        if idlNode.isMethod() and idlNode.isMaplikeOrSetlikeOrIterableMethod():
            if idlNode.maplikeOrSetlikeOrIterable.isMaplike() or \
               idlNode.maplikeOrSetlikeOrIterable.isSetlike():
                raise TypeError('Maplike/Setlike methods are not supported yet')
            else:
                # Iterable methods are forwarded to the generated iterator
                # helper rather than calling the native method directly.
                cgThings.append(CGIterableMethodGenerator(descriptor,
                                                          idlNode.maplikeOrSetlikeOrIterable,
                                                          idlNode.identifier.name))
        else:
            cgThings.append(CGCallGenerator(
                errorResult,
                self.getArguments(), self.argsPre, returnType,
                self.extendedAttributes, descriptor, nativeMethodName,
                static))

        self.cgRoot = CGList(cgThings, "\n")

    def getArgs(self):
        # Name of the CallArgs binding, or "" when no arguments are read.
        return "args" if self.argCount > 0 else ""

    def getArgc(self):
        return "argc"

    def getArguments(self):
        # Pair each IDL argument with the local name its conversion produced.
        return [(a, process_arg("arg" + str(i), a)) for (i, a) in enumerate(self.arguments)]

    def isFallible(self):
        return 'infallible' not in self.extendedAttributes

    def wrap_return_value(self):
        return wrapForType('args.rval()')

    def define(self):
        return (self.cgRoot.define() + "\n" + self.wrap_return_value())
class CGSwitch(CGList):
    """
    A class to generate code for a Rust `match` statement.

    Takes three constructor arguments: an expression, a list of CGCase
    children, and an optional default. The default is a CGThing for the body
    of the `_ => { ... }` arm, if any.
    """
    def __init__(self, expression, cases, default=None):
        header = CGWrapper(CGGeneric(expression), pre="match ", post=" {")
        CGList.__init__(self, [CGIndenter(case) for case in cases], "\n")
        self.prepend(header)
        if default is not None:
            fallback = CGWrapper(CGIndenter(default),
                                 pre="_ => {\n",
                                 post="\n}")
            self.append(CGIndenter(fallback))
        self.append(CGGeneric("}"))
class CGCase(CGList):
    """
    A class to generate code for one arm of a `match` statement.

    Takes three constructor arguments: an expression, a CGThing for the body
    (allowed to be None if there is no body), and an optional fallThrough flag
    (defaulting to False); fall-through is not supported and raises TypeError.
    """
    def __init__(self, expression, body, fallThrough=False):
        CGList.__init__(self, [], "\n")
        self.append(CGWrapper(CGGeneric(expression), post=" => {"))
        if fallThrough:
            raise TypeError("fall through required but unsupported")
        self.append(CGIndenter(CGList([body], "\n")))
        self.append(CGGeneric("}"))
class CGGetterCall(CGPerSignatureCall):
    """
    A class to generate a native object getter call for a particular IDL
    getter. Getters take no IDL arguments, hence the empty argument list.
    """
    def __init__(self, argsPre, returnType, nativeMethodName, descriptor, attr):
        static = attr.isStatic()
        CGPerSignatureCall.__init__(self, returnType, argsPre, [],
                                    nativeMethodName, static, descriptor,
                                    attr, getter=True)
class FakeArgument():
    """
    A class that quacks like an IDLArgument. Used to present attribute
    setters (and some special operations) to the code generator as if they
    were ordinary one-argument method calls.
    """
    def __init__(self, type, interfaceMember, allowTreatNonObjectAsNull=False):
        self.type = type
        # A fake argument is always a required, non-variadic positional.
        self.optional = False
        self.variadic = False
        self.defaultValue = None
        # Inherit null-handling behaviour from the member being wrapped.
        self.treatNullAs = interfaceMember.treatNullAs
        self.enforceRange = False
        self.clamp = False
        self._allowTreatNonObjectAsNull = allowTreatNonObjectAsNull

    def allowTreatNonCallableAsNull(self):
        return self._allowTreatNonObjectAsNull
class CGSetterCall(CGPerSignatureCall):
    """
    A class to generate a native object setter call for a particular IDL
    setter. The setter's single value is modelled as a FakeArgument.
    """
    def __init__(self, argsPre, argType, nativeMethodName, descriptor, attr):
        fakeArg = FakeArgument(argType, attr, allowTreatNonObjectAsNull=True)
        CGPerSignatureCall.__init__(self, None, argsPre, [fakeArg],
                                    nativeMethodName, attr.isStatic(),
                                    descriptor, attr, setter=True)

    def wrap_return_value(self):
        # Setters produce no value; just signal success to the caller.
        return "\nreturn true;"

    def getArgc(self):
        # A setter always receives exactly one argument.
        return "1"
class CGAbstractStaticBindingMethod(CGAbstractMethod):
    """
    Common class to generate the JSNatives for all our static methods, getters
    and setters. This will generate the function declaration and unwrap the
    global object. Subclasses are expected to override the generate_code
    function to do the rest of the work. This function should return a
    CGThing which is already properly indented.
    """
    def __init__(self, descriptor, name):
        args = [
            Argument('*mut JSContext', 'cx'),
            Argument('libc::c_uint', 'argc'),
            Argument('*mut JSVal', 'vp'),
        ]
        CGAbstractMethod.__init__(self, descriptor, name, "bool", args, extern=True)
        self.exposureSet = descriptor.interface.exposureSet

    def definition_body(self):
        # Static operations are invoked on the interface object, so the
        # callee's global is the scope object handed to the implementation.
        preamble = "let global = GlobalScope::from_object(JS_CALLEE(cx, vp).to_object());\n"
        if len(self.exposureSet) == 1:
            # Exposed in a single global type: downcast to the concrete type.
            preamble += "let global = Root::downcast::<dom::types::%s>(global).unwrap();\n" % list(self.exposureSet)[0]
        return CGList([CGGeneric(preamble), self.generate_code()])

    def generate_code(self):
        raise NotImplementedError  # Override me!
class CGSpecializedMethod(CGAbstractExternMethod):
    """
    A class for generating the C++ code for a specialized method that the JIT
    can call with lower overhead.
    """
    def __init__(self, descriptor, method):
        self.method = method
        name = method.identifier.name
        args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', '_obj'),
                Argument('*const %s' % descriptor.concreteType, 'this'),
                Argument('*const JSJitMethodCallArgs', 'args')]
        CGAbstractExternMethod.__init__(self, descriptor, name, 'bool', args)

    def definition_body(self):
        nativeName = CGSpecializedMethod.makeNativeName(self.descriptor,
                                                        self.method)
        # Dereference the raw pointers once up front; argc is recovered from
        # the JIT call args structure.
        return CGWrapper(CGMethodCall([], nativeName, self.method.isStatic(),
                                      self.descriptor, self.method),
                         pre="let this = &*this;\n"
                             "let args = &*args;\n"
                             "let argc = args._base.argc_;\n")

    @staticmethod
    def makeNativeName(descriptor, method):
        # Respect [BinaryName]-style renames before falling back to the
        # internal name for the member.
        name = method.identifier.name
        nativeName = descriptor.binaryNameFor(name)
        if nativeName == name:
            nativeName = descriptor.internalNameFor(name)
        return MakeNativeName(nativeName)
class CGStaticMethod(CGAbstractStaticBindingMethod):
    """
    A class for generating the Rust code for an IDL static method.
    """
    def __init__(self, descriptor, method):
        self.method = method
        CGAbstractStaticBindingMethod.__init__(self, descriptor,
                                               method.identifier.name)

    def generate_code(self):
        native = CGSpecializedMethod.makeNativeName(self.descriptor, self.method)
        return CGList([
            CGGeneric("let args = CallArgs::from_vp(vp, argc);\n"),
            CGMethodCall(["&global"], native, True, self.descriptor, self.method),
        ])
class CGSpecializedGetter(CGAbstractExternMethod):
    """
    A class for generating the code for a specialized attribute getter
    that the JIT can call with lower overhead.
    """
    def __init__(self, descriptor, attr):
        self.attr = attr
        name = 'get_' + descriptor.internalNameFor(attr.identifier.name)
        args = [Argument('*mut JSContext', 'cx'),
                Argument('HandleObject', '_obj'),
                Argument('*const %s' % descriptor.concreteType, 'this'),
                Argument('JSJitGetterCallArgs', 'args')]
        CGAbstractExternMethod.__init__(self, descriptor, name, "bool", args)

    def definition_body(self):
        nativeName = CGSpecializedGetter.makeNativeName(self.descriptor,
                                                        self.attr)
        return CGWrapper(CGGetterCall([], self.attr.type, nativeName,
                                      self.descriptor, self.attr),
                         pre="let this = &*this;\n")

    @staticmethod
    def makeNativeName(descriptor, attr):
        name = attr.identifier.name
        nativeName = descriptor.binaryNameFor(name)
        if nativeName == name:
            nativeName = descriptor.internalNameFor(name)
        nativeName = MakeNativeName(nativeName)
        infallible = ('infallible' in
                      descriptor.getExtendedAttributes(attr, getter=True))
        # Nullable or fallible getters use a "Get"-prefixed native method.
        if attr.type.nullable() or not infallible:
            return "Get" + nativeName

        return nativeName
class CGStaticGetter(CGAbstractStaticBindingMethod):
    """
    A class for generating the C++ code for an IDL static attribute getter.
    """
    def __init__(self, descriptor, attr):
        self.attr = attr
        CGAbstractStaticBindingMethod.__init__(self, descriptor,
                                               'get_' + attr.identifier.name)

    def generate_code(self):
        native = CGSpecializedGetter.makeNativeName(self.descriptor, self.attr)
        return CGList([
            CGGeneric("let args = CallArgs::from_vp(vp, argc);\n"),
            CGGetterCall(["&global"], self.attr.type, native, self.descriptor,
                         self.attr),
        ])
class CGSpecializedSetter(CGAbstractExternMethod):
    """
    A class for generating the code for a specialized attribute setter
    that the JIT can call with lower overhead.
    """
    def __init__(self, descriptor, attr):
        self.attr = attr
        name = 'set_' + descriptor.internalNameFor(attr.identifier.name)
        args = [Argument('*mut JSContext', 'cx'),
                Argument('HandleObject', 'obj'),
                Argument('*const %s' % descriptor.concreteType, 'this'),
                Argument('JSJitSetterCallArgs', 'args')]
        CGAbstractExternMethod.__init__(self, descriptor, name, "bool", args)

    def definition_body(self):
        nativeName = CGSpecializedSetter.makeNativeName(self.descriptor,
                                                        self.attr)
        return CGWrapper(CGSetterCall([], self.attr.type, nativeName,
                                      self.descriptor, self.attr),
                         pre="let this = &*this;\n")

    @staticmethod
    def makeNativeName(descriptor, attr):
        # Respect [BinaryName]-style renames; setters always use a
        # "Set"-prefixed native method.
        name = attr.identifier.name
        nativeName = descriptor.binaryNameFor(name)
        if nativeName == name:
            nativeName = descriptor.internalNameFor(name)
        return "Set" + MakeNativeName(nativeName)
class CGStaticSetter(CGAbstractStaticBindingMethod):
    """
    A class for generating the C++ code for an IDL static attribute setter.
    """
    def __init__(self, descriptor, attr):
        self.attr = attr
        CGAbstractStaticBindingMethod.__init__(self, descriptor,
                                               'set_' + attr.identifier.name)

    def generate_code(self):
        native = CGSpecializedSetter.makeNativeName(self.descriptor, self.attr)
        # Reject calls with no value before attempting any conversion.
        guard = CGGeneric(
            "let args = CallArgs::from_vp(vp, argc);\n"
            "if argc == 0 {\n"
            "    throw_type_error(cx, \"Not enough arguments to %s setter.\");\n"
            "    return false;\n"
            "}" % self.attr.identifier.name)
        setterCall = CGSetterCall(["&global"], self.attr.type, native,
                                  self.descriptor, self.attr)
        return CGList([guard, setterCall])
class CGSpecializedForwardingSetter(CGSpecializedSetter):
    """
    A class for generating the code for an IDL attribute forwarding setter.

    [PutForwards] setters read this.attr and then assign the incoming value
    to the named attribute on that result object.
    """
    def __init__(self, descriptor, attr):
        CGSpecializedSetter.__init__(self, descriptor, attr)

    def definition_body(self):
        attrName = self.attr.identifier.name
        forwardToAttrName = self.attr.getExtendedAttribute("PutForwards")[0]
        # JS_GetProperty and JS_SetProperty can only deal with ASCII
        assert all(ord(c) < 128 for c in attrName)
        assert all(ord(c) < 128 for c in forwardToAttrName)
        return CGGeneric("""\
rooted!(in(cx) let mut v = UndefinedValue());
if !JS_GetProperty(cx, obj, %s as *const u8 as *const libc::c_char, v.handle_mut()) {
    return false;
}
if !v.is_object() {
    throw_type_error(cx, "Value.%s is not an object.");
    return false;
}
rooted!(in(cx) let target_obj = v.to_object());
JS_SetProperty(cx, target_obj.handle(), %s as *const u8 as *const libc::c_char, args.get(0))
""" % (str_to_const_array(attrName), attrName, str_to_const_array(forwardToAttrName)))
class CGSpecializedReplaceableSetter(CGSpecializedSetter):
    """
    A class for generating the code for an IDL replaceable attribute setter.

    [Replaceable] setters shadow the accessor with a plain enumerable data
    property holding the assigned value.
    """
    def __init__(self, descriptor, attr):
        CGSpecializedSetter.__init__(self, descriptor, attr)

    def definition_body(self):
        # Only readonly attributes may be [Replaceable].
        assert self.attr.readonly
        name = str_to_const_array(self.attr.identifier.name)
        # JS_DefineProperty can only deal with ASCII.
        assert all(ord(c) < 128 for c in name)
        return CGGeneric("""\
JS_DefineProperty(cx, obj, %s as *const u8 as *const libc::c_char,
                  args.get(0), JSPROP_ENUMERATE, None, None)""" % name)
class CGMemberJITInfo(CGThing):
    """
    A class for generating the JITInfo for a property that points to
    our specialized getter and setter.
    """
    def __init__(self, descriptor, member):
        self.member = member
        self.descriptor = descriptor

    def defineJitInfo(self, infoName, opName, opType, infallible, movable,
                      aliasSet, alwaysInSlot, lazilyInSlot, slotIndex,
                      returnTypes, args):
        """
        aliasSet is a JSJitInfo_AliasSet value, without the "JSJitInfo_AliasSet::" bit.

        args is None if we don't want to output argTypes for some
        reason (e.g. we have overloads or we're not a method) and
        otherwise an iterable of the arguments for this method.
        """
        assert not movable or aliasSet != "AliasEverything"  # Can't move write-aliasing things
        assert not alwaysInSlot or movable  # Things always in slots had better be movable

        def jitInfoInitializer(isTypedMethod):
            # Emit a JSJitInfo struct literal; the bitfield packs the op
            # type, alias set, return-type tag and assorted flags.
            initializer = fill(
                """
                JSJitInfo {
                    call: ${opName} as *const os::raw::c_void,
                    protoID: PrototypeList::ID::${name} as u16,
                    depth: ${depth},
                    _bitfield_1:
                        JSJitInfo::new_bitfield_1(
                            JSJitInfo_OpType::${opType} as u8,
                            JSJitInfo_AliasSet::${aliasSet} as u8,
                            JSValueType::${returnType} as u8,
                            ${isInfallible},
                            ${isMovable},
                            ${isEliminatable},
                            ${isAlwaysInSlot},
                            ${isLazilyCachedInSlot},
                            ${isTypedMethod},
                            ${slotIndex} as u16,
                        )
                }
                """,
                opName=opName,
                name=self.descriptor.name,
                depth=self.descriptor.interface.inheritanceDepth(),
                opType=opType,
                aliasSet=aliasSet,
                returnType=reduce(CGMemberJITInfo.getSingleReturnType, returnTypes,
                                  ""),
                isInfallible=toStringBool(infallible),
                isMovable=toStringBool(movable),
                # FIXME(nox): https://github.com/servo/servo/issues/10991
                isEliminatable=toStringBool(False),
                isAlwaysInSlot=toStringBool(alwaysInSlot),
                isLazilyCachedInSlot=toStringBool(lazilyInSlot),
                isTypedMethod=toStringBool(isTypedMethod),
                slotIndex=slotIndex)
            return initializer.rstrip()

        if args is not None:
            # Typed method: also emit the argument-type array so the JIT can
            # specialize on argument types.
            argTypes = "%s_argTypes" % infoName
            args = [CGMemberJITInfo.getJSArgType(arg.type) for arg in args]
            args.append("JSJitInfo_ArgType::ArgTypeListEnd as i32")
            argTypesDecl = (
                "const %s: [i32; %d] = [ %s ];\n" %
                (argTypes, len(args), ", ".join(args)))
            return fill(
                """
                $*{argTypesDecl}
                const ${infoName}: JSTypedMethodJitInfo = JSTypedMethodJitInfo {
                    base: ${jitInfo},
                    argTypes: &${argTypes} as *const _ as *const JSJitInfo_ArgType,
                };
                """,
                argTypesDecl=argTypesDecl,
                infoName=infoName,
                jitInfo=indent(jitInfoInitializer(True)),
                argTypes=argTypes)

        return ("\n"
                "const %s: JSJitInfo = %s;\n"
                % (infoName, jitInfoInitializer(False)))

    def define(self):
        if self.member.isAttr():
            internalMemberName = self.descriptor.internalNameFor(self.member.identifier.name)
            getterinfo = ("%s_getterinfo" % internalMemberName)
            getter = ("get_%s" % internalMemberName)
            getterinfal = "infallible" in self.descriptor.getExtendedAttributes(self.member, getter=True)

            movable = self.mayBeMovable() and getterinfal
            aliasSet = self.aliasSet()

            isAlwaysInSlot = self.member.getExtendedAttribute("StoreInSlot")
            if self.member.slotIndices is not None:
                assert isAlwaysInSlot or self.member.getExtendedAttribute("Cached")
                isLazilyCachedInSlot = not isAlwaysInSlot
                slotIndex = memberReservedSlot(self.member)  # noqa:FIXME: memberReservedSlot is not defined
                # We'll statically assert that this is not too big in
                # CGUpdateMemberSlotsMethod, in the case when
                # isAlwaysInSlot is true.
            else:
                isLazilyCachedInSlot = False
                slotIndex = "0"

            result = self.defineJitInfo(getterinfo, getter, "Getter",
                                        getterinfal, movable, aliasSet,
                                        isAlwaysInSlot, isLazilyCachedInSlot,
                                        slotIndex,
                                        [self.member.type], None)
            if (not self.member.readonly or self.member.getExtendedAttribute("PutForwards")
                    or self.member.getExtendedAttribute("Replaceable")):
                setterinfo = ("%s_setterinfo" % internalMemberName)
                setter = ("set_%s" % internalMemberName)
                # Setters are always fallible, since they have to do a typed unwrap.
                result += self.defineJitInfo(setterinfo, setter, "Setter",
                                             False, False, "AliasEverything",
                                             False, False, "0",
                                             [BuiltinTypes[IDLBuiltinType.Types.void]],
                                             None)
            return result
        if self.member.isMethod():
            methodinfo = ("%s_methodinfo" % self.member.identifier.name)
            method = ("%s" % self.member.identifier.name)

            # Methods are infallible if they are infallible, have no arguments
            # to unwrap, and have a return type that's infallible to wrap up for
            # return.
            sigs = self.member.signatures()
            if len(sigs) != 1:
                # Don't handle overloading. If there's more than one signature,
                # one of them must take arguments.
                methodInfal = False
                args = None
                movable = False
            else:
                sig = sigs[0]
                # For methods that affect nothing, it's OK to set movable to our
                # notion of infallible on the C++ side, without considering
                # argument conversions, since argument conversions that can
                # reliably throw would be effectful anyway and the jit doesn't
                # move effectful things.
                hasInfallibleImpl = "infallible" in self.descriptor.getExtendedAttributes(self.member)
                movable = self.mayBeMovable() and hasInfallibleImpl
                # XXXbz can we move the smarts about fallibility due to arg
                # conversions into the JIT, using our new args stuff?
                if (len(sig[1]) != 0):
                    # We have arguments or our return-value boxing can fail
                    methodInfal = False
                else:
                    methodInfal = hasInfallibleImpl
                # For now, only bother to output args if we're side-effect-free.
                if self.member.affects == "Nothing":
                    args = sig[1]
                else:
                    args = None

            aliasSet = self.aliasSet()
            result = self.defineJitInfo(methodinfo, method, "Method",
                                        methodInfal, movable, aliasSet,
                                        False, False, "0",
                                        [s[0] for s in sigs], args)
            return result
        # Fixed: error message previously named the nonexistent
        # "CGPropertyJITInfo" class.
        raise TypeError("Illegal member type to CGMemberJITInfo")

    def mayBeMovable(self):
        """
        Returns whether this attribute or method may be movable, just
        based on Affects/DependsOn annotations.
        """
        affects = self.member.affects
        dependsOn = self.member.dependsOn
        assert affects in IDLInterfaceMember.AffectsValues
        assert dependsOn in IDLInterfaceMember.DependsOnValues
        # Things that are DependsOn=DeviceState are not movable, because we
        # don't want them coalesced with each other or loop-hoisted, since
        # their return value can change even if nothing is going on from our
        # point of view.
        return (affects == "Nothing" and
                (dependsOn != "Everything" and dependsOn != "DeviceState"))

    def aliasSet(self):
        """Returns the alias set to store in the jitinfo. This may not be the
        effective alias set the JIT uses, depending on whether we have enough
        information about our args to allow the JIT to prove that effectful
        argument conversions won't happen.
        """
        dependsOn = self.member.dependsOn
        assert dependsOn in IDLInterfaceMember.DependsOnValues

        if dependsOn == "Nothing" or dependsOn == "DeviceState":
            assert self.member.affects == "Nothing"
            return "AliasNone"
        if dependsOn == "DOMState":
            assert self.member.affects == "Nothing"
            return "AliasDOMSets"
        return "AliasEverything"

    @staticmethod
    def getJSReturnTypeTag(t):
        """Map an IDL return type to the JSVAL_TYPE_* tag stored in the jitinfo."""
        if t.nullable():
            # Sometimes it might return null, sometimes not
            return "JSVAL_TYPE_UNKNOWN"
        if t.isVoid():
            # No return, every time
            return "JSVAL_TYPE_UNDEFINED"
        if t.isSequence():
            return "JSVAL_TYPE_OBJECT"
        if t.isMozMap():
            return "JSVAL_TYPE_OBJECT"
        if t.isGeckoInterface():
            return "JSVAL_TYPE_OBJECT"
        if t.isString():
            return "JSVAL_TYPE_STRING"
        if t.isEnum():
            return "JSVAL_TYPE_STRING"
        if t.isCallback():
            return "JSVAL_TYPE_OBJECT"
        if t.isAny():
            # The whole point is to return various stuff
            return "JSVAL_TYPE_UNKNOWN"
        if t.isObject():
            return "JSVAL_TYPE_OBJECT"
        if t.isSpiderMonkeyInterface():
            return "JSVAL_TYPE_OBJECT"
        if t.isUnion():
            u = t.unroll()
            if u.hasNullableType:
                # Might be null or not
                return "JSVAL_TYPE_UNKNOWN"
            return reduce(CGMemberJITInfo.getSingleReturnType,
                          u.flatMemberTypes, "")
        if t.isDictionary():
            return "JSVAL_TYPE_OBJECT"
        if t.isDate():
            return "JSVAL_TYPE_OBJECT"
        if not t.isPrimitive():
            raise TypeError("No idea what type " + str(t) + " is.")
        tag = t.tag()
        if tag == IDLType.Tags.bool:
            return "JSVAL_TYPE_BOOLEAN"
        if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
                   IDLType.Tags.int16, IDLType.Tags.uint16,
                   IDLType.Tags.int32]:
            return "JSVAL_TYPE_INT32"
        if tag in [IDLType.Tags.int64, IDLType.Tags.uint64,
                   IDLType.Tags.unrestricted_float, IDLType.Tags.float,
                   IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
            # These all use JS_NumberValue, which can return int or double.
            # But TI treats "double" as meaning "int or double", so we're
            # good to return JSVAL_TYPE_DOUBLE here.
            return "JSVAL_TYPE_DOUBLE"
        if tag != IDLType.Tags.uint32:
            raise TypeError("No idea what type " + str(t) + " is.")
        # uint32 is sometimes int and sometimes double.
        return "JSVAL_TYPE_DOUBLE"

    @staticmethod
    def getSingleReturnType(existingType, t):
        """Fold one more return type into the accumulated JSVAL_TYPE_* tag."""
        type = CGMemberJITInfo.getJSReturnTypeTag(t)
        if existingType == "":
            # First element of the list; just return its type
            return type

        if type == existingType:
            return existingType
        if ((type == "JSVAL_TYPE_DOUBLE" and
             existingType == "JSVAL_TYPE_INT32") or
            (existingType == "JSVAL_TYPE_DOUBLE" and
             type == "JSVAL_TYPE_INT32")):
            # Promote INT32 to DOUBLE as needed
            return "JSVAL_TYPE_DOUBLE"
        # Different types
        return "JSVAL_TYPE_UNKNOWN"

    @staticmethod
    def getJSArgType(t):
        """Map an IDL argument type to a JSJitInfo_ArgType bitmask expression."""
        assert not t.isVoid()
        if t.nullable():
            # Sometimes it might return null, sometimes not
            return "JSJitInfo_ArgType::Null as i32 | %s" % CGMemberJITInfo.getJSArgType(t.inner)
        if t.isSequence():
            return "JSJitInfo_ArgType::Object as i32"
        if t.isGeckoInterface():
            return "JSJitInfo_ArgType::Object as i32"
        if t.isString():
            return "JSJitInfo_ArgType::String as i32"
        if t.isEnum():
            return "JSJitInfo_ArgType::String as i32"
        if t.isCallback():
            return "JSJitInfo_ArgType::Object as i32"
        if t.isAny():
            # The whole point is to return various stuff
            return "JSJitInfo_ArgType::Any as i32"
        if t.isObject():
            return "JSJitInfo_ArgType::Object as i32"
        if t.isSpiderMonkeyInterface():
            return "JSJitInfo_ArgType::Object as i32"
        if t.isUnion():
            u = t.unroll()
            # Fixed: this previously emitted "JSJitInfo::Null", which is not
            # the path of the ArgType variant (cf. the nullable case above)
            # and would not compile in the generated Rust.
            type = "JSJitInfo_ArgType::Null as i32" if u.hasNullableType else ""
            return reduce(CGMemberJITInfo.getSingleArgType,
                          u.flatMemberTypes, type)
        if t.isDictionary():
            return "JSJitInfo_ArgType::Object as i32"
        if t.isDate():
            return "JSJitInfo_ArgType::Object as i32"
        if not t.isPrimitive():
            raise TypeError("No idea what type " + str(t) + " is.")
        tag = t.tag()
        if tag == IDLType.Tags.bool:
            return "JSJitInfo_ArgType::Boolean as i32"
        if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
                   IDLType.Tags.int16, IDLType.Tags.uint16,
                   IDLType.Tags.int32]:
            return "JSJitInfo_ArgType::Integer as i32"
        if tag in [IDLType.Tags.int64, IDLType.Tags.uint64,
                   IDLType.Tags.unrestricted_float, IDLType.Tags.float,
                   IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
            # These all use JS_NumberValue, which can return int or double.
            # But TI treats "double" as meaning "int or double", so we're
            # good to return JSVAL_TYPE_DOUBLE here.
            return "JSJitInfo_ArgType::Double as i32"
        if tag != IDLType.Tags.uint32:
            raise TypeError("No idea what type " + str(t) + " is.")
        # uint32 is sometimes int and sometimes double.
        return "JSJitInfo_ArgType::Double as i32"

    @staticmethod
    def getSingleArgType(existingType, t):
        """Fold one more argument type into the accumulated ArgType bitmask."""
        type = CGMemberJITInfo.getJSArgType(t)
        if existingType == "":
            # First element of the list; just return its type
            return type

        if type == existingType:
            return existingType
        return "%s | %s" % (existingType, type)
def getEnumValueName(value):
    """Map an IDL enum string value to a usable Rust variant name.

    The empty string maps to "_empty"; values that are non-ASCII, start
    with a digit, collide with a reserved identifier pattern, or are
    literally "_empty" raise SyntaxError. All other characters outside
    [0-9A-Za-z_] are replaced with '_'.
    """
    # Some enum values can be empty strings. Others might have weird
    # characters in them. Deal with the former by returning "_empty",
    # deal with possible name collisions from that by throwing if the
    # enum value is actually "_empty", and throw on any value
    # containing non-ASCII chars for now. Replace all chars other than
    # [0-9A-Za-z_] with '_'.
    #
    # FIX: use re.search, not re.match — re.match only inspects the start
    # of the string, so non-ASCII characters after the first position
    # slipped through the check.
    if re.search("[^\x20-\x7E]", value):
        raise SyntaxError('Enum value "' + value + '" contains non-ASCII characters')
    if re.match("^[0-9]", value):
        raise SyntaxError('Enum value "' + value + '" starts with a digit')
    value = re.sub(r'[^0-9A-Za-z_]', '_', value)
    # NOTE(review): this only rejects reserved patterns anchored at the
    # start; a "__" produced in the middle by the substitution above is
    # not caught. Kept as-is to avoid rejecting currently-accepted values.
    if re.match("^_[A-Z]|__", value):
        raise SyntaxError('Enum value "' + value + '" is reserved by the C++ spec')
    if value == "_empty":
        raise SyntaxError('"_empty" is not an IDL enum value we support yet')
    if value == "":
        return "_empty"
    return MakeNativeName(value)
class CGEnum(CGThing):
    """CGThing that renders a WebIDL enum as Rust code.

    Emits a `pub enum <Name>` with one variant per IDL value, plus a
    `<Name>Values` module holding the (IDL string, variant) pairs, an
    `as_str` accessor, and a `ToJSValConvertible` impl that converts a
    variant back to its IDL string.
    """
    def __init__(self, enum):
        CGThing.__init__(self)

        ident = enum.identifier.name
        # The Rust enum declaration itself.
        decl = """\
#[repr(usize)]
#[derive(JSTraceable, PartialEq, Copy, Clone, HeapSizeOf, Debug)]
pub enum %s {
    %s
}
""" % (ident, ",\n    ".join(map(getEnumValueName, enum.values())))

        # (IDL string, Rust variant) pairs used by the conversion helpers.
        pairs = ",\n    ".join(['("%s", super::%s::%s)' % (val, ident, getEnumValueName(val)) for val in enum.values()])

        inner = """\
use dom::bindings::conversions::ToJSValConvertible;
use js::jsapi::{JSContext, MutableHandleValue};
use js::jsval::JSVal;

pub const pairs: &'static [(&'static str, super::%s)] = &[
    %s,
];

impl super::%s {
    pub fn as_str(&self) -> &'static str {
        pairs[*self as usize].0
    }
}

impl ToJSValConvertible for super::%s {
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        pairs[*self as usize].0.to_jsval(cx, rval);
    }
}
""" % (ident, pairs, ident, ident)

        self.cgRoot = CGList([
            CGGeneric(decl),
            CGNamespace.build([ident + "Values"],
                              CGIndenter(CGGeneric(inner)), public=True),
        ])

    def define(self):
        return self.cgRoot.define()
def convertConstIDLValueToRust(value):
    """Render an IDL constant's value as Rust literal source text.

    Numeric constants stringify directly; booleans go through
    toStringBool. Anything else is unsupported and raises TypeError.
    """
    tag = value.type.tag()
    numericTags = [IDLType.Tags.int8, IDLType.Tags.uint8,
                   IDLType.Tags.int16, IDLType.Tags.uint16,
                   IDLType.Tags.int32, IDLType.Tags.uint32,
                   IDLType.Tags.int64, IDLType.Tags.uint64,
                   IDLType.Tags.unrestricted_float, IDLType.Tags.float,
                   IDLType.Tags.unrestricted_double, IDLType.Tags.double]
    if tag in numericTags:
        return str(value.value)
    if tag == IDLType.Tags.bool:
        return toStringBool(value.value)
    raise TypeError("Const value of unhandled type: " + value.type)
class CGConstant(CGThing):
    """CGThing that emits a `pub const` item for a single IDL constant."""

    def __init__(self, constant):
        CGThing.__init__(self)
        self.constant = constant

    def define(self):
        # Name, Rust builtin type for the constant's tag, and literal value.
        ident = self.constant.identifier.name
        rustType = builtinNames[self.constant.value.type.tag()]
        literal = convertConstIDLValueToRust(self.constant.value)
        return "pub const %s: %s = %s;\n" % (ident, rustType, literal)
def getUnionTypeTemplateVars(type, descriptorProvider):
    """Compute the per-member-type pieces needed to generate a union enum.

    Returns a dict with:
      name: the variant name used in the generated Rust union enum,
      typeName: the Rust type that variant wraps,
      jsConversion: a CGThing converting a JS value into Ok(Some(native)).
    """
    if type.isGeckoInterface():
        name = type.inner.identifier.name
        typeName = descriptorProvider.getDescriptor(name).returnType
    elif type.isEnum():
        name = type.inner.identifier.name
        typeName = name
    elif type.isDictionary():
        name = type.name
        typeName = name
    elif type.isSequence() or type.isMozMap():
        name = type.name
        # Recurse on the element type, then wrap it in the native
        # container (Vec / MozMap).
        inner = getUnionTypeTemplateVars(innerContainerType(type), descriptorProvider)
        typeName = wrapInNativeContainerType(type, CGGeneric(inner["typeName"])).define()
    elif type.isByteString():
        name = type.name
        typeName = "ByteString"
    elif type.isDOMString():
        name = type.name
        typeName = "DOMString"
    elif type.isUSVString():
        name = type.name
        typeName = "USVString"
    elif type.isPrimitive():
        name = type.name
        typeName = builtinNames[type.tag()]
    elif type.isObject():
        name = type.name
        typeName = "Heap<*mut JSObject>"
    else:
        raise TypeError("Can't handle %s in unions yet" % type)

    # Conversion failure yields Ok(None) so the caller can fall through
    # and try the next member type; a thrown exception yields Err(()).
    info = getJSToNativeConversionInfo(
        type, descriptorProvider, failureCode="return Ok(None);",
        exceptionCode='return Err(());',
        isDefinitelyObject=True,
        isMember="Union")
    template = info.template

    jsConversion = string.Template(template).substitute({
        "val": "value",
    })
    jsConversion = CGWrapper(CGGeneric(jsConversion), pre="Ok(Some(", post="))")

    return {
        "name": name,
        "typeName": typeName,
        "jsConversion": jsConversion,
    }
class CGUnionStruct(CGThing):
    """Renders a WebIDL union as a Rust enum with one variant per
    flattened member type, plus a ToJSValConvertible impl that dispatches
    on the active variant."""
    def __init__(self, type, descriptorProvider):
        # Nullability is handled by the caller wrapping in Option, not here.
        assert not type.nullable()
        assert not type.hasNullableType
        CGThing.__init__(self)
        self.type = type
        self.descriptorProvider = descriptorProvider

    def define(self):
        templateVars = map(lambda t: getUnionTypeTemplateVars(t, self.descriptorProvider),
                           self.type.flatMemberTypes)
        enumValues = [
            "    %s(%s)," % (v["name"], v["typeName"]) for v in templateVars
        ]
        enumConversions = [
            "            %s::%s(ref inner) => inner.to_jsval(cx, rval),"
            % (self.type, v["name"]) for v in templateVars
        ]
        return ("""\
#[derive(JSTraceable)]
pub enum %s {
%s
}

impl ToJSValConvertible for %s {
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        match *self {
%s
        }
    }
}
""") % (self.type, "\n".join(enumValues), self.type, "\n".join(enumConversions))
class CGUnionConversionStruct(CGThing):
    """Generates the FromJSValConvertible impl for a union type, plus one
    TryConvertToT helper per flattened member type.

    The conversion follows the WebIDL union algorithm: object-like member
    types are tried when the incoming value is a JS object, then
    dictionaries, then boolean/numeric/string members selected by the JS
    value's type; anything left falls through to a thrown TypeError.
    """
    def __init__(self, type, descriptorProvider):
        assert not type.nullable()
        assert not type.hasNullableType
        CGThing.__init__(self)
        self.type = type
        self.descriptorProvider = descriptorProvider

    def from_jsval(self):
        """Build the `from_jsval` method (as a CGThing) for this union."""
        memberTypes = self.type.flatMemberTypes
        names = []
        conversions = []

        def get_name(memberType):
            # NOTE(review): this tests self.type (the union itself), not
            # memberType, so for unions the first branch looks unreachable
            # and memberType.name is what's actually used — confirm.
            if self.type.isGeckoInterface():
                return memberType.inner.identifier.name

            return memberType.name

        def get_match(name):
            # Try one member conversion: an error propagates, a successful
            # conversion returns, and Ok(None) falls through so the next
            # candidate member type can be attempted.
            return (
                "match %s::TryConvertTo%s(cx, value) {\n"
                "    Err(_) => return Err(()),\n"
                "    Ok(Some(value)) => return Ok(ConversionResult::Success(%s::%s(value))),\n"
                "    Ok(None) => (),\n"
                "}\n") % (self.type, name, self.type, name)

        interfaceMemberTypes = filter(lambda t: t.isNonCallbackInterface(), memberTypes)
        if len(interfaceMemberTypes) > 0:
            typeNames = [get_name(memberType) for memberType in interfaceMemberTypes]
            interfaceObject = CGList(CGGeneric(get_match(typeName)) for typeName in typeNames)
            names.extend(typeNames)
        else:
            interfaceObject = None

        # WebIDL allows at most one sequence member in a union.
        arrayObjectMemberTypes = filter(lambda t: t.isSequence(), memberTypes)
        if len(arrayObjectMemberTypes) > 0:
            assert len(arrayObjectMemberTypes) == 1
            typeName = arrayObjectMemberTypes[0].name
            arrayObject = CGGeneric(get_match(typeName))
            names.append(typeName)
        else:
            arrayObject = None

        dateObjectMemberTypes = filter(lambda t: t.isDate(), memberTypes)
        if len(dateObjectMemberTypes) > 0:
            assert len(dateObjectMemberTypes) == 1
            raise TypeError("Can't handle dates in unions.")
        else:
            dateObject = None

        callbackMemberTypes = filter(lambda t: t.isCallback() or t.isCallbackInterface(), memberTypes)
        if len(callbackMemberTypes) > 0:
            assert len(callbackMemberTypes) == 1
            raise TypeError("Can't handle callbacks in unions.")
        else:
            callbackObject = None

        dictionaryMemberTypes = filter(lambda t: t.isDictionary(), memberTypes)
        if len(dictionaryMemberTypes) > 0:
            assert len(dictionaryMemberTypes) == 1
            typeName = dictionaryMemberTypes[0].name
            dictionaryObject = CGGeneric(get_match(typeName))
            names.append(typeName)
        else:
            dictionaryObject = None

        objectMemberTypes = filter(lambda t: t.isObject(), memberTypes)
        if len(objectMemberTypes) > 0:
            assert len(objectMemberTypes) == 1
            typeName = objectMemberTypes[0].name
            object = CGGeneric(get_match(typeName))
            names.append(typeName)
        else:
            object = None

        mozMapMemberTypes = filter(lambda t: t.isMozMap(), memberTypes)
        if len(mozMapMemberTypes) > 0:
            assert len(mozMapMemberTypes) == 1
            typeName = mozMapMemberTypes[0].name
            mozMapObject = CGGeneric(get_match(typeName))
            names.append(typeName)
        else:
            mozMapObject = None

        hasObjectTypes = interfaceObject or arrayObject or dateObject or object or mozMapObject
        if hasObjectTypes:
            # "object" is not distinguishable from other types
            assert not object or not (interfaceObject or arrayObject or dateObject or callbackObject or mozMapObject)

            templateBody = CGList([], "\n")
            if interfaceObject:
                templateBody.append(interfaceObject)
            if arrayObject:
                templateBody.append(arrayObject)
            if mozMapObject:
                templateBody.append(mozMapObject)
            # Object-like members are only attempted for JS object values.
            conversions.append(CGIfWrapper("value.get().is_object()", templateBody))

        if dictionaryObject:
            # A dictionary member accepts any object-ish value, so it must
            # be the only object-like member type present.
            assert not hasObjectTypes
            conversions.append(dictionaryObject)

        stringTypes = [t for t in memberTypes if t.isString() or t.isEnum()]
        numericTypes = [t for t in memberTypes if t.isNumeric()]
        booleanTypes = [t for t in memberTypes if t.isBoolean()]
        if stringTypes or numericTypes or booleanTypes:
            # WebIDL permits at most one member of each of these categories.
            assert len(stringTypes) <= 1
            assert len(numericTypes) <= 1
            assert len(booleanTypes) <= 1

            def getStringOrPrimitiveConversion(memberType):
                typename = get_name(memberType)
                return CGGeneric(get_match(typename))

            # Dispatch on the JS value's type; string members act as the
            # catch-all when present.
            other = []
            stringConversion = map(getStringOrPrimitiveConversion, stringTypes)
            numericConversion = map(getStringOrPrimitiveConversion, numericTypes)
            booleanConversion = map(getStringOrPrimitiveConversion, booleanTypes)
            if stringConversion:
                if booleanConversion:
                    other.append(CGIfWrapper("value.get().is_boolean()", booleanConversion[0]))
                if numericConversion:
                    other.append(CGIfWrapper("value.get().is_number()", numericConversion[0]))
                other.append(stringConversion[0])
            elif numericConversion:
                if booleanConversion:
                    other.append(CGIfWrapper("value.get().is_boolean()", booleanConversion[0]))
                other.append(numericConversion[0])
            else:
                assert booleanConversion
                other.append(booleanConversion[0])
            conversions.append(CGList(other, "\n\n"))
        # Nothing matched: throw a TypeError listing the member type names.
        conversions.append(CGGeneric(
            "throw_not_in_union(cx, \"%s\");\n"
            "Err(())" % ", ".join(names)))
        method = CGWrapper(
            CGIndenter(CGList(conversions, "\n\n")),
            pre="unsafe fn from_jsval(cx: *mut JSContext,\n"
                "                     value: HandleValue,\n"
                "                     _option: ())\n"
                "                     -> Result<ConversionResult<%s>, ()> {\n" % self.type,
            post="\n}")
        return CGWrapper(
            CGIndenter(CGList([
                CGGeneric("type Config = ();"),
                method,
            ], "\n")),
            pre="impl FromJSValConvertible for %s {\n" % self.type,
            post="\n}")

    def try_method(self, t):
        """Build one TryConvertToT helper for member type `t`."""
        templateVars = getUnionTypeTemplateVars(t, self.descriptorProvider)
        returnType = "Result<Option<%s>, ()>" % templateVars["typeName"]
        jsConversion = templateVars["jsConversion"]

        return CGWrapper(
            CGIndenter(jsConversion, 4),
            # TryConvertToObject is unused, but not generating it while generating others is tricky.
            pre="#[allow(dead_code)] unsafe fn TryConvertTo%s(cx: *mut JSContext, value: HandleValue) -> %s {\n"
                % (t.name, returnType),
            post="\n}")

    def define(self):
        from_jsval = self.from_jsval()
        methods = CGIndenter(CGList([
            self.try_method(t) for t in self.type.flatMemberTypes
        ], "\n\n"))
        return """
%s

impl %s {
%s
}
""" % (from_jsval.define(), self.type, methods.define())
class ClassItem:
    """Common base for the parts of a CGClass (members, methods,
    constructors, bases): stores a name and a visibility string.

    Subclasses must override declare() and define().
    """

    def __init__(self, name, visibility):
        self.visibility = visibility
        self.name = name

    def declare(self, cgClass):
        # Subclass responsibility.
        assert False

    def define(self, cgClass):
        # Subclass responsibility.
        assert False
class ClassBase(ClassItem):
    """A base-class entry in a CGClass's inheritance list."""

    def __init__(self, name, visibility='pub'):
        ClassItem.__init__(self, name, visibility)

    def declare(self, cgClass):
        # Rendered as "<visibility> <name>".
        return '{0} {1}'.format(self.visibility, self.name)

    def define(self, cgClass):
        # Only in the header
        return ''
class ClassMethod(ClassItem):
    """A method of a CGClass, rendered as a Rust `fn` in the impl block.

    Ported from C++ codegen; some knobs are vestigial in the Rust port:
    the `inline` argument is always stored as False and `bodyInHeader`
    as True, regardless of what the caller passes (NOTE(review):
    presumably intentional since Rust has no declaration/definition
    split — confirm before relying on those parameters).
    """
    def __init__(self, name, returnType, args, inline=False, static=False,
                 virtual=False, const=False, bodyInHeader=False,
                 templateArgs=None, visibility='public', body=None,
                 breakAfterReturnDecl="\n",
                 breakAfterSelf="\n", override=False):
        """
        override indicates whether to flag the method as MOZ_OVERRIDE
        """
        assert not override or virtual
        assert not (override and static)
        self.returnType = returnType
        self.args = args
        self.inline = False  # `inline` argument ignored (see class docstring)
        self.static = static
        self.virtual = virtual
        self.const = const
        self.bodyInHeader = True  # `bodyInHeader` argument ignored (see class docstring)
        self.templateArgs = templateArgs
        self.body = body
        self.breakAfterReturnDecl = breakAfterReturnDecl
        self.breakAfterSelf = breakAfterSelf
        self.override = override
        ClassItem.__init__(self, name, visibility)

    def getDecorators(self, declaring):
        # Space-separated decorator prefix ("inline", "static", "virtual").
        decorators = []
        if self.inline:
            decorators.append('inline')
        if declaring:
            if self.static:
                decorators.append('static')
            if self.virtual:
                decorators.append('virtual')
        if decorators:
            return ' '.join(decorators) + ' '
        return ''

    def getBody(self):
        # Override me or pass a string to constructor
        assert self.body is not None
        return self.body

    def declare(self, cgClass):
        """Render the complete `fn` item: decorators, signature and body."""
        templateClause = '<%s>' % ', '.join(self.templateArgs) \
            if self.bodyInHeader and self.templateArgs else ''
        args = ', '.join([a.declare() for a in self.args])
        if self.bodyInHeader:
            body = CGIndenter(CGGeneric(self.getBody())).define()
            body = ' {\n' + body + '\n}'
        else:
            # Declaration only (C++-heritage path; unused since
            # bodyInHeader is forced True in __init__).
            body = ';'

        return string.Template(
            "${decorators}%s"
            "${visibility}fn ${name}${templateClause}(${args})${returnType}${const}${override}${body}%s" %
            (self.breakAfterReturnDecl, self.breakAfterSelf)
        ).substitute({
            'templateClause': templateClause,
            'decorators': self.getDecorators(True),
            'returnType': (" -> %s" % self.returnType) if self.returnType else "",
            'name': self.name,
            'const': ' const' if self.const else '',
            'override': ' MOZ_OVERRIDE' if self.override else '',
            'args': args,
            'body': body,
            'visibility': self.visibility + ' ' if self.visibility != 'priv' else ''
        })

    def define(self, cgClass):
        # Everything is emitted by declare(); nothing to define separately.
        pass
class ClassConstructor(ClassItem):
    """
    Used for adding a constructor to a CGClass.

    args is a list of Argument objects that are the arguments taken by the
    constructor.

    inline should be True if the constructor should be marked inline.

    bodyInHeader should be True if the body should be placed in the class
    declaration in the header.

    visibility determines the visibility of the constructor (public,
    protected, private), defaults to private.

    explicit should be True if the constructor should be marked explicit.

    baseConstructors is a list of strings containing calls to base constructors,
    defaults to None.

    body contains a string with the code for the constructor, defaults to empty.
    """
    def __init__(self, args, inline=False, bodyInHeader=False,
                 visibility="priv", explicit=False, baseConstructors=None,
                 body=""):
        self.args = args
        self.inline = False  # `inline` argument ignored in the Rust port
        self.bodyInHeader = bodyInHeader
        self.explicit = explicit
        self.baseConstructors = baseConstructors or []
        self.body = body
        ClassItem.__init__(self, None, visibility)

    def getDecorators(self, declaring):
        # C++-style decorators; only meaningful on the define() path below.
        decorators = []
        if self.explicit:
            decorators.append('explicit')
        if self.inline and declaring:
            decorators.append('inline')
        if decorators:
            return ' '.join(decorators) + ' '
        return ''

    def getInitializationList(self, cgClass):
        # C++ member-initializer list, kept from the C++ codegen heritage.
        items = [str(c) for c in self.baseConstructors]
        for m in cgClass.members:
            if not m.static:
                initialize = m.body
                if initialize:
                    items.append(m.name + "(" + initialize + ")")

        if len(items) > 0:
            return '\n  : ' + ',\n    '.join(items)
        return ''

    def getBody(self, cgClass):
        # Builds an Rc-wrapped callback object whose `parent` is the first
        # base constructor call, then initializes the parent in place with
        # the first two constructor arguments.
        initializers = ["    parent: %s" % str(self.baseConstructors[0])]
        return (self.body + (
                "let mut ret = Rc::new(%s {\n"
                "%s\n"
                "});\n"
                "// Note: callback cannot be moved after calling init.\n"
                "match Rc::get_mut(&mut ret) {\n"
                "    Some(ref mut callback) => unsafe { callback.parent.init(%s, %s) },\n"
                "    None => unreachable!(),\n"
                "};\n"
                "ret") % (cgClass.name, '\n'.join(initializers),
                          self.args[0].name, self.args[1].name))

    def declare(self, cgClass):
        """Render a `pub fn new(...) -> Rc<Class>` constructor function."""
        args = ', '.join([a.declare() for a in self.args])
        body = '    ' + self.getBody(cgClass)
        body = stripTrailingWhitespace(body.replace('\n', '\n    '))
        if len(body) > 0:
            body += '\n'
        body = ' {\n' + body + '}'

        return string.Template("""\
pub fn ${decorators}new(${args}) -> Rc<${className}>${body}
""").substitute({'decorators': self.getDecorators(True),
                 'className': cgClass.getNameString(),
                 'args': args,
                 'body': body})

    def define(self, cgClass):
        # NOTE(review): self.getBody() below is called without the cgClass
        # argument that getBody() requires, so this path would raise a
        # TypeError if ever taken with bodyInHeader=False — it appears to
        # be dead code inherited from the C++ codegen.
        if self.bodyInHeader:
            return ''

        args = ', '.join([a.define() for a in self.args])

        body = '    ' + self.getBody()
        body = '\n' + stripTrailingWhitespace(body.replace('\n', '\n    '))
        if len(body) > 0:
            body += '\n'

        return string.Template("""\
${decorators}
${className}::${className}(${args})${initializationList}
{${body}}
""").substitute({'decorators': self.getDecorators(False),
                 'className': cgClass.getNameString(),
                 'args': args,
                 'initializationList': self.getInitializationList(cgClass),
                 'body': body})
class ClassMember(ClassItem):
    """A data field of a CGClass, rendered as a Rust struct field."""

    def __init__(self, name, type, visibility="priv", static=False,
                 body=None):
        self.body = body
        self.type = type
        self.static = static
        ClassItem.__init__(self, name, visibility)

    def declare(self, cgClass):
        # One struct-field line: "<visibility> <name>: <type>,".
        return '%s %s: %s,\n' % (self.visibility, self.name, self.type)

    def define(self, cgClass):
        # Only static members get a standalone definition.
        if not self.static:
            return ''
        initializer = (" = " + self.body) if self.body else ""
        return '%s %s::%s%s;\n' % (self.type, cgClass.getNameString(),
                                   self.name, initializer)
class CGClass(CGThing):
    """Renders a Rust `pub struct` plus its `impl` block from ClassItem
    parts (members, constructors, methods, a single base).

    Ported from C++ codegen; several options (templateArgs, typedefs,
    enums, unions, disallowCopyConstruction) survive from that heritage.
    """
    def __init__(self, name, bases=[], members=[], constructors=[],
                 destructor=None, methods=[],
                 typedefs=[], enums=[], unions=[], templateArgs=[],
                 templateSpecialization=[],
                 disallowCopyConstruction=False, indent='',
                 decorators='',
                 extradeclarations=''):
        CGThing.__init__(self)
        self.name = name
        self.bases = bases
        self.members = members
        self.constructors = constructors
        # We store our single destructor in a list, since all of our
        # code wants lists of members.
        self.destructors = [destructor] if destructor else []
        self.methods = methods
        self.typedefs = typedefs
        self.enums = enums
        self.unions = unions
        self.templateArgs = templateArgs
        self.templateSpecialization = templateSpecialization
        self.disallowCopyConstruction = disallowCopyConstruction
        self.indent = indent
        self.decorators = decorators
        self.extradeclarations = extradeclarations

    def getNameString(self):
        # Class name, with template specialization arguments when present.
        className = self.name
        if self.templateSpecialization:
            className = className + \
                '<%s>' % ', '.join([str(a) for a
                                    in self.templateSpecialization])
        return className

    def define(self):
        """Emit the struct declaration followed by `impl Name { ... }`.

        NOTE(review): not idempotent — when bases is non-empty this
        prepends a `parent` ClassMember to self.members on every call.
        """
        result = ''
        if self.templateArgs:
            templateArgs = [a.declare() for a in self.templateArgs]
            templateArgs = templateArgs[len(self.templateSpecialization):]
            result = result + self.indent + 'template <%s>\n' % ','.join([str(a) for a in templateArgs])

        if self.templateSpecialization:
            specialization = \
                '<%s>' % ', '.join([str(a) for a in self.templateSpecialization])
        else:
            specialization = ''

        myself = ''
        if self.decorators != '':
            myself += self.decorators + '\n'
        myself += '%spub struct %s%s' % (self.indent, self.name, specialization)
        result += myself

        assert len(self.bases) == 1  # XXjdm Can we support multiple inheritance?

        result += ' {\n'

        # Single inheritance is modelled as a leading `parent` field.
        if self.bases:
            self.members = [ClassMember("parent", self.bases[0].name, "pub")] + self.members

        result += CGIndenter(CGGeneric(self.extradeclarations),
                             len(self.indent)).define()

        def declareMembers(cgClass, memberList):
            # Concatenate the indented declarations of every item.
            result = ''

            for member in memberList:
                declaration = member.declare(cgClass)
                declaration = CGIndenter(CGGeneric(declaration)).define()
                result = result + declaration

            return result

        if self.disallowCopyConstruction:
            # C++-heritage: emit deleted copy constructor/assignment.
            class DisallowedCopyConstructor(object):
                def __init__(self):
                    self.visibility = "private"

                def declare(self, cgClass):
                    name = cgClass.getNameString()
                    return ("%s(const %s&) MOZ_DELETE;\n"
                            "void operator=(const %s) MOZ_DELETE;\n" % (name, name, name))

            disallowedCopyConstructors = [DisallowedCopyConstructor()]
        else:
            disallowedCopyConstructors = []

        # NOTE(review): `separator` is never used in either loop below,
        # and the '\n)' entry further down looks like a stray token —
        # harmless today since it is ignored.
        order = [(self.enums, ''), (self.unions, ''),
                 (self.typedefs, ''), (self.members, '')]

        for (memberList, separator) in order:
            memberString = declareMembers(self, memberList)
            if self.indent:
                memberString = CGIndenter(CGGeneric(memberString),
                                          len(self.indent)).define()
            result = result + memberString

        result += self.indent + '}\n\n'
        result += 'impl %s {\n' % self.name

        order = [(self.constructors + disallowedCopyConstructors, '\n'),
                 (self.destructors, '\n'), (self.methods, '\n)')]
        for (memberList, separator) in order:
            memberString = declareMembers(self, memberList)
            if self.indent:
                memberString = CGIndenter(CGGeneric(memberString),
                                          len(self.indent)).define()
            result = result + memberString

        result += "}"
        return result
class CGProxySpecialOperation(CGPerSignatureCall):
    """
    Base class for classes for calling an indexed or named special operation
    (don't use this directly, use the derived classes below).
    """
    def __init__(self, descriptor, operation):
        nativeName = MakeNativeName(descriptor.binaryNameFor(operation))
        operation = descriptor.operations[operation]
        assert len(operation.signatures()) == 1
        signature = operation.signatures()[0]

        (returnType, arguments) = signature

        # Getters need a nullable return type so "property not present"
        # is expressible as None.
        if operation.isGetter() and not returnType.nullable():
            returnType = IDLNullableType(returnType.location, returnType)

        # We pass len(arguments) as the final argument so that the
        # CGPerSignatureCall won't do any argument conversion of its own.
        CGPerSignatureCall.__init__(self, returnType, "", arguments, nativeName,
                                    False, descriptor, operation,
                                    len(arguments))

        if operation.isSetter() or operation.isCreator():
            # arguments[0] is the index or name of the item that we're setting.
            argument = arguments[1]
            info = getJSToNativeConversionInfo(
                argument.type, descriptor, treatNullAs=argument.treatNullAs,
                exceptionCode="return false;")
            template = info.template
            declType = info.declType

            templateValues = {
                "val": "value.handle()",
            }
            # Root the incoming descriptor value, then convert it to the
            # native argument type, ahead of the generated call.
            self.cgRoot.prepend(instantiateJSToNativeConversionTemplate(
                template, templateValues, declType, argument.identifier.name))
            self.cgRoot.prepend(CGGeneric("rooted!(in(cx) let value = desc.value);"))

    def getArguments(self):
        # Arguments were already converted above; pass them through as-is.
        args = [(a, process_arg(a.identifier.name, a)) for a in self.arguments]
        return args

    def wrap_return_value(self):
        # Only getters with templateValues wrap their (optional) result.
        if not self.idlNode.isGetter() or self.templateValues is None:
            return ""

        wrap = CGGeneric(wrapForType(**self.templateValues))
        wrap = CGIfWrapper("let Some(result) = result", wrap)
        return "\n" + wrap.define()
class CGProxyIndexedGetter(CGProxySpecialOperation):
    """
    Class to generate a call to an indexed getter. If templateValues is not None
    the returned value will be wrapped with wrapForType using templateValues.
    """
    def __init__(self, descriptor, templateValues=None):
        # templateValues must be assigned before the base __init__ runs:
        # wrap_return_value() reads self.templateValues.
        self.templateValues = templateValues
        CGProxySpecialOperation.__init__(self, descriptor, 'IndexedGetter')
class CGProxyIndexedSetter(CGProxySpecialOperation):
    """
    Class to generate a call to an indexed setter.
    """
    def __init__(self, descriptor):
        CGProxySpecialOperation.__init__(self, descriptor, 'IndexedSetter')
class CGProxyNamedOperation(CGProxySpecialOperation):
    """
    Class to generate a call to a named operation.

    Prepends code that extracts the property name from the jsid and
    unwraps the proxy into a native `this` before the generated call.
    """
    def __init__(self, descriptor, name):
        CGProxySpecialOperation.__init__(self, descriptor, name)

    def define(self):
        # Our first argument is the id we're getting.
        argName = self.arguments[0].identifier.name
        return ("let %s = string_jsid_to_string(cx, id);\n"
                "let this = UnwrapProxy(proxy);\n"
                "let this = &*this;\n" % argName +
                CGProxySpecialOperation.define(self))
class CGProxyNamedGetter(CGProxyNamedOperation):
    """
    Class to generate a call to an named getter. If templateValues is not None
    the returned value will be wrapped with wrapForType using templateValues.
    """
    def __init__(self, descriptor, templateValues=None):
        # templateValues must be assigned before the base __init__ runs:
        # wrap_return_value() reads self.templateValues.
        self.templateValues = templateValues
        # Calls the grandparent __init__ directly; equivalent here since
        # CGProxyNamedOperation.__init__ just forwards to it.
        CGProxySpecialOperation.__init__(self, descriptor, 'NamedGetter')
class CGProxyNamedPresenceChecker(CGProxyNamedGetter):
    """
    Class to generate a call that checks whether a named property exists.
    For now, we just delegate to CGProxyNamedGetter
    (the getter's Option-valued result doubles as a presence check).
    """
    def __init__(self, descriptor):
        CGProxyNamedGetter.__init__(self, descriptor)
class CGProxyNamedSetter(CGProxyNamedOperation):
    """
    Class to generate a call to a named setter.
    """
    def __init__(self, descriptor):
        # Bypasses CGProxyNamedOperation.__init__, which would just forward.
        CGProxySpecialOperation.__init__(self, descriptor, 'NamedSetter')
class CGProxyNamedDeleter(CGProxyNamedOperation):
    """
    Class to generate a call to a named deleter.
    """
    def __init__(self, descriptor):
        # Bypasses CGProxyNamedOperation.__init__, which would just forward.
        CGProxySpecialOperation.__init__(self, descriptor, 'NamedDeleter')
class CGProxyUnwrap(CGAbstractMethod):
    """Generates the UnwrapProxy helper: extracts the native object
    pointer from the proxy's private slot."""
    def __init__(self, descriptor):
        args = [Argument('HandleObject', 'obj')]
        CGAbstractMethod.__init__(self, descriptor, "UnwrapProxy",
                                  '*const ' + descriptor.concreteType, args,
                                  alwaysInline=True, unsafe=True)

    def definition_body(self):
        return CGGeneric("""\
/*if (xpc::WrapperFactory::IsXrayWrapper(obj)) {
    obj = js::UnwrapObject(obj);
}*/
//MOZ_ASSERT(IsProxy(obj));
let box_ = GetProxyPrivate(obj.get()).to_private() as *const %s;
return box_;""" % self.descriptor.concreteType)
class CGDOMJSProxyHandler_getOwnPropertyDescriptor(CGAbstractExternMethod):
    """Generates the proxy handler's getOwnPropertyDescriptor hook.

    Lookup order in the generated Rust: the indexed getter (for
    array-index ids), then the expando object, then the named getter —
    the named getter only for properties that do not shadow the
    prototype.
    """
    def __init__(self, descriptor):
        args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
                Argument('HandleId', 'id'),
                Argument('MutableHandle<PropertyDescriptor>', 'desc')]
        CGAbstractExternMethod.__init__(self, descriptor, "getOwnPropertyDescriptor",
                                        "bool", args)
        self.descriptor = descriptor

    def getBody(self):
        indexedGetter = self.descriptor.operations['IndexedGetter']
        indexedSetter = self.descriptor.operations['IndexedSetter']

        get = ""
        if indexedGetter or indexedSetter:
            get = "let index = get_array_index_from_id(cx, id);\n"

        if indexedGetter:
            # Indexed props are enumerable; read-only when there's no setter.
            attrs = "JSPROP_ENUMERATE"
            if self.descriptor.operations['IndexedSetter'] is None:
                attrs += " | JSPROP_READONLY"
            # FIXME(#11868) Should assign to desc.value, desc.get() is a copy.
            fillDescriptor = ("desc.get().value = result_root.get();\n"
                              "fill_property_descriptor(desc, proxy.get(), %s);\n"
                              "return true;" % attrs)
            templateValues = {
                'jsvalRef': 'result_root.handle_mut()',
                'successCode': fillDescriptor,
                'pre': 'rooted!(in(cx) let mut result_root = UndefinedValue());'
            }
            get += ("if let Some(index) = index {\n" +
                    "    let this = UnwrapProxy(proxy);\n" +
                    "    let this = &*this;\n" +
                    CGIndenter(CGProxyIndexedGetter(self.descriptor, templateValues)).define() + "\n" +
                    "}\n")

        namedGetter = self.descriptor.operations['NamedGetter']
        if namedGetter:
            # Named props are enumerable unless the interface is marked
            # [LegacyUnenumerableNamedProperties]; read-only without a setter.
            attrs = []
            if not self.descriptor.interface.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
                attrs.append("JSPROP_ENUMERATE")
            if self.descriptor.operations['NamedSetter'] is None:
                attrs.append("JSPROP_READONLY")
            if attrs:
                attrs = " | ".join(attrs)
            else:
                attrs = "0"
            # FIXME(#11868) Should assign to desc.value, desc.get() is a copy.
            fillDescriptor = ("desc.get().value = result_root.get();\n"
                              "fill_property_descriptor(desc, proxy.get(), %s);\n"
                              "return true;" % attrs)
            templateValues = {
                'jsvalRef': 'result_root.handle_mut()',
                'successCode': fillDescriptor,
                'pre': 'rooted!(in(cx) let mut result_root = UndefinedValue());'
            }

            # Once we start supporting OverrideBuiltins we need to make
            # ResolveOwnProperty or EnumerateOwnProperties filter out named
            # properties that shadow prototype properties.
            namedGet = """
if RUST_JSID_IS_STRING(id) {
    let mut has_on_proto = false;
    if !has_property_on_prototype(cx, proxy, id, &mut has_on_proto) {
        return false;
    }
    if !has_on_proto {
        %s
    }
}
""" % CGIndenter(CGProxyNamedGetter(self.descriptor, templateValues), 8).define()
        else:
            namedGet = ""

        # FIXME(#11868) Should assign to desc.obj, desc.get() is a copy.
        return get + """\
rooted!(in(cx) let mut expando = ptr::null_mut());
get_expando_object(proxy, expando.handle_mut());
//if (!xpc::WrapperFactory::IsXrayWrapper(proxy) && (expando = GetExpandoObject(proxy))) {
if !expando.is_null() {
    if !JS_GetPropertyDescriptorById(cx, expando.handle(), id, desc) {
        return false;
    }
    if !desc.obj.is_null() {
        // Pretend the property lives on the wrapper.
        desc.get().obj = proxy.get();
        return true;
    }
}
""" + namedGet + """\
desc.get().obj = ptr::null_mut();
return true;"""

    def definition_body(self):
        return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_defineProperty(CGAbstractExternMethod):
    """Generates the proxy handler's defineProperty hook: routes indexed
    and named ids to the interface's setters, failing the ObjectOpResult
    when the corresponding setter is missing."""
    def __init__(self, descriptor):
        args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
                Argument('HandleId', 'id'),
                Argument('Handle<PropertyDescriptor>', 'desc'),
                Argument('*mut ObjectOpResult', 'opresult')]
        CGAbstractExternMethod.__init__(self, descriptor, "defineProperty", "bool", args)
        self.descriptor = descriptor

    def getBody(self):
        set = ""

        indexedSetter = self.descriptor.operations['IndexedSetter']
        if indexedSetter:
            set += ("let index = get_array_index_from_id(cx, id);\n" +
                    "if let Some(index) = index {\n" +
                    "    let this = UnwrapProxy(proxy);\n" +
                    "    let this = &*this;\n" +
                    CGIndenter(CGProxyIndexedSetter(self.descriptor)).define() +
                    "    return (*opresult).succeed();\n" +
                    "}\n")
        elif self.descriptor.operations['IndexedGetter']:
            # Indexed getter without a setter: indexed defines must fail.
            set += ("if get_array_index_from_id(cx, id).is_some() {\n" +
                    "    return (*opresult).failNoIndexedSetter();\n" +
                    "}\n")

        namedSetter = self.descriptor.operations['NamedSetter']
        if namedSetter:
            if self.descriptor.hasUnforgeableMembers:
                raise TypeError("Can't handle a named setter on an interface that has "
                                "unforgeables. Figure out how that should work!")
            set += ("if RUST_JSID_IS_STRING(id) {\n" +
                    CGIndenter(CGProxyNamedSetter(self.descriptor)).define() +
                    "    return (*opresult).succeed();\n" +
                    "} else {\n" +
                    "    return false;\n" +
                    "}\n")
        else:
            # NOTE(review): this branch generates a NamedGetter call
            # unconditionally, so it presumably assumes every descriptor
            # reaching here has a named getter — confirm at the call site.
            set += ("if RUST_JSID_IS_STRING(id) {\n" +
                    CGIndenter(CGProxyNamedGetter(self.descriptor)).define() +
                    "    if result.is_some() {\n"
                    "        return (*opresult).failNoNamedSetter();\n"
                    "    }\n"
                    "}\n")
        set += "return proxyhandler::define_property(%s);" % ", ".join(a.name for a in self.args)
        return set

    def definition_body(self):
        return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_delete(CGAbstractExternMethod):
    """Generates the proxy handler's delete hook: runs the interface's
    named deleter when present, then falls back to the generic
    proxyhandler::delete."""
    def __init__(self, descriptor):
        args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
                Argument('HandleId', 'id'),
                Argument('*mut ObjectOpResult', 'res')]
        CGAbstractExternMethod.__init__(self, descriptor, "delete", "bool", args)
        self.descriptor = descriptor

    def getBody(self):
        set = ""
        if self.descriptor.operations['NamedDeleter']:
            if self.descriptor.hasUnforgeableMembers:
                raise TypeError("Can't handle a deleter on an interface that has "
                                "unforgeables. Figure out how that should work!")
            set += CGProxyNamedDeleter(self.descriptor).define()
        set += "return proxyhandler::delete(%s);" % ", ".join(a.name for a in self.args)
        return set

    def definition_body(self):
        return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_ownPropertyKeys(CGAbstractExternMethod):
    """Generates the ownKeys hook: appends supported indices (via
    Length()), then supported property names, then whatever lives on the
    expando object."""
    def __init__(self, descriptor):
        args = [Argument('*mut JSContext', 'cx'),
                Argument('HandleObject', 'proxy'),
                Argument('*mut AutoIdVector', 'props')]
        CGAbstractExternMethod.__init__(self, descriptor, "own_property_keys", "bool", args)
        self.descriptor = descriptor

    def getBody(self):
        body = dedent(
            """
            let unwrapped_proxy = UnwrapProxy(proxy);
            """)

        if self.descriptor.operations['IndexedGetter']:
            # One integer jsid per supported index.
            body += dedent(
                """
                for i in 0..(*unwrapped_proxy).Length() {
                    rooted!(in(cx) let rooted_jsid = int_to_jsid(i as i32));
                    AppendToAutoIdVector(props, rooted_jsid.handle().get());
                }
                """)

        if self.descriptor.operations['NamedGetter']:
            # One interned-string jsid per supported property name.
            body += dedent(
                """
                for name in (*unwrapped_proxy).SupportedPropertyNames() {
                    let cstring = CString::new(name).unwrap();
                    let jsstring = JS_AtomizeAndPinString(cx, cstring.as_ptr());
                    rooted!(in(cx) let rooted = jsstring);
                    let jsid = INTERNED_STRING_TO_JSID(cx, rooted.handle().get());
                    rooted!(in(cx) let rooted_jsid = jsid);
                    AppendToAutoIdVector(props, rooted_jsid.handle().get());
                }
                """)

        # Finally, any expando (script-added) properties.
        body += dedent(
            """
            rooted!(in(cx) let mut expando = ptr::null_mut());
            get_expando_object(proxy, expando.handle_mut());
            if !expando.is_null() {
                GetPropertyKeys(cx, expando.handle(), JSITER_OWNONLY | JSITER_HIDDEN | JSITER_SYMBOLS, props);
            }

            return true;
            """)

        return body

    def definition_body(self):
        return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_getOwnEnumerablePropertyKeys(CGAbstractExternMethod):
    """
    Generates the `getOwnEnumerablePropertyKeys` trap for interfaces with
    [LegacyUnenumerableNamedProperties]: like `own_property_keys` but named
    properties are deliberately omitted (only indexed and expando keys).
    """
    def __init__(self, descriptor):
        # Only meaningful when both an indexed getter and the legacy
        # unenumerable-named-properties attribute are present.
        assert (descriptor.operations["IndexedGetter"] and
                descriptor.interface.getExtendedAttribute("LegacyUnenumerableNamedProperties"))
        args = [Argument('*mut JSContext', 'cx'),
                Argument('HandleObject', 'proxy'),
                Argument('*mut AutoIdVector', 'props')]
        CGAbstractExternMethod.__init__(self, descriptor,
                                        "getOwnEnumerablePropertyKeys", "bool", args)
        self.descriptor = descriptor
    def getBody(self):
        """Return the Rust body: indexed keys plus expando keys."""
        body = dedent(
            """
            let unwrapped_proxy = UnwrapProxy(proxy);
            """)
        if self.descriptor.operations['IndexedGetter']:
            # One integer jsid per index in 0..Length().
            body += dedent(
                """
                for i in 0..(*unwrapped_proxy).Length() {
                    rooted!(in(cx) let rooted_jsid = int_to_jsid(i as i32));
                    AppendToAutoIdVector(props, rooted_jsid.handle().get());
                }
                """)
        body += dedent(
            """
            rooted!(in(cx) let mut expando = ptr::null_mut());
            get_expando_object(proxy, expando.handle_mut());
            if !expando.is_null() {
                GetPropertyKeys(cx, expando.handle(), JSITER_OWNONLY | JSITER_HIDDEN | JSITER_SYMBOLS, props);
            }
            return true;
            """)
        return body
    def definition_body(self):
        return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_hasOwn(CGAbstractExternMethod):
    """
    Generates the `hasOwn` proxy trap: checks indexed properties, the expando
    object, then named properties (the latter only when not shadowed by the
    prototype chain), writing the result into `*bp`.
    """
    def __init__(self, descriptor):
        args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
                Argument('HandleId', 'id'), Argument('*mut bool', 'bp')]
        CGAbstractExternMethod.__init__(self, descriptor, "hasOwn", "bool", args)
        self.descriptor = descriptor
    def getBody(self):
        """Assemble the Rust body from optional indexed/named sections plus
        the unconditional expando check."""
        indexedGetter = self.descriptor.operations['IndexedGetter']
        if indexedGetter:
            # Array-index ids short-circuit through the indexed getter.
            indexed = ("let index = get_array_index_from_id(cx, id);\n" +
                       "if let Some(index) = index {\n" +
                       "    let this = UnwrapProxy(proxy);\n" +
                       "    let this = &*this;\n" +
                       CGIndenter(CGProxyIndexedGetter(self.descriptor)).define() + "\n" +
                       "    *bp = result.is_some();\n" +
                       "    return true;\n" +
                       "}\n\n")
        else:
            indexed = ""
        namedGetter = self.descriptor.operations['NamedGetter']
        if namedGetter:
            # Named lookup only applies when the prototype chain doesn't
            # already supply the property.
            named = """\
if RUST_JSID_IS_STRING(id) {
    let mut has_on_proto = false;
    if !has_property_on_prototype(cx, proxy, id, &mut has_on_proto) {
        return false;
    }
    if !has_on_proto {
        %s
        *bp = result.is_some();
        return true;
    }
}
""" % CGIndenter(CGProxyNamedGetter(self.descriptor), 8).define()
        else:
            named = ""
        return indexed + """\
rooted!(in(cx) let mut expando = ptr::null_mut());
get_expando_object(proxy, expando.handle_mut());
if !expando.is_null() {
    let ok = JS_HasPropertyById(cx, expando.handle(), id, bp);
    if !ok || *bp {
        return ok;
    }
}
""" + named + """\
*bp = false;
return true;"""
    def definition_body(self):
        return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_get(CGAbstractExternMethod):
    """
    Generates the `get` proxy trap: tries indexed properties (or the expando
    object), then the prototype chain, then named properties; falls back to
    `undefined` when nothing matches.
    """
    def __init__(self, descriptor):
        args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
                Argument('HandleValue', 'receiver'), Argument('HandleId', 'id'),
                Argument('MutableHandleValue', 'vp')]
        CGAbstractExternMethod.__init__(self, descriptor, "get", "bool", args)
        self.descriptor = descriptor
    def getBody(self):
        """Assemble the Rust body from the expando lookup plus optional
        indexed/named getter sections."""
        # Expando lookup, forwarded with the original receiver.
        getFromExpando = """\
rooted!(in(cx) let mut expando = ptr::null_mut());
get_expando_object(proxy, expando.handle_mut());
if !expando.is_null() {
    let mut hasProp = false;
    if !JS_HasPropertyById(cx, expando.handle(), id, &mut hasProp) {
        return false;
    }
    if hasProp {
        return JS_ForwardGetPropertyTo(cx, expando.handle(), id, receiver, vp);
    }
}"""
        # Substitutions consumed by the CGProxy*Getter templates.
        templateValues = {
            'jsvalRef': 'vp',
            'successCode': 'return true;',
        }
        indexedGetter = self.descriptor.operations['IndexedGetter']
        if indexedGetter:
            # With an indexed getter, array-index ids never consult the
            # expando; non-index ids do.
            getIndexedOrExpando = ("let index = get_array_index_from_id(cx, id);\n" +
                                   "if let Some(index) = index {\n" +
                                   "    let this = UnwrapProxy(proxy);\n" +
                                   "    let this = &*this;\n" +
                                   CGIndenter(CGProxyIndexedGetter(self.descriptor, templateValues)).define())
            getIndexedOrExpando += """\
    // Even if we don't have this index, we don't forward the
    // get on to our expando object.
} else {
    %s
}
""" % (stripTrailingWhitespace(getFromExpando.replace('\n', '\n    ')))
        else:
            getIndexedOrExpando = getFromExpando + "\n"
        namedGetter = self.descriptor.operations['NamedGetter']
        if namedGetter:
            getNamed = ("if RUST_JSID_IS_STRING(id) {\n" +
                        CGIndenter(CGProxyNamedGetter(self.descriptor, templateValues)).define() +
                        "}\n")
        else:
            getNamed = ""
        return """\
//MOZ_ASSERT(!xpc::WrapperFactory::IsXrayWrapper(proxy),
//"Should not have a XrayWrapper here");
%s
let mut found = false;
if !get_property_on_prototype(cx, proxy, receiver, id, &mut found, vp) {
    return false;
}
if found {
    return true;
}
%s
vp.set(UndefinedValue());
return true;""" % (getIndexedOrExpando, getNamed)
    def definition_body(self):
        return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_className(CGAbstractExternMethod):
    """
    Generates the `className` proxy trap, which returns the interface name
    as a static C string pointer.
    """
    def __init__(self, descriptor):
        arguments = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', '_proxy')]
        CGAbstractExternMethod.__init__(self, descriptor, "className", "*const i8", arguments, doesNotPanic=True)
        self.descriptor = descriptor
    def getBody(self):
        # Cast the byte-array constant to the `*const i8` the trap returns.
        name_bytes = str_to_const_array(self.descriptor.name)
        return '%s as *const u8 as *const i8' % name_bytes
    def definition_body(self):
        body = self.getBody()
        return CGGeneric(body)
class CGAbstractClassHook(CGAbstractExternMethod):
    """
    Meant for implementing JSClass hooks, like Finalize or Trace. Does very raw
    'this' unwrapping as it assumes that the unwrapped type is always known.

    Subclasses override `generate_code` to supply the hook-specific body.
    """
    def __init__(self, descriptor, name, returnType, args, doesNotPanic=False):
        # Bug fix: `doesNotPanic` was accepted but silently dropped, so hooks
        # constructed with doesNotPanic=True (e.g. the trace hook) still got
        # the panic-catching wrapper. Forward it to the base class, which
        # already supports the keyword (see CGDOMJSProxyHandler_className).
        CGAbstractExternMethod.__init__(self, descriptor, name, returnType,
                                        args, doesNotPanic=doesNotPanic)
    def definition_body_prologue(self):
        # Raw unwrap of the native object stored in the reflector `obj`.
        return CGGeneric("""
let this = native_from_object::<%s>(obj).unwrap();
""" % self.descriptor.concreteType)
    def definition_body(self):
        return CGList([
            self.definition_body_prologue(),
            self.generate_code(),
        ])
    def generate_code(self):
        raise NotImplementedError  # Override me!
def finalizeHook(descriptor, hookName, context):
    """
    Build the Rust statements for an interface's finalize hook: tear down
    global state or the weak-reference box when applicable, then drop the
    boxed native object.

    `hookName` and `context` are not read by this implementation; they are
    kept for compatibility with existing callers (e.g. CGClassFinalizeHook).
    """
    release = ""
    if descriptor.isGlobal():
        # Globals have extra bookkeeping torn down by finalize_global.
        release += """\
finalize_global(obj);
"""
    elif descriptor.weakReferenceable:
        # Clear the weak box and free it once the last weak reference is gone.
        release += """\
let weak_box_ptr = JS_GetReservedSlot(obj, DOM_WEAK_SLOT).to_private() as *mut WeakBox<%s>;
if !weak_box_ptr.is_null() {
    let count = {
        let weak_box = &*weak_box_ptr;
        assert!(weak_box.value.get().is_some());
        assert!(weak_box.count.get() > 0);
        weak_box.value.set(None);
        let count = weak_box.count.get() - 1;
        weak_box.count.set(count);
        count
    };
    if count == 0 {
        mem::drop(Box::from_raw(weak_box_ptr));
    }
}
""" % descriptor.concreteType
    release += """\
if !this.is_null() {
    // The pointer can be null if the object is the unforgeable holder of that interface.
    let _ = Box::from_raw(this as *mut %s);
}
debug!("%s finalize: {:p}", this);\
""" % (descriptor.concreteType, descriptor.concreteType)
    return release
class CGClassTraceHook(CGAbstractClassHook):
    """
    A hook to trace through our native object; used for GC and CC
    """
    def __init__(self, descriptor):
        arguments = [Argument('*mut JSTracer', 'trc'), Argument('*mut JSObject', 'obj')]
        CGAbstractClassHook.__init__(self, descriptor, TRACE_HOOK_NAME, 'void',
                                     arguments, doesNotPanic=True)
        self.traceGlobal = descriptor.isGlobal()
    def generate_code(self):
        # Guard against a GC that runs while the reflector is mid-creation,
        # then trace the native through the tracer argument.
        trace_stmt = ("if this.is_null() { return; } // GC during obj creation\n"
                      "(*this).trace(%s);" % self.args[0].name)
        parts = [CGGeneric(trace_stmt)]
        if self.traceGlobal:
            parts.append(CGGeneric("trace_global(trc, obj);"))
        return CGList(parts, "\n")
class CGClassConstructHook(CGAbstractExternMethod):
    """
    JS-visible constructor for our objects.

    When `constructor` is given, a named constructor hook is generated and the
    hook name is suffixed with the constructor's identifier; otherwise the
    interface's default constructor is used.
    """
    def __init__(self, descriptor, constructor=None):
        args = [Argument('*mut JSContext', 'cx'), Argument('u32', 'argc'), Argument('*mut JSVal', 'vp')]
        name = CONSTRUCT_HOOK_NAME
        if constructor:
            name += "_" + constructor.identifier.name
        else:
            constructor = descriptor.interface.ctor()
            assert constructor
        CGAbstractExternMethod.__init__(self, descriptor, name, 'bool', args)
        self.constructor = constructor
        self.exposureSet = descriptor.interface.exposureSet
    def definition_body(self):
        # Recover the global from the callee object.
        preamble = """let global = GlobalScope::from_object(JS_CALLEE(cx, vp).to_object());\n"""
        if len(self.exposureSet) == 1:
            # A single exposure scope lets us downcast to the concrete global type.
            preamble += "let global = Root::downcast::<dom::types::%s>(global).unwrap();\n" % list(self.exposureSet)[0]
        preamble += """let args = CallArgs::from_vp(vp, argc);\n"""
        preamble = CGGeneric(preamble)
        name = self.constructor.identifier.name
        nativeName = MakeNativeName(self.descriptor.binaryNameFor(name))
        callGenerator = CGMethodCall(["&global"], nativeName, True,
                                     self.descriptor, self.constructor)
        return CGList([preamble, callGenerator])
class CGClassFinalizeHook(CGAbstractClassHook):
    """
    A hook for finalize, used to release our native object.
    """
    def __init__(self, descriptor):
        arguments = [Argument('*mut JSFreeOp', '_fop'), Argument('*mut JSObject', 'obj')]
        CGAbstractClassHook.__init__(self, descriptor, FINALIZE_HOOK_NAME,
                                     'void', arguments)
    def generate_code(self):
        # Delegate body generation to the shared finalizeHook helper.
        body = finalizeHook(self.descriptor, self.name, self.args[0].name)
        return CGGeneric(body)
class CGDOMJSProxyHandlerDOMClass(CGThing):
    """
    Emits the static `DOMClass` instance used by a proxy-based DOM binding.
    """
    def __init__(self, descriptor):
        CGThing.__init__(self)
        self.descriptor = descriptor
    def define(self):
        return "static Class: DOMClass = %s;\n" % DOMClass(self.descriptor)
class CGInterfaceTrait(CGThing):
    """
    Generates the Rust `FooMethods` trait for an interface: one method per
    non-static IDL method/attribute (plus proxy operations), which the DOM
    struct implements. `self.empty` records whether any methods were emitted.
    """
    def __init__(self, descriptor):
        CGThing.__init__(self)
        def attribute_arguments(needCx, argument=None):
            # Yields (name, type) pairs for getter/setter signatures.
            if needCx:
                yield "cx", "*mut JSContext"
            if argument:
                yield "value", argument_type(descriptor, argument)
        def members():
            # Yields (name, arguments, rettype) for every trait method.
            for m in descriptor.interface.members:
                if (m.isMethod() and not m.isStatic() and
                        not m.isMaplikeOrSetlikeOrIterableMethod() and
                        (not m.isIdentifierLess() or m.isStringifier())):
                    name = CGSpecializedMethod.makeNativeName(descriptor, m)
                    infallible = 'infallible' in descriptor.getExtendedAttributes(m)
                    # Overloads get '_', '__', ... suffixes per signature index.
                    for idx, (rettype, arguments) in enumerate(m.signatures()):
                        arguments = method_arguments(descriptor, rettype, arguments)
                        rettype = return_type(descriptor, rettype, infallible)
                        yield name + ('_' * idx), arguments, rettype
                elif m.isAttr() and not m.isStatic():
                    name = CGSpecializedGetter.makeNativeName(descriptor, m)
                    infallible = 'infallible' in descriptor.getExtendedAttributes(m, getter=True)
                    yield (name,
                           attribute_arguments(typeNeedsCx(m.type, True)),
                           return_type(descriptor, m.type, infallible))
                    if not m.readonly:
                        name = CGSpecializedSetter.makeNativeName(descriptor, m)
                        infallible = 'infallible' in descriptor.getExtendedAttributes(m, setter=True)
                        if infallible:
                            rettype = "()"
                        else:
                            rettype = "ErrorResult"
                        yield name, attribute_arguments(typeNeedsCx(m.type, False), m.type), rettype
            if descriptor.proxy:
                for name, operation in descriptor.operations.iteritems():
                    if not operation or operation.isStringifier():
                        continue
                    assert len(operation.signatures()) == 1
                    rettype, arguments = operation.signatures()[0]
                    infallible = 'infallible' in descriptor.getExtendedAttributes(operation)
                    if operation.isGetter():
                        # Getters for indexed/named properties return Option-like
                        # nullable values.
                        if not rettype.nullable():
                            rettype = IDLNullableType(rettype.location, rettype)
                        arguments = method_arguments(descriptor, rettype, arguments)
                        # If this interface 'supports named properties', then we
                        # should be able to access 'supported property names'
                        #
                        # WebIDL, Second Draft, section 3.2.4.5
                        # https://heycam.github.io/webidl/#idl-named-properties
                        if operation.isNamed():
                            yield "SupportedPropertyNames", [], "Vec<DOMString>"
                    else:
                        arguments = method_arguments(descriptor, rettype, arguments)
                    rettype = return_type(descriptor, rettype, infallible)
                    yield name, arguments, rettype
        def fmt(arguments):
            # Render ", name: Type" fragments for a signature.
            return "".join(", %s: %s" % argument for argument in arguments)
        def contains_unsafe_arg(arguments):
            # Methods taking a raw *mut JSContext are declared `unsafe`.
            if not arguments or len(arguments) == 0:
                return False
            return reduce((lambda x, y: x or y[1] == '*mut JSContext'), arguments, False)
        methods = []
        for name, arguments, rettype in members():
            arguments = list(arguments)
            methods.append(CGGeneric("%sfn %s(&self%s) -> %s;\n" % (
                'unsafe ' if contains_unsafe_arg(arguments) else '',
                name, fmt(arguments), rettype))
            )
        if methods:
            self.cgRoot = CGWrapper(CGIndenter(CGList(methods, "")),
                                    pre="pub trait %sMethods {\n" % descriptor.interface.identifier.name,
                                    post="}")
        else:
            self.cgRoot = CGGeneric("")
        self.empty = not methods
    def define(self):
        return self.cgRoot.define()
class CGWeakReferenceableTrait(CGThing):
    """
    Emits the empty `WeakReferenceable` impl for interfaces marked as
    weak-referenceable in their descriptor.
    """
    def __init__(self, descriptor):
        CGThing.__init__(self)
        assert descriptor.weakReferenceable
        interface_name = descriptor.interface.identifier.name
        self.code = "impl WeakReferenceable for %s {}" % interface_name
    def define(self):
        return self.code
def generate_imports(config, cgthings, descriptors, callbacks=None, dictionaries=None, enums=None):
    """
    Wrap `cgthings` in a CGImports node carrying the fixed set of `use`
    paths every generated binding module may need (SpiderMonkey bindings,
    glue, DOM infrastructure, and std items).

    `callbacks`/`dictionaries`/`enums` default to empty lists when omitted.
    """
    if not callbacks:
        callbacks = []
    if not dictionaries:
        dictionaries = []
    if not enums:
        enums = []
    # The list below is intentionally exhaustive: unused imports in the
    # generated Rust are tolerated so every module can share one list.
    return CGImports(cgthings, descriptors, callbacks, dictionaries, enums, [
        'core::nonzero::NonZero',
        'js',
        'js::JSCLASS_GLOBAL_SLOT_COUNT',
        'js::JSCLASS_IS_DOMJSCLASS',
        'js::JSCLASS_IS_GLOBAL',
        'js::JSCLASS_RESERVED_SLOTS_MASK',
        'js::JS_CALLEE',
        'js::error::throw_type_error',
        'js::jsapi::AutoIdVector',
        'js::jsapi::Call',
        'js::jsapi::CallArgs',
        'js::jsapi::CurrentGlobalOrNull',
        'js::jsapi::FreeOp',
        'js::jsapi::GetPropertyKeys',
        'js::jsapi::GetWellKnownSymbol',
        'js::jsapi::Handle',
        'js::jsapi::HandleId',
        'js::jsapi::HandleObject',
        'js::jsapi::HandleValue',
        'js::jsapi::HandleValueArray',
        'js::jsapi::Heap',
        'js::jsapi::INTERNED_STRING_TO_JSID',
        'js::jsapi::IsCallable',
        'js::jsapi::JSAutoCompartment',
        'js::jsapi::JSCLASS_RESERVED_SLOTS_SHIFT',
        'js::jsapi::JSClass',
        'js::jsapi::JSContext',
        'js::jsapi::JSFreeOp',
        'js::jsapi::JSFunctionSpec',
        'js::jsapi::JSITER_HIDDEN',
        'js::jsapi::JSITER_OWNONLY',
        'js::jsapi::JSITER_SYMBOLS',
        'js::jsapi::JSJitGetterCallArgs',
        'js::jsapi::JSJitInfo',
        'js::jsapi::JSJitInfo_AliasSet',
        'js::jsapi::JSJitInfo_ArgType',
        'js::jsapi::JSJitInfo_OpType',
        'js::jsapi::JSJitMethodCallArgs',
        'js::jsapi::JSJitSetterCallArgs',
        'js::jsapi::JSNative',
        'js::jsapi::JSNativeWrapper',
        'js::jsapi::JSObject',
        'js::jsapi::JSPROP_ENUMERATE',
        'js::jsapi::JSPROP_PERMANENT',
        'js::jsapi::JSPROP_READONLY',
        'js::jsapi::JSPROP_SHARED',
        'js::jsapi::JSPropertySpec',
        'js::jsapi::JSString',
        'js::jsapi::JSTracer',
        'js::jsapi::JSType',
        'js::jsapi::JSTypedMethodJitInfo',
        'js::jsapi::JSValueType',
        'js::jsapi::JS_AtomizeAndPinString',
        'js::jsapi::JS_CallFunctionValue',
        'js::jsapi::JS_CopyPropertiesFrom',
        'js::jsapi::JS_DefineProperty',
        'js::jsapi::JS_DefinePropertyById2',
        'js::jsapi::JS_ForwardGetPropertyTo',
        'js::jsapi::JS_GetErrorPrototype',
        'js::jsapi::JS_GetFunctionPrototype',
        'js::jsapi::JS_GetGlobalForObject',
        'js::jsapi::JS_GetIteratorPrototype',
        'js::jsapi::JS_GetObjectPrototype',
        'js::jsapi::JS_GetProperty',
        'js::jsapi::JS_GetPropertyById',
        'js::jsapi::JS_GetPropertyDescriptorById',
        'js::jsapi::JS_GetReservedSlot',
        'js::jsapi::JS_HasProperty',
        'js::jsapi::JS_HasPropertyById',
        'js::jsapi::JS_InitializePropertiesFromCompatibleNativeObject',
        'js::jsapi::JS_NewObject',
        'js::jsapi::JS_NewObjectWithGivenProto',
        'js::jsapi::JS_NewObjectWithoutMetadata',
        'js::jsapi::JS_ObjectIsDate',
        'js::jsapi::JS_SetImmutablePrototype',
        'js::jsapi::JS_SetProperty',
        'js::jsapi::JS_SetReservedSlot',
        'js::jsapi::JS_SplicePrototype',
        'js::jsapi::JS_WrapValue',
        'js::jsapi::MutableHandle',
        'js::jsapi::MutableHandleObject',
        'js::jsapi::MutableHandleValue',
        'js::jsapi::ObjectOpResult',
        'js::jsapi::PropertyDescriptor',
        'js::jsapi::RootedId',
        'js::jsapi::RootedObject',
        'js::jsapi::RootedString',
        'js::jsapi::SymbolCode',
        'js::jsapi::jsid',
        'js::jsval::JSVal',
        'js::jsval::NullValue',
        'js::jsval::ObjectValue',
        'js::jsval::ObjectOrNullValue',
        'js::jsval::PrivateValue',
        'js::jsval::UndefinedValue',
        'js::glue::AppendToAutoIdVector',
        'js::glue::CallJitGetterOp',
        'js::glue::CallJitMethodOp',
        'js::glue::CallJitSetterOp',
        'js::glue::CreateProxyHandler',
        'js::glue::GetProxyPrivate',
        'js::glue::NewProxyObject',
        'js::glue::ProxyTraps',
        'js::glue::RUST_JSID_IS_STRING',
        'js::glue::RUST_SYMBOL_TO_JSID',
        'js::glue::int_to_jsid',
        'js::panic::maybe_resume_unwind',
        'js::panic::wrap_panic',
        'js::rust::GCMethods',
        'js::rust::define_methods',
        'js::rust::define_properties',
        'js::rust::get_object_class',
        'dom',
        'dom::bindings',
        'dom::bindings::codegen::InterfaceObjectMap',
        'dom::bindings::constant::ConstantSpec',
        'dom::bindings::constant::ConstantVal',
        'dom::bindings::interface::ConstructorClassHook',
        'dom::bindings::interface::InterfaceConstructorBehavior',
        'dom::bindings::interface::NonCallbackInterfaceObjectClass',
        'dom::bindings::interface::create_callback_interface_object',
        'dom::bindings::interface::create_global_object',
        'dom::bindings::interface::create_interface_prototype_object',
        'dom::bindings::interface::create_named_constructors',
        'dom::bindings::interface::create_noncallback_interface_object',
        'dom::bindings::interface::define_guarded_constants',
        'dom::bindings::interface::define_guarded_methods',
        'dom::bindings::interface::define_guarded_properties',
        'dom::bindings::interface::is_exposed_in',
        'dom::bindings::iterable::Iterable',
        'dom::bindings::iterable::IteratorType',
        'dom::bindings::js::JS',
        'dom::bindings::js::Root',
        'dom::bindings::js::RootedReference',
        'dom::bindings::namespace::NamespaceObjectClass',
        'dom::bindings::namespace::create_namespace_object',
        'dom::bindings::reflector::MutDomObject',
        'dom::bindings::reflector::DomObject',
        'dom::bindings::utils::AsVoidPtr',
        'dom::bindings::utils::DOMClass',
        'dom::bindings::utils::DOMJSClass',
        'dom::bindings::utils::DOM_PROTO_UNFORGEABLE_HOLDER_SLOT',
        'dom::bindings::utils::JSCLASS_DOM_GLOBAL',
        'dom::bindings::utils::ProtoOrIfaceArray',
        'dom::bindings::utils::enumerate_global',
        'dom::bindings::utils::finalize_global',
        'dom::bindings::utils::find_enum_value',
        'dom::bindings::utils::generic_getter',
        'dom::bindings::utils::generic_lenient_getter',
        'dom::bindings::utils::generic_lenient_setter',
        'dom::bindings::utils::generic_method',
        'dom::bindings::utils::generic_setter',
        'dom::bindings::utils::get_array_index_from_id',
        'dom::bindings::utils::get_dictionary_property',
        'dom::bindings::utils::get_property_on_prototype',
        'dom::bindings::utils::get_proto_or_iface_array',
        'dom::bindings::utils::has_property_on_prototype',
        'dom::bindings::utils::is_platform_object',
        'dom::bindings::utils::resolve_global',
        'dom::bindings::utils::set_dictionary_property',
        'dom::bindings::utils::trace_global',
        'dom::bindings::trace::JSTraceable',
        'dom::bindings::trace::RootedTraceable',
        'dom::bindings::trace::RootedTraceableBox',
        'dom::bindings::callback::CallSetup',
        'dom::bindings::callback::CallbackContainer',
        'dom::bindings::callback::CallbackInterface',
        'dom::bindings::callback::CallbackFunction',
        'dom::bindings::callback::CallbackObject',
        'dom::bindings::callback::ExceptionHandling',
        'dom::bindings::callback::wrap_call_this_object',
        'dom::bindings::conversions::ConversionBehavior',
        'dom::bindings::conversions::ConversionResult',
        'dom::bindings::conversions::DOM_OBJECT_SLOT',
        'dom::bindings::conversions::FromJSValConvertible',
        'dom::bindings::conversions::IDLInterface',
        'dom::bindings::conversions::StringificationBehavior',
        'dom::bindings::conversions::ToJSValConvertible',
        'dom::bindings::conversions::is_array_like',
        'dom::bindings::conversions::native_from_handlevalue',
        'dom::bindings::conversions::native_from_object',
        'dom::bindings::conversions::private_from_object',
        'dom::bindings::conversions::root_from_handleobject',
        'dom::bindings::conversions::root_from_handlevalue',
        'dom::bindings::conversions::root_from_object',
        'dom::bindings::conversions::string_jsid_to_string',
        'dom::bindings::codegen::PrototypeList',
        'dom::bindings::codegen::RegisterBindings',
        'dom::bindings::codegen::UnionTypes',
        'dom::bindings::error::Error',
        'dom::bindings::error::ErrorResult',
        'dom::bindings::error::Fallible',
        'dom::bindings::error::Error::JSFailed',
        'dom::bindings::error::throw_dom_exception',
        'dom::bindings::guard::Condition',
        'dom::bindings::guard::Guard',
        'dom::bindings::inheritance::Castable',
        'dom::bindings::proxyhandler',
        'dom::bindings::proxyhandler::ensure_expando_object',
        'dom::bindings::proxyhandler::fill_property_descriptor',
        'dom::bindings::proxyhandler::get_expando_object',
        'dom::bindings::proxyhandler::get_property_descriptor',
        'dom::bindings::mozmap::MozMap',
        'dom::bindings::num::Finite',
        'dom::bindings::str::ByteString',
        'dom::bindings::str::DOMString',
        'dom::bindings::str::USVString',
        'dom::bindings::weakref::DOM_WEAK_SLOT',
        'dom::bindings::weakref::WeakBox',
        'dom::bindings::weakref::WeakReferenceable',
        'dom::windowproxy::WindowProxy',
        'dom::globalscope::GlobalScope',
        'mem::heap_size_of_raw_self_and_children',
        'libc',
        'servo_config::prefs::PREFS',
        'std::borrow::ToOwned',
        'std::cmp',
        'std::mem',
        'std::num',
        'std::os',
        'std::panic',
        'std::ptr',
        'std::str',
        'std::rc',
        'std::rc::Rc',
        'std::default::Default',
        'std::ffi::CString',
    ], config)
class CGDescriptor(CGThing):
    """
    Top-level code generator for one interface descriptor: assembles all the
    per-member and per-interface CGThings into a `pub mod FooBinding` plus a
    `pub use` re-export line for the names other modules need.
    """
    def __init__(self, descriptor, config, soleDescriptor):
        CGThing.__init__(self)
        assert not descriptor.concrete or not descriptor.interface.isCallback()
        reexports = []
        def reexportedName(name):
            # Names that don't start with the interface name get an
            # interface-prefixed alias when several descriptors share a file.
            if name.startswith(descriptor.name):
                return name
            if not soleDescriptor:
                return '%s as %s%s' % (name, descriptor.name, name)
            return name
        cgThings = []
        unscopableNames = []
        # Per-member codegen: methods, then attributes (getters/setters).
        for m in descriptor.interface.members:
            if (m.isMethod() and
                    (not m.isIdentifierLess() or m == descriptor.operations["Stringifier"])):
                if m.getExtendedAttribute("Unscopable"):
                    assert not m.isStatic()
                    unscopableNames.append(m.identifier.name)
                if m.isStatic():
                    assert descriptor.interface.hasInterfaceObject()
                    cgThings.append(CGStaticMethod(descriptor, m))
                elif not descriptor.interface.isCallback():
                    cgThings.append(CGSpecializedMethod(descriptor, m))
                    cgThings.append(CGMemberJITInfo(descriptor, m))
            elif m.isAttr():
                if m.stringifier:
                    raise TypeError("Stringifier attributes not supported yet. "
                                    "See https://github.com/servo/servo/issues/7590\n"
                                    "%s" % m.location)
                if m.getExtendedAttribute("Unscopable"):
                    assert not m.isStatic()
                    unscopableNames.append(m.identifier.name)
                if m.isStatic():
                    assert descriptor.interface.hasInterfaceObject()
                    cgThings.append(CGStaticGetter(descriptor, m))
                elif not descriptor.interface.isCallback():
                    cgThings.append(CGSpecializedGetter(descriptor, m))
                if not m.readonly:
                    if m.isStatic():
                        assert descriptor.interface.hasInterfaceObject()
                        cgThings.append(CGStaticSetter(descriptor, m))
                    elif not descriptor.interface.isCallback():
                        cgThings.append(CGSpecializedSetter(descriptor, m))
                elif m.getExtendedAttribute("PutForwards"):
                    cgThings.append(CGSpecializedForwardingSetter(descriptor, m))
                elif m.getExtendedAttribute("Replaceable"):
                    cgThings.append(CGSpecializedReplaceableSetter(descriptor, m))
                if (not m.isStatic() and not descriptor.interface.isCallback()):
                    cgThings.append(CGMemberJITInfo(descriptor, m))
        if descriptor.concrete:
            cgThings.append(CGClassFinalizeHook(descriptor))
            cgThings.append(CGClassTraceHook(descriptor))
        # If there are no constant members, don't make a module for constants
        constMembers = [CGConstant(m) for m in descriptor.interface.members if m.isConst()]
        if constMembers:
            cgThings.append(CGNamespace.build([descriptor.name + "Constants"],
                                              CGIndenter(CGList(constMembers)),
                                              public=True))
            reexports.append(descriptor.name + 'Constants')
        if descriptor.proxy:
            cgThings.append(CGDefineProxyHandler(descriptor))
        properties = PropertyArrays(descriptor)
        if descriptor.concrete:
            if descriptor.proxy:
                # Proxy-based bindings get the full set of proxy traps.
                # cgThings.append(CGProxyIsProxy(descriptor))
                cgThings.append(CGProxyUnwrap(descriptor))
                cgThings.append(CGDOMJSProxyHandlerDOMClass(descriptor))
                cgThings.append(CGDOMJSProxyHandler_ownPropertyKeys(descriptor))
                if descriptor.interface.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
                    cgThings.append(CGDOMJSProxyHandler_getOwnEnumerablePropertyKeys(descriptor))
                cgThings.append(CGDOMJSProxyHandler_getOwnPropertyDescriptor(descriptor))
                cgThings.append(CGDOMJSProxyHandler_className(descriptor))
                cgThings.append(CGDOMJSProxyHandler_get(descriptor))
                cgThings.append(CGDOMJSProxyHandler_hasOwn(descriptor))
                if descriptor.operations['IndexedSetter'] or descriptor.operations['NamedSetter']:
                    cgThings.append(CGDOMJSProxyHandler_defineProperty(descriptor))
                # We want to prevent indexed deleters from compiling at all.
                assert not descriptor.operations['IndexedDeleter']
                if descriptor.operations['NamedDeleter']:
                    cgThings.append(CGDOMJSProxyHandler_delete(descriptor))
                # cgThings.append(CGDOMJSProxyHandler(descriptor))
                # cgThings.append(CGIsMethod(descriptor))
                pass
            else:
                cgThings.append(CGDOMJSClass(descriptor))
                pass
            if descriptor.isGlobal():
                cgThings.append(CGWrapGlobalMethod(descriptor, properties))
            else:
                cgThings.append(CGWrapMethod(descriptor))
            reexports.append('Wrap')
        haveUnscopables = False
        if not descriptor.interface.isCallback() and not descriptor.interface.isNamespace():
            if unscopableNames:
                haveUnscopables = True
                # Emit the static array of [Unscopable] member names.
                cgThings.append(
                    CGList([CGGeneric("const unscopable_names: &'static [&'static [u8]] = &["),
                            CGIndenter(CGList([CGGeneric(str_to_const_array(name)) for
                                               name in unscopableNames], ",\n")),
                            CGGeneric("];\n")], "\n"))
            if descriptor.concrete or descriptor.hasDescendants():
                cgThings.append(CGIDLInterface(descriptor))
            interfaceTrait = CGInterfaceTrait(descriptor)
            cgThings.append(interfaceTrait)
            if not interfaceTrait.empty:
                reexports.append('%sMethods' % descriptor.name)
            if descriptor.weakReferenceable:
                cgThings.append(CGWeakReferenceableTrait(descriptor))
        cgThings.append(CGGeneric(str(properties)))
        if not descriptor.interface.getExtendedAttribute("Inline"):
            if not descriptor.interface.isCallback() and not descriptor.interface.isNamespace():
                cgThings.append(CGGetProtoObjectMethod(descriptor))
                reexports.append('GetProtoObject')
                cgThings.append(CGPrototypeJSClass(descriptor))
            if descriptor.interface.hasInterfaceObject():
                if descriptor.interface.ctor():
                    cgThings.append(CGClassConstructHook(descriptor))
                for ctor in descriptor.interface.namedConstructors:
                    cgThings.append(CGClassConstructHook(descriptor, ctor))
                if not descriptor.interface.isCallback():
                    cgThings.append(CGInterfaceObjectJSClass(descriptor))
                if descriptor.shouldHaveGetConstructorObjectMethod():
                    cgThings.append(CGGetConstructorObjectMethod(descriptor))
                    reexports.append('GetConstructorObject')
            if descriptor.register:
                cgThings.append(CGDefineDOMInterfaceMethod(descriptor))
                reexports.append('DefineDOMInterface')
                cgThings.append(CGConstructorEnabled(descriptor))
            cgThings.append(CGCreateInterfaceObjectsMethod(descriptor, properties, haveUnscopables))
        # Wrap everything in imports and a `pub mod FooBinding` namespace.
        cgThings = generate_imports(config, CGList(cgThings, '\n'), [descriptor])
        cgThings = CGWrapper(CGNamespace(toBindingNamespace(descriptor.name),
                                         cgThings, public=True),
                             post='\n')
        if reexports:
            reexports = ', '.join(map(lambda name: reexportedName(name), reexports))
            cgThings = CGList([CGGeneric('pub use self::%s::{%s};' % (toBindingNamespace(descriptor.name), reexports)),
                               cgThings], '\n')
        self.cgRoot = cgThings
    def define(self):
        return self.cgRoot.define()
class CGNonNamespacedEnum(CGThing):
    """
    Generates a Rust enum whose first variant carries an explicit start
    value (`first`) and which ends with an extra `Last` marker variant.
    Optional `repr` and `deriving` strings become #[repr(...)]/#[derive(...)]
    attributes; `comment` is prepended verbatim.
    """
    def __init__(self, enumName, names, first, comment="", deriving="", repr=""):
        # Only the first variant carries the explicit discriminant.
        entries = ["%s = %s" % (names[0], first)] + names[1:]
        # `Last` is one past the final real entry (len computed pre-append).
        entries.append('#[allow(dead_code)] Last = ' + str(first + len(entries)))
        joined = ',\n'.join('    ' + entry for entry in entries)
        enumstr = comment + 'pub enum %s {\n%s\n}\n' % (enumName, joined)
        if repr:
            enumstr = ('#[repr(%s)]\n' % repr) + enumstr
        if deriving:
            enumstr = ('#[derive(%s)]\n' % deriving) + enumstr
        # Surround the declaration with blank-line padding.
        self.node = CGWrapper(CGGeneric(enumstr), pre='\n', post='\n')
    def define(self):
        return self.node.define()
class CGDictionary(CGThing):
def __init__(self, dictionary, descriptorProvider):
self.dictionary = dictionary
if all(CGDictionary(d, descriptorProvider).generatable for
d in CGDictionary.getDictionaryDependencies(dictionary)):
self.generatable = True
else:
self.generatable = False
# Nothing else to do here
return
self.memberInfo = [
(member,
getJSToNativeConversionInfo(member.type,
descriptorProvider,
isMember="Dictionary",
defaultValue=member.defaultValue,
exceptionCode="return Err(());"))
for member in dictionary.members]
def define(self):
if not self.generatable:
return ""
return self.struct() + "\n" + self.impl()
def struct(self):
d = self.dictionary
if d.parent:
inheritance = " pub parent: %s::%s,\n" % (self.makeModuleName(d.parent),
self.makeClassName(d.parent))
else:
inheritance = ""
memberDecls = [" pub %s: %s," %
(self.makeMemberName(m[0].identifier.name), self.getMemberType(m))
for m in self.memberInfo]
return (string.Template(
"#[derive(JSTraceable)]\n"
"pub struct ${selfName} {\n" +
"${inheritance}" +
"\n".join(memberDecls) + "\n" +
"}").substitute({"selfName": self.makeClassName(d),
"inheritance": inheritance}))
def impl(self):
d = self.dictionary
if d.parent:
initParent = ("parent: {\n"
" match try!(%s::%s::new(cx, val)) {\n"
" ConversionResult::Success(v) => v,\n"
" ConversionResult::Failure(error) => {\n"
" throw_type_error(cx, &error);\n"
" return Err(());\n"
" }\n"
" }\n"
"},\n" % (self.makeModuleName(d.parent),
self.makeClassName(d.parent)))
else:
initParent = ""
def memberInit(memberInfo):
member, _ = memberInfo
name = self.makeMemberName(member.identifier.name)
conversion = self.getMemberConversion(memberInfo, member.type)
return CGGeneric("%s: %s,\n" % (name, conversion.define()))
def varInsert(varName, dictionaryName):
insertion = ("rooted!(in(cx) let mut %s_js = UndefinedValue());\n"
"%s.to_jsval(cx, %s_js.handle_mut());\n"
"set_dictionary_property(cx, obj.handle(), \"%s\", %s_js.handle()).unwrap();"
% (varName, varName, varName, dictionaryName, varName))
return CGGeneric(insertion)
def memberInsert(memberInfo):
member, _ = memberInfo
name = self.makeMemberName(member.identifier.name)
if member.optional and not member.defaultValue:
insertion = CGIfWrapper("let Some(ref %s) = self.%s" % (name, name),
varInsert(name, member.identifier.name))
else:
insertion = CGGeneric("let %s = &self.%s;\n%s" %
(name, name, varInsert(name, member.identifier.name).define()))
return CGGeneric("%s\n" % insertion.define())
memberInits = CGList([memberInit(m) for m in self.memberInfo])
memberInserts = CGList([memberInsert(m) for m in self.memberInfo])
return string.Template(
"impl ${selfName} {\n"
" pub unsafe fn empty(cx: *mut JSContext) -> ${selfName} {\n"
" match ${selfName}::new(cx, HandleValue::null()) {\n"
" Ok(ConversionResult::Success(v)) => v,\n"
" _ => unreachable!(),\n"
" }\n"
" }\n"
" pub unsafe fn new(cx: *mut JSContext, val: HandleValue) \n"
" -> Result<ConversionResult<${selfName}>, ()> {\n"
" let object = if val.get().is_null_or_undefined() {\n"
" ptr::null_mut()\n"
" } else if val.get().is_object() {\n"
" val.get().to_object()\n"
" } else {\n"
" throw_type_error(cx, \"Value not an object.\");\n"
" return Err(());\n"
" };\n"
" rooted!(in(cx) let object = object);\n"
" Ok(ConversionResult::Success(${selfName} {\n"
"${initParent}"
"${initMembers}"
" }))\n"
" }\n"
"}\n"
"\n"
"impl FromJSValConvertible for ${selfName} {\n"
" type Config = ();\n"
" unsafe fn from_jsval(cx: *mut JSContext, value: HandleValue, _option: ())\n"
" -> Result<ConversionResult<${selfName}>, ()> {\n"
" ${selfName}::new(cx, value)\n"
" }\n"
"}\n"
"\n"
"impl ToJSValConvertible for ${selfName} {\n"
" unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {\n"
" rooted!(in(cx) let obj = JS_NewObject(cx, ptr::null()));\n"
"${insertMembers}"
" rval.set(ObjectOrNullValue(obj.get()))\n"
" }\n"
"}\n").substitute({
"selfName": self.makeClassName(d),
"initParent": CGIndenter(CGGeneric(initParent), indentLevel=12).define(),
"initMembers": CGIndenter(memberInits, indentLevel=12).define(),
"insertMembers": CGIndenter(memberInserts, indentLevel=8).define(),
})
@staticmethod
def makeDictionaryName(dictionary):
return dictionary.identifier.name
def makeClassName(self, dictionary):
return self.makeDictionaryName(dictionary)
@staticmethod
def makeModuleName(dictionary):
return getModuleFromObject(dictionary)
def getMemberType(self, memberInfo):
member, info = memberInfo
declType = info.declType
if member.optional and not member.defaultValue:
declType = CGWrapper(info.declType, pre="Option<", post=">")
return declType.define()
    def getMemberConversion(self, memberInfo, memberType):
        """Return a CGThing converting one JS dictionary property to its native value.

        A required member that is absent throws a TypeError; an optional member
        without a default converts to None when absent.
        """
        def indent(s):
            return CGIndenter(CGGeneric(s), 12).define()

        member, info = memberInfo
        templateBody = info.template
        default = info.default
        replacements = {"val": "rval.handle()"}
        conversion = string.Template(templateBody).substitute(replacements)

        assert (member.defaultValue is None) == (default is None)
        if not member.optional:
            assert default is None
            # Required member with no value present: throw at conversion time.
            default = ("throw_type_error(cx, \"Missing required member \\\"%s\\\".\");\n"
                       "return Err(());") % member.identifier.name
        elif not default:
            # Optional member without a default: wrap present values in Some.
            default = "None"
            conversion = "Some(%s)" % conversion

        # true arm: property was found and converted; false arm: fall back.
        conversion = (
            "{\n"
            "    rooted!(in(cx) let mut rval = UndefinedValue());\n"
            "    match try!(get_dictionary_property(cx, object.handle(), \"%s\", rval.handle_mut())) {\n"
            "        true => {\n"
            "%s\n"
            "        },\n"
            "        false => {\n"
            "%s\n"
            "        },\n"
            "    }\n"
            "}") % (member.identifier.name, indent(conversion), indent(default))
        return CGGeneric(conversion)
@staticmethod
def makeMemberName(name):
# Can't use Rust keywords as member names.
if name in RUST_KEYWORDS:
return name + "_"
return name
@staticmethod
def getDictionaryDependencies(dictionary):
deps = set()
if dictionary.parent:
deps.add(dictionary.parent)
for member in dictionary.members:
if member.type.isDictionary():
deps.add(member.type.unroll().inner)
return deps
class CGRegisterProxyHandlersMethod(CGAbstractMethod):
    """Generates the `RegisterProxyHandlers` function filling PROXY_HANDLERS."""
    def __init__(self, descriptors):
        docs = "Create the global vtables used by the generated DOM bindings to implement JS proxies."
        CGAbstractMethod.__init__(self, None, 'RegisterProxyHandlers', 'void', [],
                                  unsafe=True, pub=True, docs=docs)
        self.descriptors = descriptors

    def definition_body(self):
        # One assignment into the PROXY_HANDLERS table per proxy descriptor.
        lines = []
        for desc in self.descriptors:
            binding = '::'.join([desc.name + 'Binding'] * 2)
            lines.append(CGGeneric(
                "PROXY_HANDLERS[Proxies::%s as usize] = Bindings::%s::DefineProxyHandler();"
                % (desc.name, binding)))
        return CGList(lines, "\n")
class CGRegisterProxyHandlers(CGThing):
    """Declares the static PROXY_HANDLERS table and the method that fills it."""
    def __init__(self, config):
        descriptors = config.getDescriptors(proxy=True)
        count = len(descriptors)
        table = CGGeneric(
            "pub static mut PROXY_HANDLERS: [*const libc::c_void; %d] = [0 as *const libc::c_void; %d];"
            % (count, count))
        self.root = CGList([table, CGRegisterProxyHandlersMethod(descriptors)], "\n")

    def define(self):
        return self.root.define()
class CGBindingRoot(CGThing):
    """
    Root codegen class for binding generation. Instantiate the class, and call
    declare or define to generate header or cpp code (respectively).
    """
    def __init__(self, config, prefix, webIDLFile):
        # Interfaces from this WebIDL file that expose an interface object.
        descriptors = config.getDescriptors(webIDLFile=webIDLFile,
                                            hasInterfaceObject=True)
        # We also want descriptors that have an interface prototype object
        # (isCallback=False), but we don't want to include a second copy
        # of descriptors that we also matched in the previous line
        # (hence hasInterfaceObject=False).
        descriptors.extend(config.getDescriptors(webIDLFile=webIDLFile,
                                                 hasInterfaceObject=False,
                                                 isCallback=False,
                                                 register=True))
        dictionaries = config.getDictionaries(webIDLFile=webIDLFile)
        mainCallbacks = config.getCallbacks(webIDLFile=webIDLFile)
        callbackDescriptors = config.getDescriptors(webIDLFile=webIDLFile,
                                                    isCallback=True)
        enums = config.getEnums(webIDLFile)
        typedefs = config.getTypedefs(webIDLFile)

        # Nothing to generate for this WebIDL file.
        if not (descriptors or dictionaries or mainCallbacks or callbackDescriptors or enums):
            self.root = None
            return

        # Do codegen for all the enums.
        cgthings = [CGEnum(e) for e in enums]

        # Do codegen for all the typdefs
        for t in typedefs:
            typeName = getRetvalDeclarationForType(t.innerType, config.getDescriptorProvider())
            substs = {
                "name": t.identifier.name,
                "type": typeName.define(),
            }

            if t.innerType.isUnion() and not t.innerType.nullable():
                # Allow using the typedef's name for accessing variants.
                template = "pub use self::%(type)s as %(name)s;"
            else:
                template = "pub type %(name)s = %(type)s;"

            cgthings.append(CGGeneric(template % substs))

        # Do codegen for all the dictionaries.
        cgthings.extend([CGDictionary(d, config.getDescriptorProvider())
                         for d in dictionaries])

        # Do codegen for all the callbacks.
        cgthings.extend(CGList([CGCallbackFunction(c, config.getDescriptorProvider()),
                                CGCallbackFunctionImpl(c)], "\n")
                        for c in mainCallbacks)

        # Do codegen for all the descriptors
        cgthings.extend([CGDescriptor(x, config, len(descriptors) == 1) for x in descriptors])

        # Do codegen for all the callback interfaces.
        cgthings.extend(CGList([CGCallbackInterface(x),
                                CGCallbackFunctionImpl(x.interface)], "\n")
                        for x in callbackDescriptors)

        # And make sure we have the right number of newlines at the end
        curr = CGWrapper(CGList(cgthings, "\n\n"), post="\n\n")

        # Add imports
        curr = generate_imports(config, curr, callbackDescriptors, mainCallbacks,
                                dictionaries, enums)

        # Add the auto-generated comment.
        curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)

        # Store the final result.
        self.root = curr

    def define(self):
        # Returns None when this WebIDL file produced nothing to generate.
        if not self.root:
            return None
        return stripTrailingWhitespace(self.root.define())
def type_needs_tracing(t):
    """Return True if values of IDL type `t` must be traced by the JS GC.

    `any` and `object` hold raw JS values, so they — and any type that
    transitively contains them — need tracing.
    """
    assert isinstance(t, IDLObject), (t, type(t))

    if t.isType():
        # Peel wrapper/nullable layers and recurse on the inner type.
        if isinstance(t, IDLWrapperType) or t.nullable():
            return type_needs_tracing(t.inner)
        if t.isAny() or t.isObject():
            return True
        if t.isSequence():
            return type_needs_tracing(t.inner)
        if t.isUnion():
            return any(type_needs_tracing(member) for member in t.flatMemberTypes)
        return False

    if t.isDictionary():
        # A dictionary needs tracing if its parent or any member does.
        if t.parent and type_needs_tracing(t.parent):
            return True
        return any(type_needs_tracing(member.type) for member in t.members)

    if t.isInterface() or t.isEnum():
        return False

    assert False, (t, type(t))
def argument_type(descriptorProvider, ty, optional=False, defaultValue=None, variadic=False):
    """Return the Rust type string used to receive an argument of IDL type `ty`.

    Wrapping order is significant: variadic args become &[T] (DOM interfaces)
    or Vec<T>, otherwise optional-without-default becomes Option<T>, and
    dictionaries that need no tracing are additionally passed by reference.
    """
    info = getJSToNativeConversionInfo(
        ty, descriptorProvider, isArgument=True)
    declType = info.declType

    if variadic:
        if ty.isGeckoInterface():
            declType = CGWrapper(declType, pre="&[", post="]")
        else:
            declType = CGWrapper(declType, pre="Vec<", post=">")
    elif optional and not defaultValue:
        declType = CGWrapper(declType, pre="Option<", post=">")

    if ty.isDictionary() and not type_needs_tracing(ty):
        declType = CGWrapper(declType, pre="&")

    return declType.define()
def method_arguments(descriptorProvider, returnType, arguments, passJSBits=True, trailing=None):
    """Yield (name, Rust type) pairs describing a method's native signature.

    A leading ("cx", "*mut JSContext") pair is emitted when the signature
    requires a JSContext; `trailing`, if supplied, is yielded last.
    """
    if needCx(returnType, arguments, passJSBits):
        yield "cx", "*mut JSContext"
    for arg in arguments:
        rustType = argument_type(descriptorProvider, arg.type, arg.optional,
                                 arg.defaultValue, arg.variadic)
        yield CGDictionary.makeMemberName(arg.identifier.name), rustType
    if trailing:
        yield trailing
def return_type(descriptorProvider, rettype, infallible):
    """Return the Rust return type for `rettype`.

    Fallible results are wrapped in Fallible<T>.
    """
    decl = getRetvalDeclarationForType(rettype, descriptorProvider)
    if infallible:
        return decl.define()
    return CGWrapper(decl, pre="Fallible<", post=">").define()
class CGNativeMember(ClassMethod):
    """A ClassMethod describing the native (Rust) side of one IDL member."""
    def __init__(self, descriptorProvider, member, name, signature, extendedAttrs,
                 breakAfter=True, passJSBitsAsNeeded=True, visibility="public"):
        """
        If passJSBitsAsNeeded is false, we don't automatically pass in a
        JSContext* or a JSObject* based on the return and argument types.
        """
        self.descriptorProvider = descriptorProvider
        self.member = member
        self.extendedAttrs = extendedAttrs
        self.passJSBitsAsNeeded = passJSBitsAsNeeded
        breakAfterSelf = "\n" if breakAfter else ""
        ClassMethod.__init__(self, name,
                             self.getReturnType(signature[0]),
                             self.getArgs(signature[0], signature[1]),
                             static=member.isStatic(),
                             # Mark our getters, which are attrs that
                             # have a non-void return type, as const.
                             const=(not member.isStatic() and member.isAttr() and
                                    not signature[0].isVoid()),
                             breakAfterSelf=breakAfterSelf,
                             visibility=visibility)

    def getReturnType(self, type):
        # Members tagged 'infallible' return T directly; others Fallible<T>.
        infallible = 'infallible' in self.extendedAttrs
        typeDecl = return_type(self.descriptorProvider, type, infallible)
        return typeDecl

    def getArgs(self, returnType, argList):
        # Build Argument objects from the (name, type) pairs of method_arguments.
        return [Argument(arg[1], arg[0]) for arg in method_arguments(self.descriptorProvider,
                                                                     returnType,
                                                                     argList,
                                                                     self.passJSBitsAsNeeded)]
class CGCallback(CGClass):
    """
    Common codegen for callback functions and callback interfaces: generates
    the Rust struct plus, for each method that needs `this` handling, a pair
    of public wrappers around the private implementation.
    """
    def __init__(self, idlObject, descriptorProvider, baseName, methods,
                 getters=[], setters=[]):
        self.baseName = baseName
        self._deps = idlObject.getDeps()
        name = idlObject.identifier.name
        # For our public methods that needThisHandling we want most of the
        # same args and the same return type as what CallbackMember
        # generates. So we want to take advantage of all its
        # CGNativeMember infrastructure, but that infrastructure can't deal
        # with templates and most especially template arguments. So just
        # cheat and have CallbackMember compute all those things for us.
        realMethods = []
        for method in methods:
            if not method.needThisHandling:
                realMethods.append(method)
            else:
                realMethods.extend(self.getMethodImpls(method))
        CGClass.__init__(self, name,
                         bases=[ClassBase(baseName)],
                         constructors=self.getConstructors(),
                         methods=realMethods + getters + setters,
                         decorators="#[derive(JSTraceable, PartialEq)]\n#[allow_unrooted_interior]")

    def getConstructors(self):
        # One pub constructor taking (cx, callback object), delegating to base.
        return [ClassConstructor(
            [Argument("*mut JSContext", "aCx"), Argument("*mut JSObject", "aCallback")],
            bodyInHeader=True,
            visibility="pub",
            explicit=False,
            baseConstructors=[
                "%s::new()" % self.baseName
            ])]

    def getMethodImpls(self, method):
        # Returns [public wrapper taking thisObj, public wrapper without
        # thisObj, the original private method].  Note: mutates method.args.
        assert method.needThisHandling
        args = list(method.args)
        # Strip out the JSContext*/JSObject* args
        # that got added.
        assert args[0].name == "cx" and args[0].argType == "*mut JSContext"
        assert args[1].name == "aThisObj" and args[1].argType == "HandleObject"
        args = args[2:]
        # Record the names of all the arguments, so we can use them when we call
        # the private method.
        argnames = [arg.name for arg in args]
        argnamesWithThis = ["s.get_context()", "thisObjJS.handle()"] + argnames
        argnamesWithoutThis = ["s.get_context()", "thisObjJS.handle()"] + argnames
        # Now that we've recorded the argnames for our call to our private
        # method, insert our optional argument for deciding whether the
        # CallSetup should re-throw exceptions on aRv.
        args.append(Argument("ExceptionHandling", "aExceptionHandling",
                             "ReportExceptions"))
        # And now insert our template argument.
        argsWithoutThis = list(args)
        args.insert(0, Argument("&T", "thisObj"))
        # And the self argument
        method.args.insert(0, Argument(None, "&self"))
        args.insert(0, Argument(None, "&self"))
        argsWithoutThis.insert(0, Argument(None, "&self"))
        setupCall = "let s = CallSetup::new(self, aExceptionHandling);\n"
        bodyWithThis = string.Template(
            setupCall +
            "rooted!(in(s.get_context()) let mut thisObjJS = ptr::null_mut());\n"
            "wrap_call_this_object(s.get_context(), thisObj, thisObjJS.handle_mut());\n"
            "if thisObjJS.is_null() {\n"
            "    return Err(JSFailed);\n"
            "}\n"
            "return ${methodName}(${callArgs});").substitute({
                "callArgs": ", ".join(argnamesWithThis),
                "methodName": 'self.' + method.name,
            })
        # The without-this variant roots a null thisObjJS and passes its handle.
        bodyWithoutThis = string.Template(
            setupCall +
            "rooted!(in(s.get_context()) let thisObjJS = ptr::null_mut());\n"
            "return ${methodName}(${callArgs});").substitute({
                "callArgs": ", ".join(argnamesWithoutThis),
                "methodName": 'self.' + method.name,
            })
        return [ClassMethod(method.name + '_', method.returnType, args,
                            bodyInHeader=True,
                            templateArgs=["T: DomObject"],
                            body=bodyWithThis,
                            visibility='pub'),
                ClassMethod(method.name + '__', method.returnType, argsWithoutThis,
                            bodyInHeader=True,
                            body=bodyWithoutThis,
                            visibility='pub'),
                method]

    def deps(self):
        return self._deps
# Callback getters/setters are always fallible: the generated accessors
# return Fallible results (Err(JSFailed) on JS exception).
def callbackGetterName(attr, descriptor):
    """Return the native name of the generated getter for `attr`."""
    binaryName = descriptor.binaryNameFor(attr.identifier.name)
    return "Get" + MakeNativeName(binaryName)
def callbackSetterName(attr, descriptor):
    """Return the native name of the generated setter for `attr`."""
    binaryName = descriptor.binaryNameFor(attr.identifier.name)
    return "Set" + MakeNativeName(binaryName)
class CGCallbackFunction(CGCallback):
    """Codegen for a WebIDL callback function type."""
    def __init__(self, callback, descriptorProvider):
        call = CallCallback(callback, descriptorProvider)
        CGCallback.__init__(self, callback, descriptorProvider,
                            "CallbackFunction", methods=[call])

    def getConstructors(self):
        # Plain callback functions use the base class constructors unchanged.
        return CGCallback.getConstructors(self)
class CGCallbackFunctionImpl(CGGeneric):
    """Generates the CallbackContainer and ToJSValConvertible impls for a callback type."""
    def __init__(self, callback):
        impl = string.Template("""\
impl CallbackContainer for ${type} {
    unsafe fn new(cx: *mut JSContext, callback: *mut JSObject) -> Rc<${type}> {
        ${type}::new(cx, callback)
    }

    fn callback_holder(&self) -> &CallbackObject {
        self.parent.callback_holder()
    }
}

impl ToJSValConvertible for ${type} {
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        self.callback().to_jsval(cx, rval);
    }
}\
""").substitute({"type": callback.identifier.name})
        CGGeneric.__init__(self, impl)
class CGCallbackInterface(CGCallback):
    """Codegen for a WebIDL callback interface."""
    def __init__(self, descriptor):
        iface = descriptor.interface
        getters = []
        setters = []
        operations = []
        for m in iface.members:
            if m.isAttr() and not m.isStatic():
                getters.append(CallbackGetter(m, descriptor))
                if not m.readonly:
                    setters.append(CallbackSetter(m, descriptor))
            elif m.isMethod() and not m.isStatic() and not m.isIdentifierLess():
                for sig in m.signatures():
                    operations.append(CallbackOperation(m, sig, descriptor))
        # JS-implemented callback interfaces must not declare a constructor.
        assert not iface.isJSImplemented() or not iface.ctor()
        CGCallback.__init__(self, iface, descriptor, "CallbackInterface",
                            operations, getters=getters, setters=setters)
class FakeMember():
    """Stand-in IDL member used by callback codegen.

    Callback code generation does not care whether the original member was a
    method or an attribute, so this minimal stub answers every query with the
    neutral value.
    """
    def __init__(self):
        # Matches the default null-handling behavior of real members.
        self.treatNullAs = "Default"

    def isStatic(self):
        return False

    def isAttr(self):
        return False

    def isMethod(self):
        return False

    def getExtendedAttribute(self, name):
        # The fake member never carries extended attributes.
        return None
class CallbackMember(CGNativeMember):
    """
    Base class for codegen of a single callback method, getter or setter.

    Generates the Rust body that converts native arguments to JS values,
    performs the JS call (via subclass hooks getRvalDecl/getCall/...), and
    converts the JS result back to the native return type.
    """
    def __init__(self, sig, name, descriptorProvider, needThisHandling):
        """
        needThisHandling is True if we need to be able to accept a specified
        thisObj, False otherwise.
        """
        self.retvalType = sig[0]
        self.originalSig = sig
        args = sig[1]
        self.argCount = len(args)
        if self.argCount > 0:
            # Check for variadic arguments
            lastArg = args[self.argCount - 1]
            if lastArg.variadic:
                self.argCountStr = (
                    "(%d - 1) + %s.len()" % (self.argCount,
                                             lastArg.identifier.name))
            else:
                self.argCountStr = "%d" % self.argCount
        self.needThisHandling = needThisHandling
        # If needThisHandling, we generate ourselves as private and the caller
        # will handle generating public versions that handle the "this" stuff.
        visibility = "priv" if needThisHandling else "pub"
        # We don't care, for callback codegen, whether our original member was
        # a method or attribute or whatnot. Just always pass FakeMember()
        # here.
        CGNativeMember.__init__(self, descriptorProvider, FakeMember(),
                                name, (self.retvalType, args),
                                extendedAttrs={},
                                passJSBitsAsNeeded=False,
                                visibility=visibility)
        # We have to do all the generation of our body now, because
        # the caller relies on us throwing if we can't manage it.
        self.exceptionCode = "return Err(JSFailed);"
        self.body = self.getImpl()

    def getImpl(self):
        """Assemble the full unsafe method body from the subclass hooks."""
        replacements = {
            "declRval": self.getRvalDecl(),
            "returnResult": self.getResultConversion(),
            "convertArgs": self.getArgConversions(),
            "doCall": self.getCall(),
            "setupCall": self.getCallSetup(),
        }
        if self.argCount > 0:
            replacements["argCount"] = self.argCountStr
            replacements["argvDecl"] = string.Template(
                "rooted_vec!(let mut argv);\n"
                "argv.extend((0..${argCount}).map(|_| Heap::new(UndefinedValue())));\n"
            ).substitute(replacements)
        else:
            # Avoid weird 0-sized arrays
            replacements["argvDecl"] = ""

        # Newlines and semicolons are in the values
        pre = string.Template(
            "${setupCall}"
            "${declRval}"
            "${argvDecl}").substitute(replacements)
        body = string.Template(
            "${convertArgs}"
            "${doCall}"
            "${returnResult}").substitute(replacements)
        return CGWrapper(CGIndenter(CGList([
            CGGeneric(pre),
            CGGeneric(body),
        ], "\n"), 4), pre="unsafe {\n", post="\n}").define()

    def getResultConversion(self):
        """Convert the JS value left in `rval` back to the native return type."""
        replacements = {
            "val": "rval.handle()",
        }

        info = getJSToNativeConversionInfo(
            self.retvalType,
            self.descriptorProvider,
            exceptionCode=self.exceptionCode,
            isCallbackReturnValue="Callback",
            # XXXbz we should try to do better here
            sourceDescription="return value")
        template = info.template
        declType = info.declType

        convertType = instantiateJSToNativeConversionTemplate(
            template, replacements, declType, "rvalDecl")

        if self.retvalType is None or self.retvalType.isVoid():
            retval = "()"
        elif self.retvalType.isAny():
            retval = "rvalDecl.get()"
        else:
            retval = "rvalDecl"

        return "%s\nOk(%s)\n" % (convertType.define(), retval)

    def getArgConversions(self):
        """Generate the native-to-JS conversion code for every argument."""
        # Just reget the arglist from self.originalSig, because our superclasses
        # just have way too many members they like to clobber, so I can't find a
        # safe member name to store it in.
        argConversions = [self.getArgConversion(i, arg) for (i, arg)
                          in enumerate(self.originalSig[1])]
        # Do them back to front, so our argc modifications will work
        # correctly, because we examine trailing arguments first.
        argConversions.reverse()
        argConversions = [CGGeneric(c) for c in argConversions]
        if self.argCount > 0:
            argConversions.insert(0, self.getArgcDecl())
        # And slap them together.
        return CGList(argConversions, "\n\n").define() + "\n\n"

    def getArgConversion(self, i, arg):
        """Convert argument `i` into argv[i], handling variadic/optional args."""
        argval = arg.identifier.name

        if arg.variadic:
            argval = argval + "[idx].get()"
            jsvalIndex = "%d + idx" % i
        else:
            jsvalIndex = "%d" % i
            if arg.optional and not arg.defaultValue:
                argval += ".clone().unwrap()"

        conversion = wrapForType(
            "argv_root.handle_mut()", result=argval,
            successCode="argv[%s] = Heap::new(argv_root.get());" % jsvalIndex,
            pre="rooted!(in(cx) let mut argv_root = UndefinedValue());")
        if arg.variadic:
            # Loop over each element of the variadic slice.
            conversion = string.Template(
                "for idx in 0..${arg}.len() {\n" +
                CGIndenter(CGGeneric(conversion)).define() + "\n"
                "}"
            ).substitute({"arg": arg.identifier.name})
        elif arg.optional and not arg.defaultValue:
            # Absent trailing optionals shrink argc; non-trailing ones become undefined.
            conversion = (
                CGIfWrapper("%s.is_some()" % arg.identifier.name,
                            CGGeneric(conversion)).define() +
                " else if argc == %d {\n"
                "    // This is our current trailing argument; reduce argc\n"
                "    argc -= 1;\n"
                "} else {\n"
                "    argv[%d] = Heap::new(UndefinedValue());\n"
                "}" % (i + 1, i))
        return conversion

    def getArgs(self, returnType, argList):
        args = CGNativeMember.getArgs(self, returnType, argList)
        if not self.needThisHandling:
            # Since we don't need this handling, we're the actual method that
            # will be called, so we need an aRethrowExceptions argument.
            args.append(Argument("ExceptionHandling", "aExceptionHandling",
                                 "ReportExceptions"))
            return args
        # We want to allow the caller to pass in a "this" object, as
        # well as a JSContext.
        return [Argument("*mut JSContext", "cx"),
                Argument("HandleObject", "aThisObj")] + args

    def getCallSetup(self):
        if self.needThisHandling:
            # It's been done for us already
            return ""
        # NOTE(review): this emits C++-style code (CallSetup s(...),
        # JSContext*), unlike the Rust emitted elsewhere in this file; it
        # looks like an unported leftover -- confirm whether this path is
        # ever exercised.
        return (
            "CallSetup s(CallbackPreserveColor(), aRv, aExceptionHandling);\n"
            "JSContext* cx = s.get_context();\n"
            "if (!cx) {\n"
            "    return Err(JSFailed);\n"
            "}\n")

    def getArgcDecl(self):
        # `argc` is mutable only when trailing optional args may shrink it.
        if self.argCount <= 1:
            return CGGeneric("let argc = %s;" % self.argCountStr)
        return CGGeneric("let mut argc = %s;" % self.argCountStr)

    @staticmethod
    def ensureASCIIName(idlObject):
        """Reject member names containing non-ASCII or double-quote characters.

        Uses re.search (not re.match, which anchors at the start) so that the
        entire name is checked, not just its first character.
        """
        type = "attribute" if idlObject.isAttr() else "operation"
        if re.search("[^\x20-\x7E]", idlObject.identifier.name):
            raise SyntaxError('Callback %s name "%s" contains non-ASCII '
                              "characters. We can't handle that. %s" %
                              (type, idlObject.identifier.name,
                               idlObject.location))
        if re.search('"', idlObject.identifier.name):
            raise SyntaxError("Callback %s name '%s' contains "
                              "double-quote character. We can't handle "
                              "that. %s" %
                              (type, idlObject.identifier.name,
                               idlObject.location))
class CallbackMethod(CallbackMember):
    """
    Common codegen for anything that calls into JS via JS_CallFunctionValue:
    callback functions and callback-interface operations.  Subclasses supply
    the `this` object, the callable declaration, and an optional call guard.
    """
    def __init__(self, sig, name, descriptorProvider, needThisHandling):
        CallbackMember.__init__(self, sig, name, descriptorProvider,
                                needThisHandling)

    def getRvalDecl(self):
        # Rooted slot receiving the JS call's return value.
        return "rooted!(in(cx) let mut rval = UndefinedValue());\n"

    def getCall(self):
        replacements = {
            "thisObj": self.getThisObj(),
            "getCallable": self.getCallableDecl(),
            "callGuard": self.getCallGuard(),
        }
        if self.argCount > 0:
            replacements["argv"] = "argv.as_ptr() as *const JSVal"
            replacements["argc"] = "argc"
        else:
            # No arguments: pass a null pointer and zero length.
            replacements["argv"] = "ptr::null_mut()"
            replacements["argc"] = "0"
        return string.Template(
            "${getCallable}"
            "rooted!(in(cx) let rootedThis = ${thisObj});\n"
            "let ok = ${callGuard}JS_CallFunctionValue(\n"
            "    cx, rootedThis.handle(), callable.handle(),\n"
            "    &HandleValueArray {\n"
            "        length_: ${argc} as ::libc::size_t,\n"
            "        elements_: ${argv}\n"
            "    }, rval.handle_mut());\n"
            "maybe_resume_unwind();\n"
            "if !ok {\n"
            "    return Err(JSFailed);\n"
            "}\n").substitute(replacements)
class CallCallback(CallbackMethod):
    """Codegen for invoking a callback function's [[Call]]."""
    def __init__(self, callback, descriptorProvider):
        self.callback = callback
        CallbackMethod.__init__(self, callback.signatures()[0], "Call",
                                descriptorProvider, needThisHandling=True)

    def getThisObj(self):
        # The caller-supplied `this` object is used directly.
        return "aThisObj.get()"

    def getCallableDecl(self):
        return "rooted!(in(cx) let callable = ObjectValue(self.callback()));\n"

    def getCallGuard(self):
        # [TreatNonObjectAsNull] callbacks silently no-op on non-callables.
        if not self.callback._treatNonObjectAsNull:
            return ""
        return "!IsCallable(self.callback()) || "
class CallbackOperationBase(CallbackMethod):
    """
    Common class for implementing various callback operations.
    """
    def __init__(self, signature, jsName, nativeName, descriptor, singleOperation):
        self.singleOperation = singleOperation
        self.methodName = jsName
        CallbackMethod.__init__(self, signature, nativeName, descriptor, singleOperation)

    def getThisObj(self):
        if not self.singleOperation:
            return "self.callback()"
        # This relies on getCallableDecl declaring a boolean
        # isCallable in the case when we're a single-operation
        # interface.
        return "if isCallable { aThisObj.get() } else { self.callback() }"

    def getCallableDecl(self):
        replacements = {
            "methodName": self.methodName
        }
        getCallableFromProp = string.Template(
            'try!(self.parent.get_callable_property(cx, "${methodName}"))'
        ).substitute(replacements)
        if not self.singleOperation:
            return 'rooted!(in(cx) let callable =\n' + getCallableFromProp + ');\n'
        # Single-operation interfaces may be passed a bare callable; otherwise
        # fall back to looking up the named property on the callback object.
        return (
            'let isCallable = IsCallable(self.callback());\n'
            'rooted!(in(cx) let callable =\n' +
            CGIndenter(
                CGIfElseWrapper('isCallable',
                                CGGeneric('ObjectValue(self.callback())'),
                                CGGeneric(getCallableFromProp))).define() + ');\n')

    def getCallGuard(self):
        return ""
class CallbackOperation(CallbackOperationBase):
    """
    Codegen actual WebIDL operations on callback interfaces.
    """
    def __init__(self, method, signature, descriptor):
        self.ensureASCIIName(method)
        jsName = method.identifier.name
        nativeName = MakeNativeName(descriptor.binaryNameFor(jsName))
        singleOp = descriptor.interface.isSingleOperationInterface()
        CallbackOperationBase.__init__(self, signature, jsName, nativeName,
                                       descriptor, singleOp)
class CallbackGetter(CallbackMember):
    """
    Codegen for attribute getters on callback interfaces.

    NOTE(review): getRvalDecl/getCall emit C++-style code (JS::Rooted,
    mCallback) rather than the Rust idioms used elsewhere in this file; this
    path looks like an unported leftover -- confirm whether it is exercised.
    """
    def __init__(self, attr, descriptor):
        self.ensureASCIIName(attr)
        self.attrName = attr.identifier.name
        CallbackMember.__init__(self,
                                (attr.type, []),
                                # Fix: callbackGetterName takes (attr, descriptor);
                                # calling it with one argument raised TypeError.
                                callbackGetterName(attr, descriptor),
                                descriptor,
                                needThisHandling=False)

    def getRvalDecl(self):
        return "JS::Rooted<JS::Value> rval(cx, JS::UndefinedValue());\n"

    def getCall(self):
        replacements = {
            "attrName": self.attrName
        }
        return string.Template(
            'if (!JS_GetProperty(cx, mCallback, "${attrName}", &rval)) {\n'
            '    return Err(JSFailed);\n'
            '}\n').substitute(replacements)
class CallbackSetter(CallbackMember):
    """
    Codegen for attribute setters on callback interfaces.

    NOTE(review): getCall emits C++-style code (MOZ_ASSERT, mCallback,
    argv.handleAt) rather than Rust; this path looks like an unported
    leftover -- confirm whether it is exercised.
    """
    def __init__(self, attr, descriptor):
        self.ensureASCIIName(attr)
        self.attrName = attr.identifier.name
        CallbackMember.__init__(self,
                                (BuiltinTypes[IDLBuiltinType.Types.void],
                                 [FakeArgument(attr.type, attr)]),
                                # Fix: callbackSetterName takes (attr, descriptor);
                                # calling it with one argument raised TypeError.
                                callbackSetterName(attr, descriptor),
                                descriptor,
                                needThisHandling=False)

    def getRvalDecl(self):
        # We don't need an rval
        return ""

    def getCall(self):
        replacements = {
            "attrName": self.attrName,
            "argv": "argv.handleAt(0)",
        }
        return string.Template(
            'MOZ_ASSERT(argv.length() == 1);\n'
            'if (!JS_SetProperty(cx, mCallback, "${attrName}", ${argv})) {\n'
            '    return Err(JSFailed);\n'
            '}\n').substitute(replacements)

    def getArgcDecl(self):
        # Setters always take exactly one argument; no argc needed.
        return None
class CGIterableMethodGenerator(CGGeneric):
    """
    Creates methods for iterable interfaces. Unwrapping/wrapping
    will be taken care of by the usual method generation machinery in
    CGMethodCall/CGPerSignatureCall. Functionality is filled in here instead of
    using CGCallGenerator.
    """
    def __init__(self, descriptor, iterable, methodName):
        # forEach gets a hand-written loop body; the other iterator methods
        # (keys/values/entries) construct and return an iterator object.
        if methodName == "forEach":
            CGGeneric.__init__(self, fill(
                """
                if !IsCallable(arg0) {
                    throw_type_error(cx, "Argument 1 of ${ifaceName}.forEach is not callable.");
                    return false;
                }
                rooted!(in(cx) let arg0 = ObjectValue(arg0));
                rooted!(in(cx) let mut call_arg1 = UndefinedValue());
                rooted!(in(cx) let mut call_arg2 = UndefinedValue());
                let mut call_args = vec![UndefinedValue(), UndefinedValue(), ObjectValue(*_obj)];
                rooted!(in(cx) let mut ignoredReturnVal = UndefinedValue());
                for i in 0..(*this).get_iterable_length() {
                    (*this).get_value_at_index(i).to_jsval(cx, call_arg1.handle_mut());
                    (*this).get_key_at_index(i).to_jsval(cx, call_arg2.handle_mut());
                    call_args[0] = call_arg1.handle().get();
                    call_args[1] = call_arg2.handle().get();
                    let call_args = HandleValueArray { length_: 3, elements_: call_args.as_ptr() };
                    if !Call(cx, arg1, arg0.handle(), &call_args,
                             ignoredReturnVal.handle_mut()) {
                        return false;
                    }
                }

                let result = ();
                """,
                ifaceName=descriptor.interface.identifier.name))
            return
        CGGeneric.__init__(self, fill(
            """
            let result = ${iterClass}::new(&*this,
                                           IteratorType::${itrMethod},
                                           super::${ifaceName}IteratorBinding::Wrap);
            """,
            iterClass=iteratorNativeType(descriptor, True),
            ifaceName=descriptor.interface.identifier.name,
            itrMethod=methodName.title()))
def camel_to_upper_snake(s):
    """Convert a CamelCase name to UPPER_SNAKE_CASE.

    Each run matching [A-Z][a-z]* becomes one underscore-separated segment,
    so e.g. "TestBinding" -> "TEST_BINDING" and "URL" -> "U_R_L".
    """
    segments = re.findall("[A-Z][a-z]*", s)
    return "_".join(segment.upper() for segment in segments)
def process_arg(expr, arg):
    """Adjust `expr` for passing a DOM-interface argument to a native method."""
    isDomInterface = (arg.type.isGeckoInterface() and
                      not arg.type.unroll().inner.isCallback())
    if not isDomInterface:
        return expr
    if arg.type.nullable() or arg.type.isSequence() or arg.optional:
        # Option<Root<T>> / collections: borrow the contents via .r().
        return expr + ".r()"
    # Plain Root<T>: pass by reference.
    return "&" + expr
class GlobalGenRoots():
"""
Roots for global codegen.
To generate code, call the method associated with the target, and then
call the appropriate define/declare method.
"""
@staticmethod
def InterfaceObjectMap(config):
mods = [
"dom::bindings::codegen",
"js::jsapi::{HandleObject, JSContext}",
"phf",
]
imports = CGList([CGGeneric("use %s;" % mod) for mod in mods], "\n")
global_descriptors = config.getDescriptors(isGlobal=True)
flags = [("EMPTY", 0)]
flags.extend(
(camel_to_upper_snake(d.name), 2 ** idx)
for (idx, d) in enumerate(global_descriptors)
)
global_flags = CGWrapper(CGIndenter(CGList([
CGGeneric("const %s = %#x," % args)
for args in flags
], "\n")), pre="pub flags Globals: u8 {\n", post="\n}")
globals_ = CGWrapper(CGIndenter(global_flags), pre="bitflags! {\n", post="\n}")
phf = CGGeneric("include!(concat!(env!(\"OUT_DIR\"), \"/InterfaceObjectMapPhf.rs\"));")
return CGList([
CGGeneric(AUTOGENERATED_WARNING_COMMENT),
CGList([imports, globals_, phf], "\n\n")
])
@staticmethod
def InterfaceObjectMapData(config):
pairs = []
for d in config.getDescriptors(hasInterfaceObject=True, isInline=False):
binding = toBindingNamespace(d.name)
pairs.append((d.name, binding, binding))
for ctor in d.interface.namedConstructors:
pairs.append((ctor.identifier.name, binding, binding))
pairs.sort(key=operator.itemgetter(0))
mappings = [
CGGeneric('"%s": "codegen::Bindings::%s::%s::DefineDOMInterface as unsafe fn(_, _)"' % pair)
for pair in pairs
]
return CGWrapper(
CGList(mappings, ",\n"),
pre="{\n",
post="\n}\n")
@staticmethod
def PrototypeList(config):
# Prototype ID enum.
interfaces = config.getDescriptors(isCallback=False, isNamespace=False)
protos = [d.name for d in interfaces]
constructors = sorted([MakeNativeName(d.name)
for d in config.getDescriptors(hasInterfaceObject=True)
if d.shouldHaveGetConstructorObjectMethod()])
proxies = [d.name for d in config.getDescriptors(proxy=True)]
return CGList([
CGGeneric(AUTOGENERATED_WARNING_COMMENT),
CGGeneric("pub const PROTO_OR_IFACE_LENGTH: usize = %d;\n" % (len(protos) + len(constructors))),
CGGeneric("pub const MAX_PROTO_CHAIN_LENGTH: usize = %d;\n\n" % config.maxProtoChainLength),
CGNonNamespacedEnum('ID', protos, 0, deriving="PartialEq, Copy, Clone", repr="u16"),
CGNonNamespacedEnum('Constructor', constructors, len(protos),
deriving="PartialEq, Copy, Clone", repr="u16"),
CGWrapper(CGIndenter(CGList([CGGeneric('"' + name + '"') for name in protos],
",\n"),
indentLevel=4),
pre="static INTERFACES: [&'static str; %d] = [\n" % len(protos),
post="\n];\n\n"),
CGGeneric("pub fn proto_id_to_name(proto_id: u16) -> &'static str {\n"
" debug_assert!(proto_id < ID::Last as u16);\n"
" INTERFACES[proto_id as usize]\n"
"}\n\n"),
CGNonNamespacedEnum('Proxies', proxies, 0, deriving="PartialEq, Copy, Clone"),
])
@staticmethod
def RegisterBindings(config):
# TODO - Generate the methods we want
code = CGList([
CGRegisterProxyHandlers(config),
], "\n")
return CGImports(code, descriptors=[], callbacks=[], dictionaries=[], enums=[], imports=[
'dom::bindings::codegen::Bindings',
'dom::bindings::codegen::PrototypeList::Proxies',
'libc',
], config=config, ignored_warnings=[])
@staticmethod
def InterfaceTypes(config):
descriptors = sorted([MakeNativeName(d.name)
for d in config.getDescriptors(register=True,
isCallback=False,
isIteratorInterface=False)])
curr = CGList([CGGeneric("pub use dom::%s::%s;\n" % (name.lower(),
MakeNativeName(name)))
for name in descriptors])
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
return curr
@staticmethod
def Bindings(config):
def leafModule(d):
return getModuleFromObject(d).split('::')[-1]
descriptors = config.getDescriptors(register=True, isIteratorInterface=False)
descriptors = (set(toBindingNamespace(d.name) for d in descriptors) |
set(leafModule(d) for d in config.callbacks) |
set(leafModule(d) for d in config.getDictionaries()))
curr = CGList([CGGeneric("pub mod %s;\n" % name) for name in sorted(descriptors)])
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
return curr
@staticmethod
def InheritTypes(config):
descriptors = config.getDescriptors(register=True, isCallback=False)
imports = [CGGeneric("use dom::types::*;\n"),
CGGeneric("use dom::bindings::conversions::{DerivedFrom, get_dom_class};\n"),
CGGeneric("use dom::bindings::inheritance::Castable;\n"),
CGGeneric("use dom::bindings::js::{JS, LayoutJS, Root};\n"),
CGGeneric("use dom::bindings::trace::JSTraceable;\n"),
CGGeneric("use dom::bindings::reflector::DomObject;\n"),
CGGeneric("use js::jsapi::JSTracer;\n\n"),
CGGeneric("use std::mem;\n\n")]
allprotos = []
topTypes = []
hierarchy = defaultdict(list)
for descriptor in descriptors:
name = descriptor.name
chain = descriptor.prototypeChain
upcast = descriptor.hasDescendants()
downcast = len(chain) != 1
if upcast and not downcast:
topTypes.append(name)
if not upcast:
# No other interface will implement DeriveFrom<Foo> for this Foo, so avoid
# implementing it for itself.
chain = chain[:-1]
# Implement `DerivedFrom<Bar>` for `Foo`, for all `Bar` that `Foo` inherits from.
if chain:
allprotos.append(CGGeneric("impl Castable for %s {}\n" % name))
for baseName in chain:
allprotos.append(CGGeneric("impl DerivedFrom<%s> for %s {}\n" % (baseName, name)))
if chain:
allprotos.append(CGGeneric("\n"))
if downcast:
hierarchy[descriptor.interface.parent.identifier.name].append(name)
typeIdCode = []
topTypeVariants = [
("ID used by abstract interfaces.", "pub abstract_: ()"),
("ID used by interfaces that are not castable.", "pub alone: ()"),
]
topTypeVariants += [
("ID used by interfaces that derive from %s." % typeName,
"pub %s: %sTypeId" % (typeName.lower(), typeName))
for typeName in topTypes
]
topTypeVariantsAsStrings = [CGGeneric("/// %s\n%s," % variant) for variant in topTypeVariants]
typeIdCode.append(CGWrapper(CGIndenter(CGList(topTypeVariantsAsStrings, "\n"), 4),
pre="#[derive(Copy)]\npub union TopTypeId {\n",
post="\n}\n\n"))
typeIdCode.append(CGGeneric("""\
impl Clone for TopTypeId {
fn clone(&self) -> Self { *self }
}
"""))
def type_id_variant(name):
# If `name` is present in the hierarchy keys', that means some other interfaces
# derive from it and this enum variant should have an argument with its own
# TypeId enum.
return "%s(%sTypeId)" % (name, name) if name in hierarchy else name
for base, derived in hierarchy.iteritems():
variants = []
if config.getDescriptor(base).concrete:
variants.append(CGGeneric(base))
variants += [CGGeneric(type_id_variant(derivedName)) for derivedName in derived]
derives = "Clone, Copy, Debug, PartialEq"
typeIdCode.append(CGWrapper(CGIndenter(CGList(variants, ",\n"), 4),
pre="#[derive(%s)]\npub enum %sTypeId {\n" % (derives, base),
post="\n}\n\n"))
if base in topTypes:
typeIdCode.append(CGGeneric("""\
impl %(base)s {
pub fn type_id(&self) -> &'static %(base)sTypeId {
unsafe {
&get_dom_class(self.reflector().get_jsobject().get())
.unwrap()
.type_id
.%(field)s
}
}
}
""" % {'base': base, 'field': base.lower()}))
curr = CGList(imports + typeIdCode + allprotos)
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
return curr
@staticmethod
def UnionTypes(config):
    """Build the generated union-types module for all descriptors/dictionaries/callbacks."""
    # Note: this deliberately shadows-and-calls the module-level UnionTypes() builder.
    generated = UnionTypes(config.getDescriptors(),
                           config.getDictionaries(),
                           config.getCallbacks(),
                           config.typedefs,
                           config)
    # Prepend the standard "autogenerated, do not edit" banner.
    return CGWrapper(generated, pre=AUTOGENERATED_WARNING_COMMENT)
@staticmethod
def SupportedDomApis(config):
    """Render the "supported DOM APIs" HTML report from its template files."""
    template_dir = os.path.join('dom', 'bindings', 'codegen')

    def read_template(filename):
        # Each template is an HTML snippet containing ${...} placeholders.
        with open(os.path.join(template_dir, filename)) as handle:
            return handle.read()

    base_template = read_template('apis.html.template')
    api_template = read_template('api.html.template')
    property_template = read_template('property.html.template')
    interface_template = read_template('interface.html.template')

    apis = []
    interfaces = []
    for descriptor in config.getDescriptors(isExposedConditionally=False):
        entries = []
        for member in descriptor.interface.members:
            # Skip pref/func-gated members and identifier-less methods.
            if (PropertyDefiner.getStringAttr(member, 'Pref') or
                    PropertyDefiner.getStringAttr(member, 'Func') or
                    (member.isMethod() and member.isIdentifierLess())):
                continue
            label = member.identifier.name + ('()' if member.isMethod() else '')
            entries.append(property_template.replace('${name}', label))
        iface_name = descriptor.interface.identifier.name
        apis.append(api_template
                    .replace('${interface}', iface_name)
                    .replace('${properties}', '\n'.join(entries)))
        interfaces.append(interface_template.replace('${interface}', iface_name))

    return CGGeneric(base_template
                     .replace('${apis}', '\n'.join(apis))
                     .replace('${interfaces}', '\n'.join(interfaces)))
| srivassumit/servo | components/script/dom/bindings/codegen/CodegenRust.py | Python | mpl-2.0 | 284,993 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def get_comma_split_names( name ):
    """Split *name* on commas (or, failing that, semicolons) into stripped parts.

    A string containing neither separator comes back as a single-element
    list holding the stripped string. Commas take precedence: if both
    separators are present, only commas split the string.
    """
    if ',' in name:
        parts = name.split(',')
    elif ';' in name:
        parts = name.split(';')
    else:
        parts = [name]
    return [part.strip() for part in parts]
# Names already present in the dictionary table; used to suppress duplicate INSERTs.
old = []
old_file = open("currentDictNames.txt")
new_file = open("selectList.txt")
result = open("dictionaryResult.sql",'w')

# Seed the known-name list from the current dictionary dump.
for line in old_file:
    old.append(line.strip())
old_file.close()

# For each non-empty input line, split multi-valued entries and emit one
# INSERT per value not already seen (recording it so later duplicates are skipped).
# NOTE(review): values are spliced into the SQL string unescaped — a value
# containing a single quote would break the generated statement.
for line in new_file:
    if len(line) > 1:
        values = get_comma_split_names(line)
        for value in values:
            if value.strip() not in old:
                old.append(value.strip())
                result.write("INSERT INTO clinlims.dictionary ( id, is_active, dict_entry, lastupdated, dictionary_category_id ) \n\t")
                result.write("VALUES ( nextval( 'dictionary_seq' ) , 'Y' , '" + value.strip() + "' , now(), ( select id from clinlims.dictionary_category where description = 'Haiti Lab' ));\n")

result.close()
# Python 2 print statement (this script targets Python 2).
print "Done check dictionaryResult.sql for values"
| openelisglobal/openelisglobal-sandbox | liquibase/OE2.7/CILNSPMassive/scripts/dictionary.py | Python | mpl-2.0 | 1,212 |
'''OpenGL extension OES.blend_subtract
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.blend_subtract to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/blend_subtract.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.blend_subtract import *
from OpenGL.raw.GLES1.OES.blend_subtract import _EXTENSION_NAME
def glInitBlendSubtractOES():
    """Return True when the OES_blend_subtract extension is available."""
    # Imported lazily, matching the PyOpenGL autogenerated-module convention.
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION | stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GLES1/OES/blend_subtract.py | Python | lgpl-3.0 | 768 |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    """Raised for errors originating in the openshiftcli wrappers."""
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Wrapper around the `oc`/`oadm` command line tools.

    Builds argument lists and shells out to /usr/bin/oc via subprocess,
    returning dicts of the form {'returncode': int, 'results': ..., 'cmd': str}.
    Python 2 code (uses print statements and dict.has_key).
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' Fetch the named resource, apply `content` key/value edits via Yedit,
            and `oc replace` it only when something actually changed. '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        # Each put() returns (changed, doc); replace only if any edit took effect.
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        ''' Run `oc replace -f <fname>` in this namespace. '''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        ''' Serialize `content` to a temp file and `oc create` it. '''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        ''' Run `oc create -f <fname>` in this namespace. '''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])

    def _delete(self, resource, rname, selector=None):
        ''' Run `oc delete <resource> <rname>` (optionally with a label selector). '''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):
        ''' Run `oc process` on a template (by name or inline data), optionally
            creating the processed objects afterwards. '''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            # Inline template content is fed through stdin.
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])

    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])

        cmd.extend(['-o', 'json'])

        if rname:
            cmd.append(rname)

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        cmd.append('--schedulable=%s' % schedulable)

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods (raw output) '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    #pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform `oc import-image`, optionally from an external registry url '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    #pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        ''' Base command runner: execute `oc` (or `oc adm`) with KUBECONFIG set,
            returning returncode/results/cmd (plus stdout/stderr on error). '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oc', 'adm']
        else:
            cmds = ['/usr/bin/oc']

        cmds.extend(cmd)

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print ' '.join(cmds)

        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})

        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }

        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # Tolerate empty/non-JSON stdout (Python 2 exception .message).
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print stdout
                print stderr

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })

        return rval
class Utils(object):
    ''' utilities for openshiftcli modules (Python 2: print statements, has_key) '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' Create /tmp/<rname> containing `data` serialized as yaml/json/raw;
            the file is registered for cleanup at interpreter exit. '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path

    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]

        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files

    @staticmethod
    def cleanup(files):
        ''' Remove the given files/directories if they still exist (exit hook). '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the first result whose metadata.name matches, else None. '''
        rval = None
        for result in results:
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' Read a file and parse it as yaml (round-trip) or json. '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.
            Returns True when every non-skipped key in result_def matches user_def
            (recursing into nested dicts and equal-length lists). '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False

            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False

        if debug:
            print 'returning true'
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config'''

    def __init__(self, rname, namespace, kubeconfig, options):
        '''Store the resource name, namespace, kubeconfig path and option hash.'''
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self):
        '''return all options as a string'''
        return self.stringify()

    def stringify(self):
        ''' Render the option hash as a list of --key=value CLI parameters.

        An option is emitted only when it is marked include and its value is
        truthy (or an int, so 0/False still serialize). Underscores in option
        names become dashes.
        '''
        return ['--%s=%s' % (name.replace('_', '-'), opt['value'])
                for name, opt in self.config_options.items()
                if opt['include'] and (opt['value'] or isinstance(opt['value'], int))]
class YeditException(Exception):
    """Raised when Yedit cannot load, edit, or persist a document."""
class Yedit(object):
    ''' Class to modify yaml files.

    Keys use a separator-delimited path notation (default "."), e.g. "a.b[0].c".
    Mutating methods return a (changed, document) tuple.
    '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict == None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    # BUGFIX: the setter previously took no value argument, so any
    # `yedit.separator = x` assignment raised TypeError.
    @separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key into (array_index, dict_key) pairs, allowing the
           configured separator; the other known separators become key chars'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False
        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key; empty key clears the whole document '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # Walk to the parent of the final path component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set `item` at dotted-path `key`, creating intermediate dicts.
            d = {'a': {'b': 'c'}}
            add_entry(d, 'a#b', item, sep='#') replaces the value at a.b.
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                data[dict_key] = {}
                data = data[dict_key]

            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding data to object at path: {}".format(key))

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None

        return data

    def write(self):
        ''' write the document to self.filename atomically (via a .yedit temp
            file + rename), optionally backing up the existing file first '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        tmp_filename = self.filename + '.yedit'
        try:
            with open(tmp_filename, 'w') as yfd:
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()

                yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except Exception as err:
            raise YeditException(err.message)

        os.rename(tmp_filename, self.filename)

        return (True, self.yaml_dict)

    def read(self):
        ''' read raw contents from self.filename, or None when absent '''
        # check if it exists
        if self.filename == None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether self.filename exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' parse the file (or self.content) as yaml/json into yaml_dict '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                # Already-parsed content: adopt it directly.
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member,maybe-no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)

        return self.yaml_dict

    def get(self, key):
        ''' get the value at dotted-path key, or None '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError as _:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item from a list '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry == None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if entry.has_key(key_or_item):
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove the entry at path from the document '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry == None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                # for/else: rval only becomes True when no mismatch broke the loop.
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to the list at path, creating the list if absent'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' update a dict entry (merge) or a list entry (by index/current value,
            appending when not present) at path '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
                                     ' value=[%s]  [%s]' % (value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index != None:
                ind = index

            if ind != None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            #already exists, return
            if ind != None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' set path to value, working on a round-trip copy so a failed
            add leaves the document untouched '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' set path to value only when the backing file does not yet exist '''
        if not self.file_exists():
            # deepcopy didn't work
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
import base64
# pylint: disable=too-many-arguments
class Secret(OpenShiftCLI):
    ''' Wrap `oc secrets` operations (get/delete/create/update) for one
        named secret in one namespace.
    '''
    def __init__(self,
                 namespace,
                 secret_name=None,
                 decode=False,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(Secret, self).__init__(namespace, kubeconfig)
        self.namespace = namespace
        self.name = secret_name
        self.kubeconfig = kubeconfig
        self.decode = decode
        self.verbose = verbose

    def get(self):
        ''' Return the secret by name, adding 'exists' and (optionally)
            base64-'decoded' data keys; a not-found result is normalized
            to returncode 0 with exists=False. '''
        results = self._get('secrets', self.name)
        results['decoded'] = {}
        results['exists'] = False
        if results['returncode'] == 0 and results['results'][0]:
            results['exists'] = True
            if self.decode:
                if results['results'][0].has_key('data'):
                    for sname, value in results['results'][0]['data'].items():
                        results['decoded'][sname] = base64.decodestring(value)

        # "not found" is not an error for this module; callers check 'exists'.
        if results['returncode'] != 0 and '"%s" not found' % self.name in results['stderr']:
            results['returncode'] = 0

        return results

    def delete(self):
        '''delete a secret by name'''
        return self._delete('secrets', self.name)

    def create(self, files=None, contents=None, content_type=None):
        '''Create a secret via `oc secrets new <name> key=path...` '''
        if not files:
            files = Utils.create_files_from_contents(contents, content_type=content_type)

        secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
        cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
        cmd.extend(secrets)

        return self.openshift_cmd(cmd)

    def update(self, files, force=False):
        '''run update secret

           This receives a list of file names and converts it into a secret.
           The secret is then written to disk and passed into the `oc replace` command.
        '''
        secret = self.prep_secret(files)
        if secret['returncode'] != 0:
            return secret

        sfile_path = '/tmp/%s' % self.name
        with open(sfile_path, 'w') as sfd:
            sfd.write(json.dumps(secret['results']))

        atexit.register(Utils.cleanup, [sfile_path])

        return self._replace(sfile_path, force=force)

    def prep_secret(self, files=None, contents=None):
        ''' return what the secret would look like if created
            This is accomplished by passing -ojson.  This will most likely change in the future
        '''
        if not files:
            files = Utils.create_files_from_contents(contents)

        secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
        cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
        cmd.extend(secrets)

        return self.openshift_cmd(cmd, output=True)
# pylint: disable=too-many-branches
def main():
    '''
    ansible oc module for secrets

    States: list (report), absent (delete if present), present (create when
    missing, otherwise replace only when the desired secret differs).
    Supports check mode.
    '''

    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            name=dict(default=None, type='str'),
            files=dict(default=None, type='list'),
            delete_after=dict(default=False, type='bool'),
            contents=dict(default=None, type='list'),
            content_type=dict(default='raw', choices=['yaml', 'json', 'raw'], type='str'),
            force=dict(default=False, type='bool'),
            decode=dict(default=False, type='bool'),
        ),
        mutually_exclusive=[["contents", "files"]],

        supports_check_mode=True,
    )
    occmd = Secret(module.params['namespace'],
                   module.params['name'],
                   module.params['decode'],
                   kubeconfig=module.params['kubeconfig'],
                   verbose=module.params['debug'])

    state = module.params['state']

    api_rval = occmd.get()

    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval, state="list")

    if not module.params['name']:
        module.fail_json(msg='Please specify a name when state is absent|present.')

    ########
    # Delete
    ########
    if state == 'absent':
        if not Utils.exists(api_rval['results'], module.params['name']):
            module.exit_json(changed=False, state="absent")

        if module.check_mode:
            module.exit_json(changed=False, msg='Would have performed a delete.')

        api_rval = occmd.delete()
        module.exit_json(changed=True, results=api_rval, state="absent")

    if state == 'present':
        if module.params['files']:
            files = module.params['files']
        elif module.params['contents']:
            files = Utils.create_files_from_contents(module.params['contents'])
        else:
            module.fail_json(msg='Either specify files or contents.')

        ########
        # Create
        ########
        if not Utils.exists(api_rval['results'], module.params['name']):

            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')

            api_rval = occmd.create(module.params['files'], module.params['contents'])

            # Remove files
            if files and module.params['delete_after']:
                Utils.cleanup([ftmp['path'] for ftmp in files])

            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)

            module.exit_json(changed=True, results=api_rval, state="present")

        ########
        # Update
        ########
        # Render the would-be secret and compare it with the live one; only
        # replace when they differ.
        secret = occmd.prep_secret(module.params['files'], module.params['contents'])

        if secret['returncode'] != 0:
            module.fail_json(msg=secret)

        if Utils.check_def_equal(secret['results'], api_rval['results'][0]):

            # Remove files
            if files and module.params['delete_after']:
                Utils.cleanup([ftmp['path'] for ftmp in files])

            module.exit_json(changed=False, results=secret['results'], state="present")

        if module.check_mode:
            module.exit_json(changed=False, msg='Would have performed an update.')

        api_rval = occmd.update(files, force=module.params['force'])

        # Remove files
        if secret and module.params['delete_after']:
            Utils.cleanup([ftmp['path'] for ftmp in files])

        if api_rval['returncode'] != 0:
            module.fail_json(msg=api_rval)

        module.exit_json(changed=True, results=api_rval, state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
# (Ansible's module boilerplate: the wildcard import provides AnsibleModule.)
from ansible.module_utils.basic import *

main()
| ivanhorvath/openshift-tools | ansible/roles/lib_openshift_3.2/library/oc_secret.py | Python | apache-2.0 | 37,290 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
class GradMultiply(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by a constant
    factor in the backward pass.

    Useful for damping (or amplifying) gradients flowing into a
    sub-network without changing its activations.
    """

    @staticmethod
    def forward(ctx, x, scale):
        # Remember the factor for the backward pass.
        ctx.scale = scale
        # Return a copy of the input (legacy ``new`` keeps dtype/device).
        return x.new(x)

    @staticmethod
    def backward(ctx, grad):
        # Scale the incoming gradient; ``scale`` itself receives no gradient.
        return ctx.scale * grad, None
| mlperf/training_results_v0.5 | v0.5.0/nvidia/submission/code/translation/pytorch/fairseq/modules/grad_multiply.py | Python | apache-2.0 | 550 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Value for RaggedTensor."""
import numpy as np
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["ragged.RaggedTensorValue"])
@dispatch.register_dispatchable_type
class RaggedTensorValue:
  """Represents the value of a `RaggedTensor`.

  Warning: `RaggedTensorValue` should only be used in graph mode; in
  eager mode, the `tf.RaggedTensor` class contains its value directly.

  See `tf.RaggedTensor` for a description of ragged tensors.
  """

  def __init__(self, values, row_splits):
    """Creates a `RaggedTensorValue`.

    Args:
      values: A numpy array of any type and shape; or a RaggedTensorValue.
      row_splits: A 1-D int32 or int64 numpy array.
    """
    valid_splits = (
        isinstance(row_splits, (np.ndarray, np.generic)) and
        row_splits.dtype in (np.int64, np.int32) and row_splits.ndim == 1)
    if not valid_splits:
      raise TypeError("row_splits must be a 1D int32 or int64 numpy array")
    if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):
      raise TypeError("values must be a numpy array or a RaggedTensorValue")
    if (isinstance(values, RaggedTensorValue) and
        row_splits.dtype != values.row_splits.dtype):
      raise ValueError("row_splits and values.row_splits must have "
                       "the same dtype")
    self._values = values
    self._row_splits = row_splits

  @property
  def row_splits(self):
    """The split indices for the ragged tensor value."""
    return self._row_splits

  @property
  def values(self):
    """The concatenated values for all rows in this tensor."""
    return self._values

  @property
  def dtype(self):
    """The numpy dtype of values in this tensor."""
    return self._values.dtype

  @property
  def flat_values(self):
    """The innermost `values` array for this ragged tensor value."""
    inner = self._values
    while isinstance(inner, RaggedTensorValue):
      inner = inner.values
    return inner

  @property
  def nested_row_splits(self):
    """The row_splits for all ragged dimensions in this ragged tensor value."""
    all_splits = [self._row_splits]
    nested = self._values
    while isinstance(nested, RaggedTensorValue):
      all_splits.append(nested.row_splits)
      nested = nested.values
    return tuple(all_splits)

  @property
  def ragged_rank(self):
    """The number of ragged dimensions in this ragged tensor value."""
    if isinstance(self._values, RaggedTensorValue):
      return self._values.ragged_rank + 1
    return 1

  @property
  def shape(self):
    """A tuple indicating the shape of this RaggedTensorValue."""
    nrows = self._row_splits.shape[0] - 1
    return (nrows, None) + self._values.shape[1:]

  @property
  def _nested_row_partitions(self):
    """The row_partitions representing this shape."""
    return [RowPartition.from_row_splits(rs) for rs in self.nested_row_splits]

  def __str__(self):
    return "<tf.RaggedTensorValue %s>" % self.to_list()

  def __repr__(self):
    return "tf.RaggedTensorValue(values=%r, row_splits=%r)" % (self._values,
                                                               self._row_splits)

  def to_list(self):
    """Returns this ragged tensor value as a nested Python list."""
    inner = self._values
    rows = inner.to_list() if isinstance(inner, RaggedTensorValue) else inner.tolist()
    splits = self._row_splits
    return [rows[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
| tensorflow/tensorflow | tensorflow/python/ops/ragged/ragged_tensor_value.py | Python | apache-2.0 | 4,366 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import os
import logging
logger = logging.getLogger(__name__)
def create_directory(path):
  """Create the directory for ``path`` if it doesn't already exist.

  Args:
    path: A directory path ending with ``os.sep``, or a file path; for a
      file path only the containing directory is created.
  """
  # A trailing separator marks a directory path; anything else is treated
  # as a file path and reduced to its parent directory. (The original
  # unpacked os.path.split() and discarded the file-name half; dirname()
  # expresses the same thing without the unused variable.)
  if path[-1] != os.sep:
    path = os.path.dirname(path)
  if not os.path.exists(path):
    logger.info("Creating directory: %s", path)
    os.makedirs(path)
def get_unique_postfix(file_path, extension):
  """Return the first ``file_path + <n> + extension`` that names no
  existing file, counting n up from 0."""
  suffix = 0
  while True:
    candidate = file_path + str(suffix) + extension
    if not os.path.isfile(candidate):
      return candidate
    suffix += 1
def delete_directory_contents(path):
  """Remove every file directly inside ``path``.

  Entries are removed with ``os.remove``, so ``path`` must contain only
  regular files; a subdirectory raises OSError, as before.
  """
  for file_name in os.listdir(path):
    # os.path.join is the portable spelling of path + os.sep + file_name.
    os.remove(os.path.join(path, file_name))
| NejcZupec/ggrc-core | test/selenium/src/lib/file_ops.py | Python | apache-2.0 | 886 |
# Copyright (c) 2014 Freescale, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.freescale import mechanism_fslsdn
from neutron.tests import base
from neutron.tests.unit import test_db_plugin
"""Unit testing for Freescale SDN mechanism driver."""
class TestFslSdnMechDriverV2(test_db_plugin.NeutronDbPluginV2TestCase):
    """Testing mechanism driver with ML2 plugin."""
    def setUp(self):
        # Make ML2 load only the Freescale SDN mechanism driver.
        cfg.CONF.set_override('mechanism_drivers', ['fslsdn'], 'ml2')
        def mocked_fslsdn_init(self):
            # Mock CRD client, since it requires CRD service running.
            self._crdclient = mock.Mock()
        # Patch initialize() for the duration of setUp so the plugin can be
        # constructed without a live CRD endpoint.
        with mock.patch.object(mechanism_fslsdn.FslsdnMechanismDriver,
                               'initialize', new=mocked_fslsdn_init):
            super(TestFslSdnMechDriverV2, self).setUp()
class TestFslSdnMechDriverNetworksV2(test_db_plugin.TestNetworksV2,
                                     TestFslSdnMechDriverV2):
    """Run the standard ML2 network API tests against the fslsdn driver."""
    pass
class TestFslSdnMechDriverPortsV2(test_db_plugin.TestPortsV2,
                                  TestFslSdnMechDriverV2):
    """Run the standard ML2 port API tests against the fslsdn driver."""
    # Port-binding attributes the shared tests expect this driver to report.
    VIF_TYPE = portbindings.VIF_TYPE_OVS
    CAP_PORT_FILTER = True
class TestFslSdnMechDriverSubnetsV2(test_db_plugin.TestSubnetsV2,
                                    TestFslSdnMechDriverV2):
    """Run the standard ML2 subnet API tests against the fslsdn driver."""
    pass
class TestFslSdnMechanismDriver(base.BaseTestCase):
    """Testing FSL SDN Mechanism driver."""
    def setUp(self):
        super(TestFslSdnMechanismDriver, self).setUp()
        cfg.CONF.set_override('mechanism_drivers', ['fslsdn'], 'ml2')
        self.driver = mechanism_fslsdn.FslsdnMechanismDriver()
        self.driver.initialize()
        # Replace the real CRD client with a mock so each test can assert on
        # the REST calls the driver issues.
        self.client = self.driver._crdclient = mock.Mock()
    def test_create_update_delete_network_postcommit(self):
        """Testing create/update/delete network postcommit operations."""
        tenant_id = 'test'
        network_id = '123'
        segmentation_id = 456
        expected_seg = [{'segmentation_id': segmentation_id}]
        expected_crd_network = {'network':
                                {'network_id': network_id,
                                 'tenant_id': tenant_id,
                                 'name': 'FakeNetwork',
                                 'status': 'ACTIVE',
                                 'admin_state_up': True,
                                 'segments': expected_seg}}
        network_context = self._get_network_context(tenant_id, network_id,
                                                    segmentation_id)
        network = network_context.current
        segments = network_context.network_segments
        net_id = network['id']
        req = self.driver._prepare_crd_network(network, segments)
        # test crd network dict
        self.assertEqual(expected_crd_network, req)
        # test create_network.
        self.driver.create_network_postcommit(network_context)
        self.client.create_network.assert_called_once_with(body=req)
        # test update_network.
        self.driver.update_network_postcommit(network_context)
        self.client.update_network.assert_called_once_with(net_id, body=req)
        # test delete_network.
        self.driver.delete_network_postcommit(network_context)
        self.client.delete_network.assert_called_once_with(net_id)
    def test_create_update_delete_subnet_postcommit(self):
        """Testing create/update/delete subnet postcommit operations."""
        tenant_id = 'test'
        network_id = '123'
        subnet_id = '122'
        cidr = '192.0.0.0/8'
        gateway_ip = '192.0.0.1'
        expected_crd_subnet = {'subnet':
                               {'subnet_id': subnet_id, 'tenant_id': tenant_id,
                                'name': 'FakeSubnet', 'network_id': network_id,
                                'ip_version': 4, 'cidr': cidr,
                                'gateway_ip': gateway_ip,
                                'dns_nameservers': '',
                                'allocation_pools': '',
                                'host_routes': ''}}
        subnet_context = self._get_subnet_context(tenant_id, network_id,
                                                  subnet_id, cidr, gateway_ip)
        subnet = subnet_context.current
        subnet_id = subnet['id']
        req = self.driver._prepare_crd_subnet(subnet)
        # test crd subnet dict
        self.assertEqual(expected_crd_subnet, req)
        # test create_subnet.
        self.driver.create_subnet_postcommit(subnet_context)
        self.client.create_subnet.assert_called_once_with(body=req)
        # test update_subnet.
        self.driver.update_subnet_postcommit(subnet_context)
        self.client.update_subnet.assert_called_once_with(subnet_id, body=req)
        # test delete_subnet.
        self.driver.delete_subnet_postcommit(subnet_context)
        self.client.delete_subnet.assert_called_once_with(subnet_id)
    def test_create_delete_port_postcommit(self):
        """Testing create/delete port postcommit operations."""
        tenant_id = 'test'
        network_id = '123'
        port_id = '453'
        expected_crd_port = {'port':
                             {'port_id': port_id, 'tenant_id': tenant_id,
                              'name': 'FakePort', 'network_id': network_id,
                              'subnet_id': '', 'mac_address': 'aabb',
                              'device_id': '1234', 'ip_address': '',
                              'admin_state_up': True, 'status': 'ACTIVE',
                              'device_owner': 'compute',
                              'security_groups': ''}}
        # Test with empty fixed IP
        port_context = self._get_port_context(tenant_id, network_id, port_id)
        port = port_context.current
        req = self.driver._prepare_crd_port(port)
        # Test crd port dict
        self.assertEqual(expected_crd_port, req)
        # test create_port.
        self.driver.create_port_postcommit(port_context)
        self.client.create_port.assert_called_once_with(body=req)
        # Test delete_port
        self.driver.delete_port_postcommit(port_context)
        self.client.delete_port.assert_called_once_with(port['id'])
    def test_prepare_port_with_single_fixed_ip(self):
        """Test _prepare_crd_port with single fixed_ip."""
        tenant_id = 'test'
        network_id = '123'
        port_id = '453'
        fips = [{"subnet_id": "sub-1", "ip_address": "10.0.0.1"}]
        expected_crd_port = {'port':
                             {'port_id': port_id, 'tenant_id': tenant_id,
                              'name': 'FakePort', 'network_id': network_id,
                              'subnet_id': '', 'mac_address': 'aabb',
                              'device_id': '1234', 'ip_address': '',
                              'admin_state_up': True, 'status': 'ACTIVE',
                              'device_owner': 'compute',
                              'security_groups': ''}}
        port_context = self._get_port_context(tenant_id, network_id, port_id,
                                              fips)
        port = port_context.current
        req = self.driver._prepare_crd_port(port)
        expected_crd_port['port']['subnet_id'] = 'sub-1'
        expected_crd_port['port']['ip_address'] = '10.0.0.1'
        self.assertEqual(expected_crd_port, req)
    def test_prepare_port_with_multiple_fixed_ips(self):
        """Test _prepare_crd_port with multiple fixed_ips."""
        tenant_id = 'test'
        network_id = '123'
        port_id = '453'
        multiple_fips = [{"subnet_id": "sub-1", "ip_address": "10.0.0.1"},
                         {"subnet_id": "sub-1", "ip_address": "10.0.0.4"}]
        expected_crd_port = {'port':
                             {'port_id': port_id, 'tenant_id': tenant_id,
                              'name': 'FakePort', 'network_id': network_id,
                              'subnet_id': '', 'mac_address': 'aabb',
                              'device_id': '1234', 'ip_address': '',
                              'admin_state_up': True, 'status': 'ACTIVE',
                              'device_owner': 'compute',
                              'security_groups': ''}}
        port_context = self._get_port_context(tenant_id, network_id, port_id,
                                              multiple_fips)
        port = port_context.current
        req = self.driver._prepare_crd_port(port)
        expected_crd_port['port']['subnet_id'] = 'sub-1'
        expected_crd_port['port']['ip_address'] = '10.0.0.1'
        self.assertEqual(expected_crd_port, req)
    def _get_subnet_context(self, tenant_id, net_id, subnet_id, cidr,
                            gateway_ip):
        # sample data for testing purpose only.
        subnet = {'tenant_id': tenant_id,
                  'network_id': net_id,
                  'id': subnet_id,
                  'cidr': cidr,
                  'name': 'FakeSubnet',
                  'ip_version': 4,
                  'gateway_ip': gateway_ip,
                  }
        return FakeContext(subnet)
    def _get_port_context(self, tenant_id, net_id, port_id,
                          fixed_ips=None):
        # sample data for testing purpose only
        # NOTE: default changed from a shared mutable ``[]`` to the ``None``
        # sentinel to avoid the mutable-default-argument pitfall; behavior
        # for all existing callers is unchanged.
        if fixed_ips is None:
            fixed_ips = []
        port = {'device_id': '1234',
                'name': 'FakePort',
                'mac_address': 'aabb',
                'device_owner': 'compute',
                'tenant_id': tenant_id,
                'id': port_id,
                'fixed_ips': fixed_ips,
                'admin_state_up': True,
                'status': 'ACTIVE',
                'network_id': net_id}
        return FakeContext(port)
    def _get_network_context(self, tenant_id, net_id, seg_id):
        # sample data for testing purpose only.
        network = {'id': net_id,
                   'tenant_id': tenant_id,
                   'admin_state_up': True,
                   'status': 'ACTIVE',
                   'name': 'FakeNetwork', }
        segments = [{'segmentation_id': seg_id}]
        return FakeNetworkContext(network, segments)
class FakeNetworkContext(object):
    """To generate network context for testing purposes only."""

    def __init__(self, network, segments):
        self._net = network
        self._segs = segments

    # Read-only views, as the ML2 NetworkContext interface expects.
    current = property(lambda self: self._net)
    network_segments = property(lambda self: self._segs)
class FakeContext(object):
    """To generate context for testing purposes only."""

    def __init__(self, record):
        self._rec = record

    # Read-only view of the wrapped record.
    current = property(lambda self: self._rec)
| samsu/neutron | tests/unit/ml2/drivers/freescale/test_mechanism_fslsdn.py | Python | apache-2.0 | 11,225 |
# Generated by Django 2.2.14 on 2020-07-09 10:37
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the promoted app: creates the
    PromotedApproval and PromotedAddon tables and a uniqueness constraint
    on (group_id, version) approvals."""
    initial = True
    dependencies = [
        ('versions', '0008_auto_20200625_1114'),
        ('addons', '0014_remove_addon_view_source'),
    ]
    operations = [
        # Per-version approval records; a version may be approved for
        # several promotion groups, but only once per group.
        migrations.CreateModel(
            name='PromotedApproval',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('group_id', models.SmallIntegerField(choices=[(1, 'Recommended'), (2, 'Verified - Tier 1'), (3, 'Verified - Tier 2'), (4, 'Line'), (5, 'Spotlight'), (6, 'Strategic')], null=True)),
                ('version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='promoted_approvals', to='versions.Version')),
            ],
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        # One promotion group per add-on (OneToOne); group 0 = Not Promoted.
        migrations.CreateModel(
            name='PromotedAddon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('group_id', models.SmallIntegerField(choices=[(0, 'Not Promoted'), (1, 'Recommended'), (2, 'Verified - Tier 1'), (3, 'Verified - Tier 2'), (4, 'Line'), (5, 'Spotlight'), (6, 'Strategic')], default=0)),
                ('addon', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='addons.Addon')),
            ],
            options={
                'get_latest_by': 'created',
                'abstract': False,
                'base_manager_name': 'objects',
            },
            bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
        ),
        migrations.AddConstraint(
            model_name='promotedapproval',
            constraint=models.UniqueConstraint(fields=('group_id', 'version'), name='unique_promoted_version'),
        ),
    ]
| bqbn/addons-server | src/olympia/promoted/migrations/0001_initial.py | Python | bsd-3-clause | 2,466 |
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Author: V. Michel, A. Gramfort
# License: BSD 3 clause
import numpy as np
from ..base import TransformerMixin
from ..utils import array2d
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
    """
    A class for feature agglomeration via the transform interface.

    Mixin: expects ``self.labels_`` (one cluster label per feature) to have
    been set by the fitting estimator.
    """
    def transform(self, X, pooling_func=np.mean):
        """
        Transform a new matrix using the built clustering

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A M by N array of M observations in N dimensions or a length
            M array of M one-dimensional observations.
        pooling_func : a function that takes an array of shape = [M, N] and
            return an array of value of size M.
            Default is np.mean

        Returns
        -------
        Array of pooled values, one column per cluster of features.
        """
        X = array2d(X)
        nX = []
        if len(self.labels_) != X.shape[1]:
            raise ValueError("X has a different number of features than "
                             "during fitting.")
        # Pool the features of each cluster into a single column.
        for l in np.unique(self.labels_):
            nX.append(pooling_func(X[:, self.labels_ == l], axis=1))
        return np.array(nX).T
    def inverse_transform(self, Xred):
        """
        Inverse the transformation.
        Return a vector of size nb_features with the values of Xred assigned
        to each group of features

        Parameters
        ----------
        Xred : array of size k
            The values to be assigned to each cluster of samples

        Returns
        -------
        X : array of size nb_samples
            A vector of size nb_samples with the values of Xred assigned to
            each of the cluster of samples.
        """
        # np.size of the shape tuple equals the number of dimensions, so
        # this tests whether Xred is 1-D.
        if np.size((Xred.shape)) == 1:
            X = np.zeros([self.labels_.shape[0]])
        else:
            X = np.zeros([Xred.shape[0], self.labels_.shape[0]])
        unil = np.unique(self.labels_)
        # Broadcast each cluster's value back onto all features it covers.
        for i in range(len(unil)):
            if np.size((Xred.shape)) == 1:
                X[self.labels_ == unil[i]] = Xred[i]
            else:
                X[:, self.labels_ == unil[i]] = array2d(Xred[:, i]).T
        return X
| florian-f/sklearn | sklearn/cluster/_feature_agglomeration.py | Python | bsd-3-clause | 2,350 |
# -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.generator import Generator
from hyde.site import Site
from fswrap import File
from pyquery import PyQuery
TEST_SITE = File(__file__).parent.parent.child_folder('_test')
class TestBlockdown(object):
    """End-to-end check that the BlockdownPlugin turns =====name=====
    fences into template blocks during generation."""
    def setUp(self):
        # Build a scratch site from the bundled test_jinja fixture.
        TEST_SITE.make()
        TEST_SITE.parent.child_folder(
            'sites/test_jinja').copy_contents_to(TEST_SITE)
    def tearDown(self):
        TEST_SITE.delete()
    def test_can_parse_blockdown(self):
        s = Site(TEST_SITE)
        s.config.plugins = ['hyde.ext.plugins.text.BlockdownPlugin']
        txt = ("This template tests to make sure blocks can be replaced"
               "with markdownish syntax.")
        # %% escapes a literal % because the template is later %-formatted.
        templ = """
        {%% extends "base.html" %%}
        =====title========
        %s
        ====/title========"""
        content = (templ.strip() % txt).strip()
        bd = File(TEST_SITE.child('content/blockdown.html'))
        bd.write(content)
        gen = Generator(s)
        gen.generate_resource_at_path(bd.path)
        res = s.content.resource_from_path(bd.path)
        target = File(
            s.config.deploy_root_path.child(res.relative_deploy_path))
        assert target.exists
        text = target.read_all()
        # The fenced text should have landed inside the page's <title>.
        q = PyQuery(text)
        assert q('title').text().strip() == txt.strip()
| hyde/hyde | tests/ext/test_blockdown.py | Python | mit | 1,332 |
"""
tcp_message Inline Script Hook API Demonstration
------------------------------------------------
* modifies packets containing "foo" to "bar"
* prints various details for each packet.
example cmdline invocation:
mitmdump -T --host --tcp ".*" -q -s examples/tcp_message.py
"""
from mitmproxy.utils import strutils
def tcp_message(tcp_msg):
    """Rewrite every "foo" in the payload to "bar" and log the packet."""
    original = tcp_msg.message
    tcp_msg.message = original.replace("foo", "bar")
    is_modified = tcp_msg.message != original
    # Work out which side of the connection sent/receives this packet.
    sender_side = "client" if tcp_msg.sender == tcp_msg.client_conn else "server"
    receiver_side = "server" if tcp_msg.receiver == tcp_msg.server_conn else "client"
    print(
        "[tcp_message{}] from {} {} to {} {}:\r\n{}".format(
            " (modified)" if is_modified else "",
            sender_side,
            tcp_msg.sender.address,
            receiver_side,
            tcp_msg.receiver.address, strutils.bytes_to_escaped_str(tcp_msg.message))
    )
| dwfreed/mitmproxy | examples/complex/tcp_message.py | Python | mit | 917 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from datetime import timedelta
import logging
from sqlalchemy import desc, func
from flexget import plugin
from flexget.event import event
from flexget.manager import Session
from flexget.utils.tools import multiply_timedelta
try:
from flexget.plugins.filter.series import Series, Episode
except ImportError:
raise plugin.DependencyError(issued_by='est_released_series', missing='series plugin', silent=True)
log = logging.getLogger('est_series_internal')
class EstimatesSeriesInternal(object):
    """Estimates the next episode's release date from the gap between the
    two most recently seen episodes of the same series."""
    @plugin.priority(0)  # Should always be last priority
    def estimate(self, entry):
        # Return an estimated datetime for the entry's episode, or None
        # (implicit) when there is not enough history to guess.
        if not all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
            return
        with Session() as session:
            series = session.query(Series).filter(Series.name == entry['series_name']).first()
            if not series:
                return
            # Two newest episodes of the latest season, newest first.
            # NOTE: SQLAlchemy needs '!= None' (not 'is not None') to emit
            # an IS NOT NULL clause — do not "fix" it.
            episodes = (session.query(Episode).join(Episode.series).
                        filter(Episode.season != None).
                        filter(Series.id == series.id).
                        filter(Episode.season == func.max(Episode.season).select()).
                        order_by(desc(Episode.number)).limit(2).all())
            if len(episodes) < 2:
                return
            # If last two eps were not contiguous, don't guess
            if episodes[0].number != episodes[1].number + 1:
                return
            # If first_seen in None, return
            if episodes[0].first_seen is None or episodes[1].first_seen is None:
                return
            last_diff = episodes[0].first_seen - episodes[1].first_seen
            # If last eps were grabbed close together, we might be catching up, don't guess
            # Or, if last eps were too far apart, don't guess
            # TODO: What range?
            if last_diff < timedelta(days=2) or last_diff > timedelta(days=10):
                return
            # Estimate next season somewhat more than a normal episode break
            if entry['series_season'] > episodes[0].season:
                # TODO: How big should this be?
                return episodes[0].first_seen + multiply_timedelta(last_diff, 2)
            # Estimate next episode comes out about same length as last ep span, with a little leeway
            return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
@event('plugin.register')
def register_plugin():
    # Register this estimator under the 'estimate_release' interface.
    plugin.register(EstimatesSeriesInternal, 'est_series_internal', interfaces=['estimate_release'], api_ver=2)
| sean797/Flexget | flexget/plugins/estimators/est_release_series_internal_db.py | Python | mit | 2,707 |
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
from config import config
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build and wire a Flask app for `config_name`."""
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    # Bind each module-level extension instance to this application,
    # in the same order as before.
    for extension in (bootstrap, mail, moment, db, login_manager, pagedown):
        extension.init_app(app)

    # Imported here to avoid circular imports at module load time.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    return app
| fakdora/flaksy-upto-login | app/__init__.py | Python | mit | 1,012 |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_XXX
# Purpose: Description of the plug-in.
#
# Author: Name and e-mail address
#
# Created: Date
# Copyright: (c) Name
# Licence: GPL
# -------------------------------------------------------------------------------
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_XXX(SpiderFootPlugin):
    """Name:Description"""
    # NOTE(review): SpiderFoot appears to parse this class docstring as
    # "Name:Description" for the UI — keep its format intact.
    # Default options
    opts = {}
    # Option descriptions
    optdescs = {
        # For each option in opts you should have a key/value pair here
        # describing it. It will end up in the UI to explain the option
        # to the end-user.
    }
    # Be sure to completely clear any class variables in setup()
    # or you run the risk of data persisting between scan runs.
    # Target
    results = dict()
    # NOTE(review): mutable class-level/default dicts (results, userOpts)
    # are shared between instances if not reset in setup(), which is why
    # setup() reassigns them below.
    def setup(self, sfc, userOpts=dict()):
        # Store the SpiderFoot helper and merge user-supplied option values.
        self.sf = sfc
        self.results = dict()
        # Clear / reset any other class member variables here
        # or you risk them persisting between threads.
        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]
    # What events is this module interested in for input
    # * = be notified about all events.
    def watchedEvents(self):
        return ["*"]
    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return None
    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        # If you are processing TARGET_WEB_CONTENT from sfp_spider, this is how you
        # would get the source of that raw data (e.g. a URL.)
        eventSource = event.sourceEvent.data
        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
        # DO SOMETHING HERE
        # Notify other modules of what you've found
        evt = SpiderFootEvent("EVENT_CODE_HERE", "data here", self.__name__, event.sourceEvent)
        self.notifyListeners(evt)
        return None
    # If you intend for this module to act on its own (e.g. not solely rely
    # on events from other modules, then you need to have a start() method
    # and within that method call self.checkForStop() to see if you've been
    # politely asked by the controller to stop your activities (user abort.)
| LubyRuffy/spiderfoot | modules/sfp_template.py | Python | gpl-2.0 | 2,631 |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of pyspades.
# pyspades is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyspades is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyspades. If not, see <http://www.gnu.org/licenses/>.
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
try:
    from weakref import WeakSet
except ImportError:
    # python 2.6 support (sigh)
    from weakref import WeakKeyDictionary
    class WeakSet(object):
        """Minimal WeakSet substitute backed by a WeakKeyDictionary.

        Members are held weakly: once a member is garbage-collected it
        silently disappears from the set.
        """
        def __init__(self):
            self._dict = WeakKeyDictionary()
        def add(self, value):
            # The mapped value is irrelevant; only the (weak) key matters.
            self._dict[value] = True
        def remove(self, value):
            del self._dict[value]
        def __iter__(self):
            # On Python 2, keys() returns a list, i.e. a snapshot, so
            # members collected mid-iteration don't break the loop.
            for key in self._dict.keys():
                yield key
        def __contains__(self, other):
            return other in self._dict
        def __len__(self):
            return len(self._dict)
class Scheduler(object):
    """Tracks delayed calls and looping calls made on behalf of a protocol
    so they can all be cancelled at once via reset().

    References are held weakly, so completed calls drop out on their own.
    """

    def __init__(self, protocol):
        self.protocol = protocol
        self.calls = WeakSet()
        self.loops = WeakSet()

    def call_later(self, *arg, **kw):
        """Schedule via the reactor and remember the resulting call."""
        delayed = reactor.callLater(*arg, **kw)
        self.calls.add(delayed)
        return delayed

    def call_end(self, *arg, **kw):
        """Schedule via the protocol's call_end and remember the call."""
        delayed = self.protocol.call_end(*arg, **kw)
        self.calls.add(delayed)
        return delayed

    def loop_call(self, delay, func, *arg, **kw):
        """Start a LoopingCall for func (not firing immediately)."""
        looper = LoopingCall(func, *arg, **kw)
        looper.start(delay, False)
        self.loops.add(looper)
        return looper

    def reset(self):
        """Cancel pending calls, stop running loops, forget everything."""
        for delayed in self.calls:
            if delayed.active():
                delayed.cancel()
        for looper in self.loops:
            if looper.running:
                looper.stop()
        self.calls = WeakSet()
        self.loops = WeakSet()
# A module to expose various thread/process/job related structures and
# methods from kernel32
#
# The MIT License
#
# Copyright (c) 2006 the Mozilla Foundation <http://www.mozilla.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from ctypes import c_void_p, POINTER, sizeof, Structure, windll, WinError, WINFUNCTYPE
from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LPCWSTR, LPWSTR, UINT, WORD
# Pointer-type aliases not provided directly by ctypes.wintypes.
LPVOID = c_void_p
LPBYTE = POINTER(BYTE)
LPDWORD = POINTER(DWORD)
# wShowWindow value: start the process with its window hidden.
SW_HIDE = 0
def ErrCheckBool(result, func, args):
    """errcheck function for Windows functions that return a BOOL True
    on success"""
    if result:
        return args
    raise WinError()
# CloseHandle()
CloseHandleProto = WINFUNCTYPE(BOOL, HANDLE)
CloseHandle = CloseHandleProto(("CloseHandle", windll.kernel32))
# Raise WinError on a FALSE return instead of handing it back to callers.
CloseHandle.errcheck = ErrCheckBool
# AutoHANDLE
class AutoHANDLE(HANDLE):
    """Subclass of HANDLE which will call CloseHandle() on deletion."""
    def Close(self):
        # Only close live handles; zeroing the value afterwards makes
        # Close() safe to call more than once.
        if self.value:
            CloseHandle(self)
            self.value = 0
    def __del__(self):
        self.Close()
    def __int__(self):
        return self.value
def ErrCheckHandle(result, func, args):
    """errcheck function for Windows functions that return a HANDLE.

    A non-NULL result is wrapped in AutoHANDLE so it is closed
    automatically when garbage-collected.
    """
    if result:
        return AutoHANDLE(result)
    raise WinError()
# PROCESS_INFORMATION structure
class PROCESS_INFORMATION(Structure):
    """Win32 PROCESS_INFORMATION: handles and ids filled in by
    CreateProcess()."""
    _fields_ = [("hProcess", HANDLE),
                ("hThread", HANDLE),
                ("dwProcessID", DWORD),
                ("dwThreadID", DWORD)]
    def __init__(self):
        # The original also did ``self.cb = sizeof(self)`` here, copied from
        # a sized struct such as STARTUPINFO; PROCESS_INFORMATION has no
        # ``cb`` member, so that line only created an unused Python
        # attribute and has been removed.
        Structure.__init__(self)
LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
# STARTUPINFO structure
class STARTUPINFO(Structure):
    """Win32 STARTUPINFO passed to CreateProcess().

    NOTE(review): nothing here initializes ``cb``; callers presumably set
    ``cb = sizeof(STARTUPINFO)`` (and ``dwFlags``) before use — confirm at
    the call sites.
    """
    _fields_ = [("cb", DWORD),
                ("lpReserved", LPWSTR),
                ("lpDesktop", LPWSTR),
                ("lpTitle", LPWSTR),
                ("dwX", DWORD),
                ("dwY", DWORD),
                ("dwXSize", DWORD),
                ("dwYSize", DWORD),
                ("dwXCountChars", DWORD),
                ("dwYCountChars", DWORD),
                ("dwFillAttribute", DWORD),
                ("dwFlags", DWORD),
                ("wShowWindow", WORD),
                ("cbReserved2", WORD),
                ("lpReserved2", LPBYTE),
                ("hStdInput", HANDLE),
                ("hStdOutput", HANDLE),
                ("hStdError", HANDLE)
                ]
LPSTARTUPINFO = POINTER(STARTUPINFO)
# Flag bits for STARTUPINFO.dwFlags, selecting which optional fields
# (window size/position, show state, std handles, ...) are honored.
STARTF_USESHOWWINDOW = 0x01
STARTF_USESIZE = 0x02
STARTF_USEPOSITION = 0x04
STARTF_USECOUNTCHARS = 0x08
STARTF_USEFILLATTRIBUTE = 0x10
STARTF_RUNFULLSCREEN = 0x20
STARTF_FORCEONFEEDBACK = 0x40
STARTF_FORCEOFFFEEDBACK = 0x80
STARTF_USESTDHANDLES = 0x100
# EnvironmentBlock
class EnvironmentBlock:
    """An object which can be passed as the lpEnv parameter of CreateProcess.
    It is initialized with a dictionary.

    The block is a sequence of NUL-separated "name=value" strings ending
    with an empty entry (i.e. a double NUL terminator).
    """
    def __init__(self, dict):
        # NOTE: parameter name kept as ``dict`` (shadowing the builtin) to
        # preserve the existing keyword-argument interface.
        if not dict:
            self._as_parameter_ = None
        else:
            # ``items()`` instead of the Python-2-only ``iteritems()``:
            # works on both Python 2 and 3 for this small mapping.
            values = ["%s=%s" % (key, value)
                      for (key, value) in dict.items()]
            values.append("")
            self._as_parameter_ = LPCWSTR("\0".join(values))
# CreateProcess()
CreateProcessProto = WINFUNCTYPE(BOOL, # Return type
                                 LPCWSTR, # lpApplicationName
                                 LPWSTR, # lpCommandLine
                                 LPVOID, # lpProcessAttributes
                                 LPVOID, # lpThreadAttributes
                                 BOOL, # bInheritHandles
                                 DWORD, # dwCreationFlags
                                 LPVOID, # lpEnvironment
                                 LPCWSTR, # lpCurrentDirectory
                                 LPSTARTUPINFO, # lpStartupInfo
                                 LPPROCESS_INFORMATION # lpProcessInformation
                                 )
# ctypes paramflags: (1, name[, default]) marks an input parameter,
# (2, name) an output parameter returned to the caller.
CreateProcessFlags = ((1, "lpApplicationName", None),
                      (1, "lpCommandLine"),
                      (1, "lpProcessAttributes", None),
                      (1, "lpThreadAttributes", None),
                      (1, "bInheritHandles", True),
                      (1, "dwCreationFlags", 0),
                      (1, "lpEnvironment", None),
                      (1, "lpCurrentDirectory", None),
                      (1, "lpStartupInfo"),
                      (2, "lpProcessInformation"))
def ErrCheckCreateProcess(result, func, args):
    """errcheck for CreateProcess: validate the BOOL result, then unpack
    the PROCESS_INFORMATION output argument into a
    (hProcess, hThread, dwProcessID, dwThreadID) tuple.
    """
    ErrCheckBool(result, func, args)
    # CreateProcessFlags marks lpProcessInformation (args[9]) as an output
    # parameter; wrap the raw handles so they are closed automatically.
    info = args[9]
    return (AutoHANDLE(info.hProcess),
            AutoHANDLE(info.hThread),
            info.dwProcessID,
            info.dwThreadID)
CreateProcess = CreateProcessProto(("CreateProcessW", windll.kernel32),
                                   CreateProcessFlags)
CreateProcess.errcheck = ErrCheckCreateProcess
# dwCreationFlags bits accepted by CreateProcess.
CREATE_BREAKAWAY_FROM_JOB = 0x01000000
CREATE_DEFAULT_ERROR_MODE = 0x04000000
CREATE_NEW_CONSOLE = 0x00000010
CREATE_NEW_PROCESS_GROUP = 0x00000200
CREATE_NO_WINDOW = 0x08000000
CREATE_SUSPENDED = 0x00000004
CREATE_UNICODE_ENVIRONMENT = 0x00000400
DEBUG_ONLY_THIS_PROCESS = 0x00000002
DEBUG_PROCESS = 0x00000001
DETACHED_PROCESS = 0x00000008
# CreateJobObject()
CreateJobObjectProto = WINFUNCTYPE(HANDLE, # Return type
                                   LPVOID, # lpJobAttributes
                                   LPCWSTR # lpName
                                   )
CreateJobObjectFlags = ((1, "lpJobAttributes", None),
                        (1, "lpName", None))
CreateJobObject = CreateJobObjectProto(("CreateJobObjectW", windll.kernel32),
                                       CreateJobObjectFlags)
CreateJobObject.errcheck = ErrCheckHandle
# AssignProcessToJobObject()
AssignProcessToJobObjectProto = WINFUNCTYPE(BOOL, # Return type
                                            HANDLE, # hJob
                                            HANDLE # hProcess
                                            )
AssignProcessToJobObjectFlags = ((1, "hJob"),
                                 (1, "hProcess"))
AssignProcessToJobObject = AssignProcessToJobObjectProto(
    ("AssignProcessToJobObject", windll.kernel32),
    AssignProcessToJobObjectFlags)
AssignProcessToJobObject.errcheck = ErrCheckBool
# ResumeThread()
def ErrCheckResumeThread(result, func, args):
    """errcheck for ResumeThread: the API returns the thread's previous
    suspend count, or (DWORD)-1 on failure."""
    if result == -1:
        raise WinError()
    return args
ResumeThreadProto = WINFUNCTYPE(DWORD, # Return type
                                HANDLE # hThread
                                )
ResumeThreadFlags = ((1, "hThread"),)
ResumeThread = ResumeThreadProto(("ResumeThread", windll.kernel32),
                                 ResumeThreadFlags)
ResumeThread.errcheck = ErrCheckResumeThread
# TerminateJobObject()
TerminateJobObjectProto = WINFUNCTYPE(BOOL, # Return type
                                      HANDLE, # hJob
                                      UINT # uExitCode
                                      )
TerminateJobObjectFlags = ((1, "hJob"),
                           (1, "uExitCode", 127))
TerminateJobObject = TerminateJobObjectProto(
    ("TerminateJobObject", windll.kernel32),
    TerminateJobObjectFlags)
TerminateJobObject.errcheck = ErrCheckBool
# WaitForSingleObject()
WaitForSingleObjectProto = WINFUNCTYPE(DWORD, # Return type
                                       HANDLE, # hHandle
                                       DWORD, # dwMilliseconds
                                       )
WaitForSingleObjectFlags = ((1, "hHandle"),
                            (1, "dwMilliseconds", -1))
WaitForSingleObject = WaitForSingleObjectProto(
    ("WaitForSingleObject", windll.kernel32),
    WaitForSingleObjectFlags)
# Timeout value / possible return values of WaitForSingleObject.
INFINITE = -1
WAIT_TIMEOUT = 0x0102
WAIT_OBJECT_0 = 0x0
WAIT_ABANDONED = 0x0080
# GetExitCodeProcess()
GetExitCodeProcessProto = WINFUNCTYPE(BOOL, # Return type
                                      HANDLE, # hProcess
                                      LPDWORD, # lpExitCode
                                      )
GetExitCodeProcessFlags = ((1, "hProcess"),
                           (2, "lpExitCode"))
GetExitCodeProcess = GetExitCodeProcessProto(
    ("GetExitCodeProcess", windll.kernel32),
    GetExitCodeProcessFlags)
GetExitCodeProcess.errcheck = ErrCheckBool
| mastizada/kuma | vendor/packages/ipython/IPython/frontend/process/winprocess.py | Python | mpl-2.0 | 9,831 |
# pylint: disable=missing-docstring
import logging
import numpy as np
from scipy import stats
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from edxval.api import get_videos_for_course
from openedx.core.djangoapps.request_cache.middleware import request_cached
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from openedx.core.lib.graph_traversals import traverse_pre_order
from xmodule.modulestore.django import modulestore
from .utils import get_bool_param, course_author_access_required
# Module-level logger for the course-quality API views.
log = logging.getLogger(__name__)
@view_auth_classes()
class CourseQualityView(DeveloperErrorViewMixin, GenericAPIView):
    """
    **Use Case**

    **Example Requests**

        GET /api/courses/v1/quality/{course_id}/

    **GET Parameters**

        A GET request may include the following parameters.

        * all
        * sections
        * subsections
        * units
        * videos
        * exclude_graded (boolean) - whether to exclude graded subsections in the subsections and units information.

    **GET Response Values**

        The HTTP 200 response has the following values.

        * is_self_paced - whether the course is self-paced.
        * sections
            * total_number - number of sections in the course.
            * total_visible - number of sections visible to learners in the course.
            * number_with_highlights - number of sections that have at least one highlight entered.
            * highlights_enabled - whether highlights are enabled in the course.
        * subsections
            * total_visible - number of subsections visible to learners in the course.
            * num_with_one_block_type - number of visible subsections containing only one type of block.
            * num_block_types - statistics for number of block types across all visible subsections.
                * min
                * max
                * mean
                * median
                * mode
        * units
            * total_visible - number of units visible to learners in the course.
            * num_blocks - statistics for number of block across all visible units.
                * min
                * max
                * mean
                * median
                * mode
        * videos
            * total_number - number of video blocks in the course.
            * num_with_val_id - number of video blocks that include video pipeline IDs.
            * num_mobile_encoded - number of videos encoded through the video pipeline.
            * durations - statistics for video duration across all videos encoded through the video pipeline.
                * min
                * max
                * mean
                * median
                * mode
    """
    @course_author_access_required
    def get(self, request, course_key):
        """
        Returns validation information for the given course.
        """
        all_requested = get_bool_param(request, 'all', False)
        store = modulestore()
        with store.bulk_operations(course_key):
            course = store.get_course(course_key, depth=self._required_course_depth(request, all_requested))
            response = dict(
                is_self_paced=course.self_paced,
            )
            if get_bool_param(request, 'sections', all_requested):
                response.update(
                    sections=self._sections_quality(course)
                )
            if get_bool_param(request, 'subsections', all_requested):
                response.update(
                    subsections=self._subsections_quality(course, request)
                )
            if get_bool_param(request, 'units', all_requested):
                response.update(
                    units=self._units_quality(course, request)
                )
            if get_bool_param(request, 'videos', all_requested):
                response.update(
                    videos=self._videos_quality(course)
                )
            return Response(response)

    def _required_course_depth(self, request, all_requested):
        """Return the modulestore retrieval depth needed for the request."""
        if get_bool_param(request, 'units', all_requested):
            # The num_blocks metric for "units" requires retrieving all blocks in the graph.
            return None
        elif get_bool_param(request, 'subsections', all_requested):
            # The num_block_types metric for "subsections" requires retrieving all blocks in the graph.
            return None
        elif get_bool_param(request, 'sections', all_requested):
            return 1
        else:
            return 0

    def _sections_quality(self, course):
        """Return section counts and highlight usage for the course."""
        sections, visible_sections = self._get_sections(course)
        sections_with_highlights = [s for s in visible_sections if s.highlights]
        return dict(
            total_number=len(sections),
            total_visible=len(visible_sections),
            number_with_highlights=len(sections_with_highlights),
            highlights_enabled=course.highlights_enabled_for_messaging,
        )

    def _subsections_quality(self, course, request):
        """Return block-type-diversity statistics for visible subsections."""
        subsection_unit_dict = self._get_subsections_and_units(course, request)
        num_block_types_per_subsection_dict = {}
        # BUG FIX: .iteritems()/.itervalues() are Python 2-only; the
        # .items()/.values() views behave equivalently on Python 2 and 3.
        for subsection_key, unit_dict in subsection_unit_dict.items():
            leaf_block_types_in_subsection = (
                unit_info['leaf_block_types']
                for unit_info in unit_dict.values()
            )
            num_block_types_per_subsection_dict[subsection_key] = len(set().union(*leaf_block_types_in_subsection))
        return dict(
            total_visible=len(num_block_types_per_subsection_dict),
            num_with_one_block_type=list(num_block_types_per_subsection_dict.values()).count(1),
            num_block_types=self._stats_dict(list(num_block_types_per_subsection_dict.values())),
        )

    def _units_quality(self, course, request):
        """Return block-count statistics for visible units."""
        subsection_unit_dict = self._get_subsections_and_units(course, request)
        num_leaf_blocks_per_unit = [
            unit_info['num_leaf_blocks']
            for unit_dict in subsection_unit_dict.values()
            for unit_info in unit_dict.values()
        ]
        return dict(
            total_visible=len(num_leaf_blocks_per_unit),
            num_blocks=self._stats_dict(num_leaf_blocks_per_unit),
        )

    def _videos_quality(self, course):
        """Return video pipeline usage and duration statistics."""
        video_blocks_in_course = modulestore().get_items(course.id, qualifiers={'category': 'video'})
        videos_in_val = list(get_videos_for_course(course.id))
        video_durations = [video['duration'] for video in videos_in_val]
        return dict(
            total_number=len(video_blocks_in_course),
            num_mobile_encoded=len(videos_in_val),
            num_with_val_id=len([v for v in video_blocks_in_course if v.edx_video_id]),
            durations=self._stats_dict(video_durations),
        )

    @request_cached
    def _get_subsections_and_units(self, course, request):
        """
        Returns {subsection_key: {unit_key: {num_leaf_blocks: <>, leaf_block_types: set(<>) }}}
        for all visible subsections and units.
        """
        _, visible_sections = self._get_sections(course)
        subsection_dict = {}
        for section in visible_sections:
            visible_subsections = self._get_visible_children(section)
            if get_bool_param(request, 'exclude_graded', False):
                visible_subsections = [s for s in visible_subsections if not s.graded]
            for subsection in visible_subsections:
                unit_dict = {}
                visible_units = self._get_visible_children(subsection)
                for unit in visible_units:
                    leaf_blocks = self._get_leaf_blocks(unit)
                    unit_dict[unit.location] = dict(
                        num_leaf_blocks=len(leaf_blocks),
                        leaf_block_types=set(block.location.block_type for block in leaf_blocks),
                    )
                subsection_dict[subsection.location] = unit_dict
        return subsection_dict

    @request_cached
    def _get_sections(self, course):
        """Return (all sections, learner-visible sections) of the course."""
        return self._get_all_children(course)

    def _get_all_children(self, parent):
        """Return (children, learner-visible children) of the given block."""
        store = modulestore()
        children = [store.get_item(child_usage_key) for child_usage_key in self._get_children(parent)]
        visible_children = [
            c for c in children
            if not c.visible_to_staff_only and not c.hide_from_toc
        ]
        return children, visible_children

    def _get_visible_children(self, parent):
        """Return only the learner-visible children of the given block."""
        _, visible_children = self._get_all_children(parent)
        return visible_children

    def _get_children(self, parent):
        """Return the block's child usage keys, or [] if it has none."""
        if not hasattr(parent, 'children'):
            return []
        else:
            return parent.children

    def _get_leaf_blocks(self, unit):
        """Return all visible leaf (non-container) blocks under the unit."""
        def leaf_filter(block):
            # A leaf is anything that is not a container type and has no
            # children of its own.
            return (
                block.location.block_type not in ('chapter', 'sequential', 'vertical') and
                len(self._get_children(block)) == 0
            )
        return list(traverse_pre_order(unit, self._get_visible_children, leaf_filter))

    def _stats_dict(self, data):
        """Return min/max/mean/median/mode statistics for a list of numbers,
        or all-None when the list is empty."""
        if not data:
            return dict(
                min=None,
                max=None,
                mean=None,
                median=None,
                mode=None,
            )
        else:
            return dict(
                min=min(data),
                max=max(data),
                mean=np.around(np.mean(data)),
                median=np.around(np.median(data)),
                mode=stats.mode(data, axis=None)[0][0],
            )
| gsehub/edx-platform | cms/djangoapps/contentstore/api/views/course_quality.py | Python | agpl-3.0 | 9,779 |
"""Mock helpers for Z-Wave component."""
from pydispatch import dispatcher
from tests.async_mock import MagicMock
def value_changed(value):
    """Fire a mocked value-changed signal for the given value."""
    node = value.node
    dispatcher.send(
        MockNetwork.SIGNAL_VALUE_CHANGED,
        value=value,
        node=node,
        network=node._network,
    )
def node_changed(node):
    """Fire a mocked node-changed signal for the given node."""
    dispatcher.send(
        MockNetwork.SIGNAL_NODE,
        node=node,
        network=node._network,
    )
def notification(node_id, network=None):
    """Fire a mocked notification signal for the given node id."""
    payload = {"nodeId": node_id}
    dispatcher.send(
        MockNetwork.SIGNAL_NOTIFICATION, args=payload, network=network
    )
class MockOption(MagicMock):
    """Mock Z-Wave options."""

    def __init__(self, device=None, config_path=None, user_path=None, cmd_line=None):
        """Initialize a Z-Wave mock options."""
        super().__init__()
        # Store each constructor argument as a plain attribute so tests can
        # read it back directly instead of getting a child mock.
        for attr, val in (
            ("device", device),
            ("config_path", config_path),
            ("user_path", user_path),
            ("cmd_line", cmd_line),
        ):
            setattr(self, attr, val)

    def _get_child_mock(self, **kw):
        """Create child mocks with right MagicMock class."""
        return MagicMock(**kw)
class MockNetwork(MagicMock):
    """Mock Z-Wave network."""
    # Signal names sent through pydispatch by the helpers above; the "mock_"
    # prefix keeps them distinct from any real signal names.
    SIGNAL_NETWORK_FAILED = "mock_NetworkFailed"
    SIGNAL_NETWORK_STARTED = "mock_NetworkStarted"
    SIGNAL_NETWORK_READY = "mock_NetworkReady"
    SIGNAL_NETWORK_STOPPED = "mock_NetworkStopped"
    SIGNAL_NETWORK_RESETTED = "mock_DriverResetted"
    SIGNAL_NETWORK_AWAKED = "mock_DriverAwaked"
    SIGNAL_DRIVER_FAILED = "mock_DriverFailed"
    SIGNAL_DRIVER_READY = "mock_DriverReady"
    SIGNAL_DRIVER_RESET = "mock_DriverReset"
    SIGNAL_DRIVER_REMOVED = "mock_DriverRemoved"
    SIGNAL_GROUP = "mock_Group"
    SIGNAL_NODE = "mock_Node"
    SIGNAL_NODE_ADDED = "mock_NodeAdded"
    SIGNAL_NODE_EVENT = "mock_NodeEvent"
    SIGNAL_NODE_NAMING = "mock_NodeNaming"
    SIGNAL_NODE_NEW = "mock_NodeNew"
    SIGNAL_NODE_PROTOCOL_INFO = "mock_NodeProtocolInfo"
    SIGNAL_NODE_READY = "mock_NodeReady"
    SIGNAL_NODE_REMOVED = "mock_NodeRemoved"
    SIGNAL_SCENE_EVENT = "mock_SceneEvent"
    SIGNAL_VALUE = "mock_Value"
    SIGNAL_VALUE_ADDED = "mock_ValueAdded"
    SIGNAL_VALUE_CHANGED = "mock_ValueChanged"
    SIGNAL_VALUE_REFRESHED = "mock_ValueRefreshed"
    SIGNAL_VALUE_REMOVED = "mock_ValueRemoved"
    SIGNAL_POLLING_ENABLED = "mock_PollingEnabled"
    SIGNAL_POLLING_DISABLED = "mock_PollingDisabled"
    SIGNAL_CREATE_BUTTON = "mock_CreateButton"
    SIGNAL_DELETE_BUTTON = "mock_DeleteButton"
    SIGNAL_BUTTON_ON = "mock_ButtonOn"
    SIGNAL_BUTTON_OFF = "mock_ButtonOff"
    SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = "mock_EssentialNodeQueriesComplete"
    SIGNAL_NODE_QUERIES_COMPLETE = "mock_NodeQueriesComplete"
    SIGNAL_AWAKE_NODES_QUERIED = "mock_AwakeNodesQueried"
    SIGNAL_ALL_NODES_QUERIED = "mock_AllNodesQueried"
    SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = "mock_AllNodesQueriedSomeDead"
    SIGNAL_MSG_COMPLETE = "mock_MsgComplete"
    SIGNAL_NOTIFICATION = "mock_Notification"
    SIGNAL_CONTROLLER_COMMAND = "mock_ControllerCommand"
    SIGNAL_CONTROLLER_WAITING = "mock_ControllerWaiting"
    # Network state values; new networks start in STATE_STOPPED.
    STATE_STOPPED = 0
    STATE_FAILED = 1
    STATE_RESETTED = 3
    STATE_STARTED = 5
    STATE_AWAKED = 7
    STATE_READY = 10
    def __init__(self, options=None, *args, **kwargs):
        """Initialize a Z-Wave mock network."""
        super().__init__()
        self.options = options
        self.state = MockNetwork.STATE_STOPPED
class MockNode(MagicMock):
    """Mock Z-Wave node."""

    def __init__(
        self,
        *,
        node_id=567,
        name="Mock Node",
        manufacturer_id="ABCD",
        product_id="123",
        product_type="678",
        command_classes=None,
        can_wake_up_value=True,
        manufacturer_name="Test Manufacturer",
        product_name="Test Product",
        network=None,
        **kwargs,
    ):
        """Initialize a Z-Wave mock node."""
        super().__init__()
        # Store the fixed keyword arguments as plain attributes.
        for attr, val in (
            ("node_id", node_id),
            ("name", name),
            ("manufacturer_id", manufacturer_id),
            ("product_id", product_id),
            ("product_type", product_type),
            ("manufacturer_name", manufacturer_name),
            ("product_name", product_name),
            ("can_wake_up_value", can_wake_up_value),
        ):
            setattr(self, attr, val)
        self._command_classes = command_classes or []
        if network is not None:
            self._network = network
        # Any extra keyword arguments become attributes too.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)

    def has_command_class(self, command_class):
        """Test if mock has a command class."""
        return command_class in self._command_classes

    def get_battery_level(self):
        """Return mock battery level."""
        return 42

    def can_wake_up(self):
        """Return whether the node can wake up."""
        return self.can_wake_up_value

    def _get_child_mock(self, **kw):
        """Create child mocks with right MagicMock class."""
        return MagicMock(**kw)
class MockValue(MagicMock):
    """Mock Z-Wave value."""

    # Class-level counter used to hand out unique value ids.
    _mock_value_id = 1234

    def __init__(
        self,
        *,
        label="Mock Value",
        node=None,
        instance=0,
        index=0,
        value_id=None,
        **kwargs,
    ):
        """Initialize a Z-Wave mock value."""
        super().__init__()
        self.label = label
        self.node = node
        self.instance = instance
        self.index = index
        if value_id is None:
            # Allocate the next id from the shared counter.
            MockValue._mock_value_id += 1
            value_id = MockValue._mock_value_id
        self.value_id = value_id
        self.object_id = value_id
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)

    def _get_child_mock(self, **kw):
        """Create child mocks with right MagicMock class."""
        return MagicMock(**kw)

    def refresh(self):
        """Mock refresh of node value."""
        value_changed(self)
class MockEntityValues:
    """Mock Z-Wave entity values."""

    def __init__(self, **kwargs):
        """Initialize the mock zwave values."""
        # The four standard value slots always exist, defaulting to None.
        self.primary = None
        self.wakeup = None
        self.battery = None
        self.power = None
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __iter__(self):
        """Allow iteration over all values."""
        return iter(self.__dict__.values())
| sdague/home-assistant | tests/mock/zwave.py | Python | apache-2.0 | 6,380 |
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove unused attr status
Revision ID: 533646c7af38
Revises: 3a482171410f
Create Date: 2015-05-28 13:13:47.651353
"""
# revision identifiers, used by Alembic.
revision = '533646c7af38'
down_revision = '3a482171410f'
from alembic import op
from oslo_log import log
import sqlalchemy as sql
from manila.common import constants
from manila.i18n import _LE
LOG = log.getLogger(__name__)
# The unused column dropped by upgrade() / restored by downgrade(),
# and the tables it lives in.
COLUMN_NAME = 'status'
TABLE_NAMES = ('network_allocations', 'security_services')
def upgrade():
    """Drop the unused 'status' column from each affected table."""
    for table_name in TABLE_NAMES:
        try:
            op.drop_column(table_name, COLUMN_NAME)
        except Exception:
            LOG.error(_LE("Column '%s' could not be dropped"), COLUMN_NAME)
            raise
def downgrade():
    """Re-add the 'status' column to each affected table."""
    for table_name in TABLE_NAMES:
        try:
            # NOTE(vponomaryov): original type of attr was enum. But
            # alembic is buggy with enums [1], so use string type
            # instead. Anyway we have no reason to keep enum/constraint
            # on specific set of possible statuses because they have
            # not been used.
            # [1] - https://bitbucket.org/zzzeek/alembic/
            # issue/89/opadd_column-and-opdrop_column-should
            op.add_column(
                table_name,
                sql.Column(
                    COLUMN_NAME,
                    sql.String(255),
                    default=constants.STATUS_NEW,
                ),
            )
        except Exception:
            LOG.error(_LE("Column '%s' could not be added"), COLUMN_NAME)
            raise
| weiting-chen/manila | manila/db/migrations/alembic/versions/533646c7af38_remove_unused_attr_status.py | Python | apache-2.0 | 2,171 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
""" A MeshVisual Visual that uses the new shader Function.
"""
from __future__ import division
import numpy as np
from .visual import Visual
from .shaders import Function, Varying
from ..gloo import VertexBuffer, IndexBuffer
from ..geometry import MeshData
from ..color import Color
# Shaders for lit rendering (using phong shading)
shading_vertex_template = """
varying vec3 v_normal_vec;
varying vec3 v_light_vec;
varying vec3 v_eye_vec;
varying vec4 v_ambientk;
varying vec4 v_light_color;
varying vec4 v_base_color;
void main() {
v_ambientk = $ambientk;
v_light_color = $light_color;
v_base_color = $base_color;
vec4 pos_scene = $visual2scene($to_vec4($position));
vec4 normal_scene = $visual2scene(vec4($normal, 1));
vec4 origin_scene = $visual2scene(vec4(0, 0, 0, 1));
normal_scene /= normal_scene.w;
origin_scene /= origin_scene.w;
vec3 normal = normalize(normal_scene.xyz - origin_scene.xyz);
v_normal_vec = normal; //VARYING COPY
vec4 pos_front = $scene2doc(pos_scene);
pos_front.z += 0.01;
pos_front = $doc2scene(pos_front);
pos_front /= pos_front.w;
vec4 pos_back = $scene2doc(pos_scene);
pos_back.z -= 0.01;
pos_back = $doc2scene(pos_back);
pos_back /= pos_back.w;
vec3 eye = normalize(pos_front.xyz - pos_back.xyz);
v_eye_vec = eye; //VARYING COPY
vec3 light = normalize($light_dir.xyz);
v_light_vec = light; //VARYING COPY
gl_Position = $transform($to_vec4($position));
}
"""
shading_fragment_template = """
varying vec3 v_normal_vec;
varying vec3 v_light_vec;
varying vec3 v_eye_vec;
varying vec4 v_ambientk;
varying vec4 v_light_color;
varying vec4 v_base_color;
void main() {
//DIFFUSE
float diffusek = dot(v_light_vec, v_normal_vec);
//clamp, because 0 < theta < pi/2
diffusek = clamp(diffusek, 0, 1);
vec4 diffuse_color = v_light_color * diffusek;
//diffuse_color.a = 1.0;
//SPECULAR
//reflect light wrt normal for the reflected ray, then
//find the angle made with the eye
float speculark = dot(reflect(v_light_vec, v_normal_vec), v_eye_vec);
speculark = clamp(speculark, 0, 1);
//raise to the material's shininess, multiply with a
//small factor for spread
speculark = 20 * pow(speculark, 200.0);
vec4 specular_color = v_light_color * speculark;
gl_FragColor =
v_base_color * (v_ambientk + diffuse_color) + specular_color;
//gl_FragColor = vec4(speculark, 0, 1, 1.0);
}
"""
# Shader code for non lighted rendering
vertex_template = """
void main() {
gl_Position = $transform($to_vec4($position));
}
"""
fragment_template = """
void main() {
gl_FragColor = $color;
}
"""
# Functions that can be used as is (don't have template variables)
# Consider these stored in a central location in vispy ...
vec3to4 = Function("""
vec4 vec3to4(vec3 xyz) {
return vec4(xyz, 1.0);
}
""")
vec2to4 = Function("""
vec4 vec2to4(vec2 xyz) {
return vec4(xyz, 0.0, 1.0);
}
""")
class MeshVisual(Visual):
    """Mesh visual

    Parameters
    ----------
    vertices : array-like | None
        The vertices.
    faces : array-like | None
        The faces.
    vertex_colors : array-like | None
        Colors to use for each vertex.
    face_colors : array-like | None
        Colors to use for each face.
    color : instance of Color
        The color to use.
    meshdata : instance of MeshData | None
        The meshdata.
    shading : str | None
        Shading to use.
    mode : str
        The drawing mode.
    **kwargs : dict
        Keyword arguments to pass to `Visual`.
    """
    def __init__(self, vertices=None, faces=None, vertex_colors=None,
                 face_colors=None, color=(0.5, 0.5, 1, 1), meshdata=None,
                 shading=None, mode='triangles', **kwargs):
        # Function for computing phong shading
        # self._phong = Function(phong_template)
        # Visual.__init__ -> prepare_transforms() -> uses shading
        self.shading = shading
        # Lit and unlit rendering use different shader templates.
        if shading is not None:
            Visual.__init__(self, vcode=shading_vertex_template,
                            fcode=shading_fragment_template,
                            **kwargs)
        else:
            Visual.__init__(self, vcode=vertex_template,
                            fcode=fragment_template,
                            **kwargs)
        self.set_gl_state('translucent', depth_test=True,
                          cull_face=False)
        # Define buffers
        # BUG FIX: removed a dead store (self._normals = None) that was
        # immediately overwritten by the VertexBuffer assignment below.
        self._vertices = VertexBuffer(np.zeros((0, 3), dtype=np.float32))
        self._faces = IndexBuffer()
        self._colors = VertexBuffer(np.zeros((0, 4), dtype=np.float32))
        self._normals = VertexBuffer(np.zeros((0, 3), dtype=np.float32))
        # Uniform color
        self._color = Color(color)
        # varyings
        self._color_var = Varying('v_color', dtype='vec4')
        # Init
        self._bounds = None
        # Note we do not call subclass set_data -- often the signatures
        # do no match.
        MeshVisual.set_data(self, vertices=vertices, faces=faces,
                            vertex_colors=vertex_colors,
                            face_colors=face_colors, meshdata=meshdata,
                            color=color)
        # primitive mode
        self._draw_mode = mode
        self.freeze()

    def set_data(self, vertices=None, faces=None, vertex_colors=None,
                 face_colors=None, color=None, meshdata=None):
        """Set the mesh data

        Parameters
        ----------
        vertices : array-like | None
            The vertices.
        faces : array-like | None
            The faces.
        vertex_colors : array-like | None
            Colors to use for each vertex.
        face_colors : array-like | None
            Colors to use for each face.
        color : instance of Color
            The color to use.
        meshdata : instance of MeshData | None
            The meshdata.
        """
        # An explicit meshdata takes precedence over the raw arrays.
        if meshdata is not None:
            self._meshdata = meshdata
        else:
            self._meshdata = MeshData(vertices=vertices, faces=faces,
                                      vertex_colors=vertex_colors,
                                      face_colors=face_colors)
        self._bounds = self._meshdata.get_bounds()
        if color is not None:
            self._color = Color(color)
        self.mesh_data_changed()

    @property
    def mode(self):
        """The triangle mode used to draw this mesh.

        Options are:

            * 'triangles': Draw one triangle for every three vertices
              (eg, [1,2,3], [4,5,6], [7,8,9)
            * 'triangle_strip': Draw one strip for every vertex excluding the
              first two (eg, [1,2,3], [2,3,4], [3,4,5])
            * 'triangle_fan': Draw each triangle from the first vertex and the
              last two vertices (eg, [1,2,3], [1,3,4], [1,4,5])
        """
        return self._draw_mode

    @mode.setter
    def mode(self, m):
        modes = ['triangles', 'triangle_strip', 'triangle_fan']
        if m not in modes:
            raise ValueError("Mesh mode must be one of %s" % ', '.join(modes))
        self._draw_mode = m

    @property
    def mesh_data(self):
        """The mesh data"""
        return self._meshdata

    @property
    def color(self):
        """The uniform color for this mesh.

        This value is only used if per-vertex or per-face colors are not
        specified.
        """
        return self._color

    @color.setter
    def color(self, c):
        self.set_data(color=c)

    def mesh_data_changed(self):
        """Flag the GPU buffers as stale and schedule a redraw."""
        self._data_changed = True
        self.update()

    def _update_data(self):
        """Upload mesh data to the vertex/index/color buffers and rewire
        the shader program. Returns False if no vertex data is available."""
        md = self.mesh_data
        # Update vertex/index buffers
        if self.shading == 'smooth' and not md.has_face_indexed_data():
            # Indexed drawing: shared vertices with per-vertex normals.
            v = md.get_vertices()
            if v is None:
                return False
            if v.shape[-1] == 2:
                v = np.concatenate((v, np.zeros((v.shape[:-1] + (1,)))), -1)
            self._vertices.set_data(v, convert=True)
            self._normals.set_data(md.get_vertex_normals(), convert=True)
            self._faces.set_data(md.get_faces(), convert=True)
            self._index_buffer = self._faces
            if md.has_vertex_color():
                self._colors.set_data(md.get_vertex_colors(), convert=True)
            elif md.has_face_color():
                self._colors.set_data(md.get_face_colors(), convert=True)
            else:
                self._colors.set_data(np.zeros((0, 4), dtype=np.float32))
        else:
            # Non-indexed drawing: vertices expanded per face.
            v = md.get_vertices(indexed='faces')
            if v is None:
                return False
            if v.shape[-1] == 2:
                v = np.concatenate((v, np.zeros((v.shape[:-1] + (1,)))), -1)
            self._vertices.set_data(v, convert=True)
            if self.shading == 'smooth':
                normals = md.get_vertex_normals(indexed='faces')
                self._normals.set_data(normals, convert=True)
            elif self.shading == 'flat':
                normals = md.get_face_normals(indexed='faces')
                self._normals.set_data(normals, convert=True)
            else:
                self._normals.set_data(np.zeros((0, 3), dtype=np.float32))
            self._index_buffer = None
            if md.has_vertex_color():
                self._colors.set_data(md.get_vertex_colors(indexed='faces'),
                                      convert=True)
            elif md.has_face_color():
                self._colors.set_data(md.get_face_colors(indexed='faces'),
                                      convert=True)
            else:
                self._colors.set_data(np.zeros((0, 4), dtype=np.float32))
        self.shared_program.vert['position'] = self._vertices
        # Position input handling
        if v.shape[-1] == 2:
            self.shared_program.vert['to_vec4'] = vec2to4
        elif v.shape[-1] == 3:
            self.shared_program.vert['to_vec4'] = vec3to4
        else:
            raise TypeError("Vertex data must have shape (...,2) or (...,3).")
        # Color input handling
        # If non-lit shading is used, then just pass the colors
        # Otherwise, the shader uses a base_color to represent the underlying
        # color, which is then lit with the lighting model
        colors = self._colors if self._colors.size > 0 else self._color.rgba
        if self.shading is None:
            self.shared_program.vert[self._color_var] = colors
        # Shading
        if self.shading is None:
            self.shared_program.frag['color'] = self._color_var
        else:
            # Normal data comes via vertex shader
            if self._normals.size > 0:
                normals = self._normals
            else:
                normals = (1., 0., 0.)
            self.shared_program.vert['normal'] = normals
            self.shared_program.vert['base_color'] = colors
            # Additional phong properties
            self.shared_program.vert['light_dir'] = (10, 5, -5)
            self.shared_program.vert['light_color'] = (1.0, 1.0, 1.0, 1.0)
            self.shared_program.vert['ambientk'] = (0.3, 0.3, 0.3, 1.0)
        self._data_changed = False

    @property
    def shading(self):
        """ The shading method used.
        """
        return self._shading

    @shading.setter
    def shading(self, value):
        assert value in (None, 'flat', 'smooth')
        self._shading = value

    def _prepare_draw(self, view):
        """Refresh GPU data if it changed; return False to skip drawing."""
        if self._data_changed:
            if self._update_data() is False:
                return False
            self._data_changed = False

    def draw(self, *args, **kwds):
        """Draw the mesh (delegates to the base Visual implementation)."""
        Visual.draw(self, *args, **kwds)

    @staticmethod
    def _prepare_transforms(view):
        """Attach the view's transforms to the shader template hooks."""
        tr = view.transforms.get_transform()
        view.view_program.vert['transform'] = tr  # .simplified
        if view.shading is not None:
            # The lit vertex shader needs extra coordinate-system transforms
            # to compute normals and the eye vector in scene space.
            visual2scene = view.transforms.get_transform('visual', 'scene')
            scene2doc = view.transforms.get_transform('scene', 'document')
            doc2scene = view.transforms.get_transform('document', 'scene')
            view.shared_program.vert['visual2scene'] = visual2scene
            view.shared_program.vert['scene2doc'] = scene2doc
            view.shared_program.vert['doc2scene'] = doc2scene

    def _compute_bounds(self, axis, view):
        """Return (min, max) along the given axis, or None if no data."""
        if self._bounds is None:
            return None
        return self._bounds[axis]
| kkuunnddaannkk/vispy | vispy/visuals/mesh.py | Python | bsd-3-clause | 12,882 |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class for storing Skia Gold comparison properties.
Examples:
* git revision being tested
* Whether the test is being run locally or on a bot
* What the continuous integration system is
"""
import logging
import os
class SkiaGoldProperties(object):
  def __init__(self, args):
    """Abstract class to validate and store properties related to Skia Gold.

    Args:
      args: The parsed arguments from an argparse.ArgumentParser.
    """
    self._git_revision = None
    self._issue = None
    self._patchset = None
    self._job_id = None
    self._local_pixel_tests = None
    self._no_luci_auth = None
    self._bypass_skia_gold_functionality = None
    self._code_review_system = None
    self._continuous_integration_system = None
    self._local_png_directory = None
    self._InitializeProperties(args)
  def IsTryjobRun(self):
    """Return whether this run is for a tryjob (i.e. a CL was provided)."""
    return self.issue is not None
  @property
  def continuous_integration_system(self):
    """The CI system name, defaulting to 'buildbucket'."""
    return self._continuous_integration_system or 'buildbucket'
  @property
  def code_review_system(self):
    """The code review system name, defaulting to 'gerrit'."""
    return self._code_review_system or 'gerrit'
  @property
  def git_revision(self):
    # Resolved lazily; may shell out to git on local runs.
    return self._GetGitRevision()
  @property
  def issue(self):
    return self._issue
  @property
  def job_id(self):
    return self._job_id
  @property
  def local_pixel_tests(self):
    # Determined lazily from the environment if not set explicitly.
    return self._IsLocalRun()
  @property
  def local_png_directory(self):
    return self._local_png_directory
  @property
  def no_luci_auth(self):
    return self._no_luci_auth
  @property
  def patchset(self):
    return self._patchset
  @property
  def bypass_skia_gold_functionality(self):
    return self._bypass_skia_gold_functionality
  @staticmethod
  def _GetGitOriginMainHeadSha1():
    # Must be overridden by platform-specific subclasses.
    raise NotImplementedError()
  def _GetGitRevision(self):
    """Return the git revision, resolving it from the local checkout if
    necessary (only allowed when running locally)."""
    if not self._git_revision:
      # Automated tests should always pass the revision, so assume we're on
      # a workstation and try to get the local origin/master HEAD.
      if not self._IsLocalRun():
        raise RuntimeError(
            '--git-revision was not passed when running on a bot')
      revision = self._GetGitOriginMainHeadSha1()
      # A full git SHA-1 is exactly 40 hex characters.
      if not revision or len(revision) != 40:
        raise RuntimeError(
            '--git-revision not passed and unable to determine from git')
      self._git_revision = revision
    return self._git_revision
  def _IsLocalRun(self):
    """Return whether this looks like a local (workstation) run."""
    if self._local_pixel_tests is None:
      # Look for the presence of the SWARMING_SERVER environment variable as a
      # heuristic to determine whether we're running on a workstation or a bot.
      # This should always be set on swarming, but would be strange to be set on
      # a workstation.
      self._local_pixel_tests = 'SWARMING_SERVER' not in os.environ
      if self._local_pixel_tests:
        logging.warning(
            'Automatically determined that test is running on a workstation')
      else:
        logging.warning(
            'Automatically determined that test is running on a bot')
    return self._local_pixel_tests
  def _InitializeProperties(self, args):
    """Copy and validate the relevant attributes from the parsed args."""
    if hasattr(args, 'local_pixel_tests'):
      # If not set, will be automatically determined later if needed.
      self._local_pixel_tests = args.local_pixel_tests
    if hasattr(args, 'skia_gold_local_png_write_directory'):
      self._local_png_directory = args.skia_gold_local_png_write_directory
    if hasattr(args, 'no_luci_auth'):
      self._no_luci_auth = args.no_luci_auth
    if hasattr(args, 'bypass_skia_gold_functionality'):
      self._bypass_skia_gold_functionality = args.bypass_skia_gold_functionality
    if hasattr(args, 'code_review_system'):
      self._code_review_system = args.code_review_system
    if hasattr(args, 'continuous_integration_system'):
      self._continuous_integration_system = args.continuous_integration_system
    # Will be automatically determined later if needed.
    if not hasattr(args, 'git_revision') or not args.git_revision:
      return
    self._git_revision = args.git_revision
    # Only expected on tryjob runs.
    if not hasattr(args, 'gerrit_issue') or not args.gerrit_issue:
      return
    self._issue = args.gerrit_issue
    if not hasattr(args, 'gerrit_patchset') or not args.gerrit_patchset:
      raise RuntimeError(
          '--gerrit-issue passed, but --gerrit-patchset not passed.')
    self._patchset = args.gerrit_patchset
    if not hasattr(args, 'buildbucket_id') or not args.buildbucket_id:
      raise RuntimeError(
          '--gerrit-issue passed, but --buildbucket-id not passed.')
    self._job_id = args.buildbucket_id
| ric2b/Vivaldi-browser | chromium/build/skia_gold_common/skia_gold_properties.py | Python | bsd-3-clause | 4,742 |
''' Provides a command line application for Bokeh.
The following subcommands are available:
'''
from __future__ import absolute_import
def _build_docstring():
    """Extend the module docstring with one line per available subcommand."""
    global __doc__
    from . import subcommands
    entries = ["%8s : %s\n" % (subcommand.name, subcommand.help)
               for subcommand in subcommands.all]
    __doc__ += "".join(entries)
_build_docstring()
# Keep the module namespace clean; the helper is only needed at import time.
del _build_docstring
| phobson/bokeh | bokeh/command/__init__.py | Python | bsd-3-clause | 340 |
from __future__ import unicode_literals
from django.utils.six.moves.urllib.parse import parse_qs, urlsplit
from reviewboard.hostingsvcs.utils.paginator import (APIPaginator,
InvalidPageError,
ProxyPaginator)
from reviewboard.testing import TestCase
class DummyAPIPaginator(APIPaginator):
    """Minimal APIPaginator used as a fixture in the tests below.

    fetch_url() is overridden so that no real HTTP requests are made; every
    page fetch returns the same static payload.
    """
    start_query_param = 'start'
    per_page_query_param = 'per-page'
    def fetch_url(self, url):
        # Simulate a successful API response containing one fixed page.
        return {
            'data': [1, 2, 3],
            'headers': {},
        }
class APIPaginatorTests(TestCase):
    """Tests for APIPaginator."""
    def test_construct_initial_load(self):
        """Testing APIPaginator construction performs initial load"""
        paginator = DummyAPIPaginator(None, 'http://example.com', start=10)
        self.assertEqual(paginator.page_data, [1, 2, 3])
    def test_construct_with_start(self):
        """Testing APIPaginator construction with start=<value>"""
        url = 'http://example.com/api/list/?foo=1'
        paginator = DummyAPIPaginator(None, url, start=10)
        # parts[3] is the query-string component of the split URL.
        parts = urlsplit(paginator.url)
        query_params = parse_qs(parts[3])
        self.assertEqual(query_params['foo'], ['1'])
        self.assertEqual(query_params['start'], ['10'])
    def test_construct_with_per_page(self):
        """Testing APIPaginator construction with per_page=<value>"""
        url = 'http://example.com/api/list/?foo=1'
        paginator = DummyAPIPaginator(None, url, per_page=10)
        # parts[3] is the query-string component of the split URL.
        parts = urlsplit(paginator.url)
        query_params = parse_qs(parts[3])
        self.assertEqual(query_params['foo'], ['1'])
        self.assertEqual(query_params['per-page'], ['10'])
    def test_extract_page_info(self):
        """Testing APIPaginator page information extraction"""
        # Local subclass whose fetch_url() supplies every optional piece of
        # page metadata, so extraction of each one can be verified.
        class PageInfoAPIPaginator(APIPaginator):
            def fetch_url(self, url):
                return {
                    'data': ['a', 'b', 'c'],
                    'headers': {
                        'Foo': 'Bar',
                    },
                    'per_page': 10,
                    'total_count': 100,
                    'prev_url': 'http://example.com/?page=1',
                    'next_url': 'http://example.com/?page=3',
                }
        paginator = PageInfoAPIPaginator(None, 'http://example.com/')
        self.assertEqual(paginator.page_data, ['a', 'b', 'c'])
        self.assertEqual(paginator.page_headers['Foo'], 'Bar')
        self.assertEqual(paginator.per_page, 10)
        self.assertEqual(paginator.total_count, 100)
        self.assertEqual(paginator.prev_url, 'http://example.com/?page=1')
        self.assertEqual(paginator.next_url, 'http://example.com/?page=3')
    def test_prev(self):
        """Testing APIPaginator.prev"""
        prev_url = 'http://example.com/?page=1'
        paginator = DummyAPIPaginator(None, 'http://example.com')
        paginator.prev_url = prev_url
        self.assertTrue(paginator.has_prev)
        self.assertFalse(paginator.has_next)
        data = paginator.prev()
        self.assertEqual(data, [1, 2, 3])
        # Moving back should update the paginator's current URL.
        self.assertEqual(paginator.url, prev_url)
    def test_prev_without_prev_page(self):
        """Testing APIPaginator.prev without a previous page"""
        paginator = DummyAPIPaginator(None, 'http://example.com')
        url = paginator.url
        self.assertFalse(paginator.has_prev)
        self.assertRaises(InvalidPageError, paginator.prev)
        # A failed prev() must leave the current URL untouched.
        self.assertEqual(paginator.url, url)
    def test_next(self):
        """Testing APIPaginator.next"""
        next_url = 'http://example.com/?page=3'
        paginator = DummyAPIPaginator(None, 'http://example.com')
        paginator.next_url = next_url
        self.assertFalse(paginator.has_prev)
        self.assertTrue(paginator.has_next)
        data = paginator.next()
        self.assertEqual(data, [1, 2, 3])
        # Moving forward should update the paginator's current URL.
        self.assertEqual(paginator.url, next_url)
    def test_next_without_next_page(self):
        """Testing APIPaginator.next without a next page"""
        paginator = DummyAPIPaginator(None, 'http://example.com')
        url = paginator.url
        self.assertFalse(paginator.has_next)
        self.assertRaises(InvalidPageError, paginator.next)
        # A failed next() must leave the current URL untouched.
        self.assertEqual(paginator.url, url)
class ProxyPaginatorTests(TestCase):
    """Tests for ProxyPaginator."""
    def setUp(self):
        # Each test wraps a fresh dummy paginator in a proxy.
        self.paginator = DummyAPIPaginator(None, 'http://example.com')
        self.proxy = ProxyPaginator(self.paginator)
    def test_has_prev(self):
        """Testing ProxyPaginator.has_prev"""
        self.assertFalse(self.proxy.has_prev)
        self.paginator.prev_url = 'http://example.com/?start=1'
        self.assertTrue(self.proxy.has_prev)
    def test_has_next(self):
        """Testing ProxyPaginator.has_next"""
        self.assertFalse(self.proxy.has_next)
        self.paginator.next_url = 'http://example.com/?start=2'
        self.assertTrue(self.proxy.has_next)
    def test_per_page(self):
        """Testing ProxyPaginator.per_page"""
        self.paginator.per_page = 10
        self.assertEqual(self.proxy.per_page, 10)
    def test_total_count(self):
        """Testing ProxyPaginator.total_count"""
        self.paginator.total_count = 100
        self.assertEqual(self.proxy.total_count, 100)
    def test_prev(self):
        """Testing ProxyPaginator.prev"""
        prev_url = 'http://example.com/?page=1'
        self.paginator.prev_url = prev_url
        self.assertTrue(self.proxy.has_prev)
        self.assertFalse(self.proxy.has_next)
        data = self.proxy.prev()
        self.assertEqual(data, [1, 2, 3])
        # The proxied call must navigate the wrapped paginator.
        self.assertEqual(self.paginator.url, prev_url)
    def test_next(self):
        """Testing ProxyPaginator.next"""
        next_url = 'http://example.com/?page=3'
        self.paginator.next_url = next_url
        self.assertFalse(self.proxy.has_prev)
        self.assertTrue(self.proxy.has_next)
        data = self.proxy.next()
        self.assertEqual(data, [1, 2, 3])
        # The proxied call must navigate the wrapped paginator.
        self.assertEqual(self.paginator.url, next_url)
    def test_normalize_page_data(self):
        """Testing ProxyPaginator.normalize_page_data"""
        proxy = ProxyPaginator(
            self.paginator,
            normalize_page_data_func=lambda data: list(reversed(data)))
        self.assertEqual(proxy.page_data, [3, 2, 1])
    def test_normalize_page_data_on_prev(self):
        """Testing ProxyPaginator.normalize_page_data on prev"""
        proxy = ProxyPaginator(
            self.paginator,
            normalize_page_data_func=lambda data: list(reversed(data)))
        self.paginator.prev_url = 'http://example.com/?page=1'
        data = proxy.prev()
        self.assertEqual(data, [3, 2, 1])
    def test_normalize_page_data_on_next(self):
        """Testing ProxyPaginator.normalize_page_data on next"""
        proxy = ProxyPaginator(
            self.paginator,
            normalize_page_data_func=lambda data: list(reversed(data)))
        self.paginator.next_url = 'http://example.com/?page=3'
        data = proxy.next()
        self.assertEqual(data, [3, 2, 1])
| KnowNo/reviewboard | reviewboard/hostingsvcs/utils/tests.py | Python | mit | 7,168 |
#!/usr/bin/env python
"""
C.11.3 Bibliography and Citation (p208)
"""
import plasTeX, codecs
from plasTeX.Base.LaTeX.Sectioning import chapter, section
from plasTeX import Command, Environment
from Lists import List
log = plasTeX.Logging.getLogger()
class bibliography(chapter):
    """The \\bibliography command; sections the document like a chapter.

    NOTE: this file is Python 2 code (see the ``except OSError, msg`` syntax
    below); keep it that way unless the whole package is ported.
    """
    args = 'files:str'
    linkType = 'bibliography'
    def invoke(self, tex):
        res = chapter.invoke(self, tex)
        # Use the (localizable) \bibname macro as this section's title.
        self.title = self.ownerDocument.createElement('bibname').expand(tex)
        self.loadBibliographyFile(tex)
        return res
    def loadBibliographyFile(self, tex):
        # Load bibtex file
        # The .bbl produced by BibTeX for the current job is located via
        # kpsewhich and fed back into the TeX input stream. A missing file
        # is only logged, not fatal.
        try:
            file = tex.kpsewhich(tex.jobname+'.bbl')
            tex.input(codecs.open(file, 'r', self.ownerDocument.config['files']['input-encoding']))
        except OSError, msg:
            log.warning(msg)
class bibliographystyle(Command):
    """The \\bibliographystyle command; the style argument is parsed only."""
    args = 'style'
class thebibliography(List):
    """The thebibliography environment.

    Renders like a list and records every \\bibitem in document-level
    userdata tables ('bibliography/bibitems' and 'bibliography/bibcites')
    so that \\cite commands can resolve them later.
    """
    args = 'widelabel'
    linkType = 'bibliography'

    class bibitem(List.item):
        args = '[ label ] key:str'

        def invoke(self, tex):
            res = List.item.invoke(self, tex)
            a = self.attributes
            # Put the entry into the global bibliography
            doc = self.ownerDocument
            bibitems = doc.userdata.getPath('bibliography/bibitems', {})
            bibitems[a['key']] = self
            doc.userdata.setPath('bibliography/bibitems', bibitems)
            # Auto-number only the items that have no explicit label.
            self.ref = str(len([x for x in bibitems.values()
                                if not x.attributes['label']]))
            key = a['key']
            label = a.get('label')
            bibcites = doc.userdata.getPath('bibliography/bibcites', {})
            # Use the 'in' operator rather than the deprecated dict.has_key()
            # (removed in Python 3; identical behavior on Python 2).
            if key not in bibcites:
                if label is None:
                    label = doc.createDocumentFragment()
                    label.extend(self.ref)
                bibcites[key] = label
            doc.userdata.setPath('bibliography/bibcites', bibcites)
            return res

        @property
        def id(self):
            # The citation key doubles as the anchor id.
            return self.attributes['key']

        @property
        def bibcite(self):
            # Fragment holding the rendered citation label for this entry.
            doc = self.ownerDocument
            res = doc.createDocumentFragment()
            bibcites = doc.userdata.getPath('bibliography/bibcites', {})
            res.extend(bibcites.get(self.attributes['key']))
            return res

    def digest(self, tokens):
        if self.macroMode == Command.MODE_END:
            return
        # Skip any leading material before the first \bibitem, then digest
        # the rest as a normal list.
        for tok in tokens:
            if not isinstance(tok, thebibliography.bibitem):
                continue
            tokens.push(tok)
            break
        return List.digest(self, tokens)
class cite(Command):
    """The \\cite command: renders as "[label1, label2, postnote]"."""
    args = '[ text ] bibkeys:list:str'

    @property
    def bibitems(self):
        # Get all referenced items; unknown keys are logged and skipped.
        output = []
        doc = self.ownerDocument
        for x in self.attributes['bibkeys']:
            item = doc.userdata.getPath('bibliography/bibitems', {}).get(x)
            if item is None:
                log.warning('Bibliography item "%s" has no entry', x)
            else:
                output.append(item)
        return output

    @property
    def postnote(self):
        # Optional text argument appended after the last label (e.g. a
        # page number); empty string when absent.
        a = self.attributes
        if a['text'] is not None:
            return a['text']
        return ''

    def citation(self):
        """ (Jones et al., 1990) """
        res = self.ownerDocument.createDocumentFragment()
        # Evaluate the bibitems property once: the original re-ran the
        # full lookup on every loop iteration.
        bibitems = self.bibitems
        res.append('[')
        for i, item in enumerate(bibitems):
            node = self.ownerDocument.createElement('bgroup')
            node.extend(item.bibcite)
            node.idref['bibitem'] = item
            res.append(node)
            if i < (len(bibitems)-1):
                res.append(', ')
            else:
                if self.postnote:
                    res.append(', ')
                    res.append(self.postnote)
        res.append(']')
        return res
class nocite(Command):
    """The \\nocite command; includes entries without producing output."""
    args = 'bibkeys:str'
class bibcite(Command):
    """The \\bibcite command (written by BibTeX into auxiliary files).

    Registers the rendered citation label for a key in the document-level
    'bibliography/bibcites' table.
    """
    args = 'key:str info'
    def invoke(self, tex):
        Command.invoke(self, tex)
        value = self.attributes['info'].firstChild
        doc = self.ownerDocument
        bibcites = doc.userdata.getPath('bibliography/bibcites', {})
        bibcites[self.attributes['key']] = value
        doc.userdata.setPath('bibliography/bibcites', bibcites)
class citation(Command):
    # Stub macro: recognized so parsing succeeds, but produces no output.
    pass
class bibstyle(Command):
    # Stub macro: recognized so parsing succeeds, but produces no output.
    pass
class bibdata(Command):
    # Stub macro: recognized so parsing succeeds, but produces no output.
    pass
class newblock(Command):
    # Separator between parts of a bibliography entry; no output needed.
    pass
class bibliographyref(Command):
    # Stub macro: recognized so parsing succeeds, but produces no output.
    pass
| nibrahim/PlasTeX | plasTeX/Base/LaTeX/Bibliography.py | Python | mit | 4,545 |
from flask import render_template, jsonify, request
from jsonrpclib import jsonrpc
import base64
import urllib
from maraschino import app, logger
from maraschino.tools import *
def nzbget_http():
    """Return the URL scheme prefix based on the NZBGet HTTPS setting."""
    use_https = get_setting_value('nzbget_https') == '1'
    return 'https://' if use_https else 'http://'
def nzbget_auth():
    """Return the "user:password@" URL fragment for NZBGet basic auth."""
    return 'nzbget:%s@' % (get_setting_value('nzbget_password'))
def nzbget_url():
    """Build the base NZBGet URL from scheme, auth, host and port settings."""
    return '%s%s%s:%s' % (nzbget_http(), \
                          nzbget_auth(), \
                          get_setting_value('nzbget_host'), \
                          get_setting_value('nzbget_port'))
def nzbget_exception(e):
    """Log an exception raised while talking to the NZBGet JSON-RPC API."""
    logger.log('NZBGet :: EXCEPTION -- %s' % e, 'DEBUG')
@app.route('/xhr/nzbget/')
@requires_auth
def xhr_nzbget():
    """Render the NZBGet queue template with current status and downloads.

    On any RPC failure the error is logged and the template is rendered
    with None values, so the widget degrades gracefully.
    """
    downloads = status = nzbget = None
    logger.log('NZBGet :: Getting download list', 'INFO')
    try:
        nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
        status = nzbget.status()
        downloads = nzbget.listgroups()
    except Exception as e:
        nzbget_exception(e)
    logger.log('NZBGet :: Getting download list (DONE)', 'INFO')
    return render_template('nzbget/queue.html',
                           nzbget=status,
                           downloads=downloads,
                           )
@app.route('/xhr/nzbget/queue/<action>/')
@requires_auth
def queue_action_nzbget(action):
    """Pause or resume the whole NZBGet download queue.

    Returns JSON {'success': <rpc result or False>}.
    """
    status = False
    logger.log('NZBGet :: Queue action: %s' % action, 'INFO')
    try:
        nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
        if 'resume' in action:
            status = nzbget.resume()
        elif 'pause' in action:
            status = nzbget.pause()
    except Exception as e:
        nzbget_exception(e)
    return jsonify({'success': status})
@app.route('/xhr/nzbget/queue/add/', methods=['POST'])
@requires_auth
def queue_add_nzbget():
    """Download an NZB from the posted 'url' and append it to the queue.

    Returns JSON {'success': <rpc result or False>}.

    Fixes two defects in the original: 'nzb' was tested with len() before
    it was ever assigned (guaranteed NameError), and base64.encode() -- the
    file-object streaming API, which returns None -- was used instead of a
    string-encoding function.
    """
    status = False
    # .get() instead of ['url'] so a missing field yields success=False
    # rather than a 400 from a KeyError.
    nzb_url = request.form.get('url', '')
    if nzb_url:
        try:
            nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
            nzb = urllib.urlopen(nzb_url).read()
            # NZBGet's append() expects the NZB file content base64-encoded.
            # TODO: derive a real filename/category instead of 'test'.
            status = nzbget.append('test', '', False,
                                   base64.standard_b64encode(nzb))
        except Exception as e:
            nzbget_exception(e)
    return jsonify({'success': status})
@app.route('/xhr/nzbget/individual/<int:id>/<action>/')
@requires_auth
def individual_action_nzbget(id, action):
    """Resume, pause or delete a single NZBGet download group.

    The URL action keyword is mapped to the corresponding editqueue
    command name before the RPC call.
    """
    status = False
    logger.log('NZBGet :: Item %s action: %s' % (id, action), 'INFO')
    if 'resume' in action:
        action = 'GroupResume'
    elif 'pause' in action:
        action = 'GroupPause'
    elif 'delete' in action:
        action = 'GroupDelete'
    try:
        nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
        status = nzbget.editqueue(action, 0, '', id)
    except Exception as e:
        nzbget_exception(e)
    return jsonify({'success': status, 'id': id, 'action': action})
@app.route('/xhr/nzbget/set_speed/<int:speed>/')
@requires_auth
def set_speed_nzbget(speed):
    """Set the NZBGet download speed limit via the rate() RPC call.

    Returns JSON {'success': <rpc result or False>}.
    """
    logger.log('NZBGet :: Setting speed limit: %s' % speed, 'INFO')
    # Initialize before the try block so the response is well-defined on
    # failure; previously the exception path raised NameError at jsonify().
    status = False
    try:
        nzbget = jsonrpc.ServerProxy('%s/jsonrpc' % nzbget_url())
        status = nzbget.rate(speed)
    except Exception as e:
        nzbget_exception(e)
    return jsonify({'success': status})
| hephaestus9/Ironworks | modules/plugins/nzbget.py | Python | mit | 3,148 |
# coding=utf-8
"""Test Hazard Metadata."""
from unittest import TestCase
from safe.common.utilities import unique_filename
from safe.metadata import HazardLayerMetadata
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
class TestHazardMetadata(TestCase):
    """Checks the property set exposed by HazardLayerMetadata."""
    def test_standard_properties(self):
        metadata = HazardLayerMetadata(unique_filename())
        # Unknown property names must raise rather than return None.
        with self.assertRaises(KeyError):
            metadata.get_property('non_existing_key')
        # from BaseMetadata
        metadata.get_property('email')
        # from HazardLayerMetadata
        metadata.get_property('hazard')
        metadata.get_property('hazard_category')
        metadata.get_property('continuous_hazard_unit')
        metadata.get_property('thresholds')
        metadata.get_property('value_maps')
| AIFDR/inasafe | safe/metadata/test/test_hazard_metadata.py | Python | gpl-3.0 | 886 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from psycopg2 import OperationalError
from odoo import api, fields, models, registry, _
from odoo.exceptions import UserError
import odoo.addons.decimal_precision as dp
PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')]
class ProcurementGroup(models.Model):
    '''
    The procurement group class is used to group products together
    when computing procurements. (tasks, physical products, ...)
    The goal is that when you have one sale order of several products
    and the products are pulled from the same or several location(s), to keep
    having the moves grouped into pickings that represent the sale order.
    Used in: sales order (to group delivery order lines like the so), pull/push
    rules (to pack like the delivery order), on orderpoints (e.g. for wave picking
    all the similar products together).
    Grouping is made only if the source and the destination is the same.
    Suppose you have 4 lines on a picking from Output where 2 lines will need
    to come from Input (crossdock) and 2 lines coming from Stock -> Output As
    the four procurement orders will have the same group ids from the SO, the
    move from input will have a stock.picking with 2 grouped lines and the move
    from stock will have 2 grouped lines also.
    The name is usually the name of the original document (sale order) or a
    sequence computed if created manually.
    '''
    _name = 'procurement.group'
    _description = 'Procurement Requisition'
    _order = "id desc"
    # Defaults to the next value of the 'procurement.group' ir.sequence.
    name = fields.Char(
        'Reference',
        default=lambda self: self.env['ir.sequence'].next_by_code('procurement.group') or '',
        required=True)
    # Whether grouped moves may be delivered partially or all at once.
    move_type = fields.Selection([
        ('direct', 'Partial'),
        ('one', 'All at once')], string='Delivery Type', default='direct',
        required=True)
    procurement_ids = fields.One2many('procurement.order', 'group_id', 'Procurements')
class ProcurementRule(models.Model):
    ''' A rule describes what a procurement should do; produce, buy, move, ... '''
    _name = 'procurement.rule'
    _description = "Procurement Rule"
    _order = "name"
    name = fields.Char(
        'Name', required=True, translate=True,
        help="This field will fill the packing origin and the name of its moves")
    active = fields.Boolean(
        'Active', default=True,
        help="If unchecked, it will allow you to hide the rule without removing it.")
    # How the procurement group is carried over to documents created by
    # this rule: dropped, propagated as-is, or forced to a fixed group.
    group_propagation_option = fields.Selection([
        ('none', 'Leave Empty'),
        ('propagate', 'Propagate'),
        ('fixed', 'Fixed')], string="Propagation of Procurement Group", default='propagate')
    group_id = fields.Many2one('procurement.group', 'Fixed Procurement Group')
    # Selection values are supplied by _get_action(); modules inheriting
    # this model extend the returned list.
    action = fields.Selection(
        selection='_get_action', string='Action',
        required=True)
    sequence = fields.Integer('Sequence', default=20)
    company_id = fields.Many2one('res.company', 'Company')
    @api.model
    def _get_action(self):
        # Base module defines no actions; extensions (stock, purchase, mrp,
        # ...) add their own.
        return []
class ProcurementOrder(models.Model):
    """ Procurement Orders

    A procurement expresses a need for a quantity of a product at a date.
    Resolution is delegated to a matching procurement.rule; the base module
    provides the state machine and scheduler while inheriting modules
    (stock, purchase, mrp, ...) supply the actual rules and actions.
    """
    _name = "procurement.order"
    _description = "Procurement"
    _order = 'priority desc, date_planned, id asc'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    name = fields.Text('Description', required=True)
    origin = fields.Char('Source Document', help="Reference of the document that created this Procurement. This is automatically completed by Odoo.")
    # NOTE: fixed the model name passed to _company_default_get(); it was
    # misspelled 'procurement.orer', which silently broke per-company
    # default resolution for this field.
    company_id = fields.Many2one(
        'res.company', 'Company',
        default=lambda self: self.env['res.company']._company_default_get('procurement.order'),
        required=True)
    # These two fields are used for scheduling
    priority = fields.Selection(
        PROCUREMENT_PRIORITIES, string='Priority', default='1',
        required=True, index=True, track_visibility='onchange')
    date_planned = fields.Datetime(
        'Scheduled Date', default=fields.Datetime.now,
        required=True, index=True, track_visibility='onchange')
    group_id = fields.Many2one('procurement.group', 'Procurement Group')
    rule_id = fields.Many2one(
        'procurement.rule', 'Rule',
        track_visibility='onchange',
        help="Chosen rule for the procurement resolution. Usually chosen by the system but can be manually set by the procurement manager to force an unusual behavior.")
    product_id = fields.Many2one(
        'product.product', 'Product',
        readonly=True, required=True,
        states={'confirmed': [('readonly', False)]})
    product_qty = fields.Float(
        'Quantity',
        digits=dp.get_precision('Product Unit of Measure'),
        readonly=True, required=True,
        states={'confirmed': [('readonly', False)]})
    product_uom = fields.Many2one(
        'product.uom', 'Product Unit of Measure',
        readonly=True, required=True,
        states={'confirmed': [('readonly', False)]})
    state = fields.Selection([
        ('cancel', 'Cancelled'),
        ('confirmed', 'Confirmed'),
        ('exception', 'Exception'),
        ('running', 'Running'),
        ('done', 'Done')], string='Status', default='confirmed',
        copy=False, required=True, track_visibility='onchange')

    @api.model
    def _needaction_domain_get(self):
        # Procurements in exception need user attention.
        return [('state', '=', 'exception')]

    @api.model
    def create(self, vals):
        """Create the procurement and immediately try to run it, unless the
        caller deferred resolution via the 'procurement_autorun_defer'
        context key."""
        procurement = super(ProcurementOrder, self).create(vals)
        if not self._context.get('procurement_autorun_defer'):
            procurement.run()
        return procurement

    @api.multi
    def unlink(self):
        """Forbid deletion of cancelled procurements."""
        if any(procurement.state == 'cancel' for procurement in self):
            raise UserError(_('You cannot delete procurements that are in cancel state.'))
        return super(ProcurementOrder, self).unlink()

    @api.multi
    def do_view_procurements(self):
        '''
        This function returns an action that display existing procurement orders
        of same procurement group of given ids.
        '''
        action = self.env.ref('procurement.do_view_procurements').read()[0]
        action['domain'] = [('group_id', 'in', self.mapped('group_id').ids)]
        return action

    @api.onchange('product_id')
    def onchange_product_id(self):
        """ Finds UoM of changed product. """
        if self.product_id:
            self.product_uom = self.product_id.uom_id.id

    @api.multi
    def cancel(self):
        """Cancel every procurement that is not already done."""
        to_cancel = self.filtered(lambda procurement: procurement.state != 'done')
        if to_cancel:
            return to_cancel.write({'state': 'cancel'})

    @api.multi
    def reset_to_confirmed(self):
        return self.write({'state': 'confirmed'})

    @api.multi
    def run(self, autocommit=False):
        """Assign a rule to each pending procurement and execute it, moving
        the record to 'running' on success or 'exception' on failure."""
        # TDE FIXME: avoid browsing everything -> avoid prefetching ?
        for procurement in self:
            # we intentionally do the browse under the for loop to avoid caching all ids which would be resource greedy
            # and useless as we'll make a refresh later that will invalidate all the cache (and thus the next iteration
            # will fetch all the ids again)
            if procurement.state not in ("running", "done"):
                try:
                    if procurement._assign():
                        res = procurement._run()
                        if res:
                            procurement.write({'state': 'running'})
                        else:
                            procurement.write({'state': 'exception'})
                    else:
                        procurement.message_post(body=_('No rule matching this procurement'))
                        procurement.write({'state': 'exception'})
                    if autocommit:
                        self.env.cr.commit()
                except OperationalError:
                    # With autocommit (scheduler runs), skip records hit by
                    # concurrency errors and continue with the rest.
                    if autocommit:
                        self.env.cr.rollback()
                        continue
                    else:
                        raise
        return True

    @api.multi
    @api.returns('self', lambda procurements: [procurement.id for procurement in procurements])
    def check(self, autocommit=False):
        """Mark as 'done' every procurement whose _check() reports it as
        fulfilled, and return those records."""
        # TDE FIXME: check should not do something, just check
        procurements_done = self.env['procurement.order']
        for procurement in self:
            try:
                result = procurement._check()
                if result:
                    procurements_done += procurement
                if autocommit:
                    self.env.cr.commit()
            except OperationalError:
                if autocommit:
                    self.env.cr.rollback()
                    continue
                else:
                    raise
        if procurements_done:
            procurements_done.write({'state': 'done'})
        return procurements_done

    #
    # Method to overwrite in different procurement modules
    #
    @api.multi
    def _find_suitable_rule(self):
        '''This method returns a procurement.rule that depicts what to do with the given procurement
        in order to complete its needs. It returns False if no suiting rule is found.
        :rtype: int or False
        '''
        return False

    @api.multi
    def _assign(self):
        '''This method check what to do with the given procurement in order to complete its needs.
        It returns False if no solution is found, otherwise it stores the matching rule (if any) and
        returns True.
        :rtype: boolean
        '''
        # if the procurement already has a rule assigned, we keep it (it has a higher priority as it may have been chosen manually)
        if self.rule_id:
            return True
        elif self.product_id.type not in ('service', 'digital'):
            rule = self._find_suitable_rule()
            if rule:
                self.write({'rule_id': rule.id})
                return True
        return False

    @api.multi
    def _run(self):
        '''This method implements the resolution of the given procurement
        :returns: True if the resolution of the procurement was a success, False otherwise to set it in exception
        '''
        return True

    @api.multi
    def _check(self):
        '''Returns True if the given procurement is fulfilled, False otherwise
        :rtype: boolean
        '''
        return False

    #
    # Scheduler
    #
    @api.model
    def run_scheduler(self, use_new_cursor=False, company_id=False):
        '''
        Call the scheduler to check the procurement order. This is intented to be done for all existing companies at
        the same time, so we're running all the methods as SUPERUSER to avoid intercompany and access rights issues.
        @param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
            This is appropriate for batch jobs only.
        @return: Dictionary of values
        '''
        ProcurementSudo = self.env['procurement.order'].sudo()
        try:
            if use_new_cursor:
                cr = registry(self._cr.dbname).cursor()
                self = self.with_env(self.env(cr=cr))  # TDE FIXME
            # Run confirmed procurements
            procurements = ProcurementSudo.search([('state', '=', 'confirmed')] + (company_id and [('company_id', '=', company_id)] or []))
            while procurements:
                procurements.run(autocommit=use_new_cursor)
                if use_new_cursor:
                    self.env.cr.commit()
                procurements = ProcurementSudo.search([('id', 'not in', procurements.ids), ('state', '=', 'confirmed')] + (company_id and [('company_id', '=', company_id)] or []))
            # Check done procurements
            procurements = ProcurementSudo.search([('state', '=', 'running')] + (company_id and [('company_id', '=', company_id)] or []))
            while procurements:
                procurements.check(autocommit=use_new_cursor)
                if use_new_cursor:
                    self.env.cr.commit()
                procurements = ProcurementSudo.search([('id', 'not in', procurements.ids), ('state', '=', 'running')] + (company_id and [('company_id', '=', company_id)] or []))
        finally:
            if use_new_cursor:
                try:
                    self.env.cr.close()
                except Exception:
                    pass
        return {}
| chienlieu2017/it_management | odoo/addons/procurement/models/procurement.py | Python | gpl-3.0 | 12,497 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_dvswitch
short_description: Create or remove a distributed vSwitch
description:
- Create or remove a distributed vSwitch
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter that will contain the dvSwitch
required: True
switch_name:
description:
- The name of the switch to create or remove
required: True
mtu:
description:
- The switch maximum transmission unit
required: True
uplink_quantity:
description:
- Quantity of uplink per ESXi host added to the switch
required: True
discovery_proto:
description:
- Link discovery protocol between Cisco and Link Layer discovery
choices:
- 'cdp'
- 'lldp'
required: True
discovery_operation:
description:
- Select the discovery operation
choices:
- 'both'
- 'none'
- 'advertise'
- 'listen'
state:
description:
- Create or remove dvSwitch
default: 'present'
choices:
- 'present'
- 'absent'
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create dvswitch
local_action:
module: vmware_dvswitch
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
datacenter_name: datacenter
switch_name: dvSwitch
mtu: 9000
uplink_quantity: 2
discovery_proto: lldp
discovery_operation: both
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI,
connect_to_api,
find_datacenter_by_name,
find_dvs_by_name,
vmware_argument_spec,
wait_for_task
)
class VMwareDVSwitch(object):
    """Drives creation/removal of a distributed vSwitch for the module."""
    def __init__(self, module):
        self.module = module
        self.dvs = None
        self.switch_name = self.module.params['switch_name']
        self.datacenter_name = self.module.params['datacenter_name']
        self.mtu = self.module.params['mtu']
        self.uplink_quantity = self.module.params['uplink_quantity']
        self.discovery_proto = self.module.params['discovery_proto']
        self.discovery_operation = self.module.params['discovery_operation']
        self.state = self.module.params['state']
        self.content = connect_to_api(module)
    def process_state(self):
        """Dispatch to the handler chosen by desired state x current state."""
        try:
            dvs_states = {
                'absent': {
                    'present': self.state_destroy_dvs,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_dvs,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_dvs,
                }
            }
            dvs_states[self.state][self.check_dvs_configuration()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))
    def create_dvswitch(self, network_folder):
        """Build the CreateSpec from module params and run CreateDVS_Task."""
        result = None
        changed = False
        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()
        spec.configSpec.name = self.switch_name
        spec.configSpec.maxMtu = self.mtu
        spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
        spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
        spec.productInfo = vim.dvs.ProductSpec()
        spec.productInfo.name = "DVS"
        spec.productInfo.vendor = "VMware"
        # Name the uplink ports uplink1..uplinkN as requested.
        for count in range(1, self.uplink_quantity+1):
            spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)
        task = network_folder.CreateDVS_Task(spec)
        changed, result = wait_for_task(task)
        return changed, result
    def state_exit_unchanged(self):
        self.module.exit_json(changed=False)
    def state_destroy_dvs(self):
        task = self.dvs.Destroy_Task()
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=str(result))
    def state_update_dvs(self):
        # Update support has not been implemented; report no change.
        self.module.exit_json(changed=False, msg="Currently not implemented.")
    def state_create_dvs(self):
        # In check mode, report that a change would happen without doing it.
        changed = True
        result = None
        if not self.module.check_mode:
            dc = find_datacenter_by_name(self.content, self.datacenter_name)
            changed, result = self.create_dvswitch(dc.networkFolder)
        self.module.exit_json(changed=changed, result=str(result))
    def check_dvs_configuration(self):
        """Return 'present'/'absent' for the switch, caching the lookup."""
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            return 'absent'
        else:
            return 'present'
def main():
    """Module entry point: build the arg spec and run the state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
                              switch_name=dict(required=True, type='str'),
                              mtu=dict(required=True, type='int'),
                              uplink_quantity=dict(required=True, type='int'),
                              discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
                              discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    vmware_dvswitch = VMwareDVSwitch(module)
    vmware_dvswitch.process_state()
if __name__ == '__main__':
    main()
| fernandezcuesta/ansible | lib/ansible/modules/cloud/vmware/vmware_dvswitch.py | Python | gpl-3.0 | 7,199 |
#!/usr/bin/env python
import os.path
import sys
# Make sure the directory containing this script (and settings.py) is
# importable.
DIRNAME = os.path.dirname(__file__)
if DIRNAME not in sys.path:
    sys.path.append(DIRNAME)
from django.core.management import execute_manager
try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    # sys is already imported at the top of the file; the original
    # re-imported it redundantly here.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| luxnovalabs/enjigo_door | web_interface/keyedcache/test_app/manage.py | Python | unlicense | 668 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing keypairs.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.template.defaultfilters import slugify # noqa
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView # noqa
from django.views.generic import View # noqa
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
import forms as project_forms
class CreateView(forms.ModalFormView):
    """Modal form for generating a new key pair."""
    form_class = project_forms.CreateKeypair
    template_name = 'project/access_and_security/keypairs/create.html'
    success_url = 'horizon:project:access_and_security:keypairs:download'
    def get_success_url(self):
        # Redirect to the download page of the key pair just created; its
        # name comes from the submitted form data.
        return reverse(self.success_url,
                       kwargs={"keypair_name": self.request.POST['name']})
class ImportView(forms.ModalFormView):
    """Modal form for importing an existing public key."""
    form_class = project_forms.ImportKeypair
    template_name = 'project/access_and_security/keypairs/import.html'
    success_url = reverse_lazy('horizon:project:access_and_security:index')
    def get_object_id(self, keypair):
        # Key pairs are identified by name rather than a numeric id.
        return keypair.name
class DownloadView(TemplateView):
    """Shows the page from which a freshly created key pair is downloaded."""
    template_name = 'project/access_and_security/keypairs/download.html'

    def get_context_data(self, keypair_name=None):
        return {'keypair_name': keypair_name}
class GenerateView(View):
    """Create a key pair via nova and stream the private key back.

    The private key is only available at creation time, so it is written
    directly into the HTTP response as a downloadable ``.pem`` attachment.
    """
    def get(self, request, keypair_name=None):
        try:
            keypair = api.nova.keypair_create(request, keypair_name)
        except Exception:
            redirect = reverse('horizon:project:access_and_security:index')
            # NOTE(review): this relies on exceptions.handle() raising when
            # given a redirect; if it ever returned normally, `keypair`
            # below would be unbound -- confirm against horizon's
            # exceptions.handle implementation.
            exceptions.handle(self.request,
                              _('Unable to create key pair: %(exc)s'),
                              redirect=redirect)
        # Serve the private key as a file download named after the key pair.
        response = http.HttpResponse(content_type='application/binary')
        response['Content-Disposition'] = \
            'attachment; filename=%s.pem' % slugify(keypair.name)
        response.write(keypair.private_key)
        response['Content-Length'] = str(len(response.content))
        return response
| gochist/horizon | openstack_dashboard/dashboards/project/access_and_security/keypairs/views.py | Python | apache-2.0 | 3,012 |
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCase(TestBase):
    """Checks that `alignas` on a base class is reflected in the member
    offsets computed by the expression evaluator."""
    mydir = TestBase.compute_mydir(__file__)
    @no_debug_info_test
    def test(self):
        # Build the inferior and create a target; no process launch is
        # needed because the check is done purely via expression evaluation.
        self.build()
        self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
        # The offset of f2 should be 8 because of `alignas(8)`.
        self.expect_expr("(intptr_t)&d3g.f2 - (intptr_t)&d3g", result_value="8")
| google/llvm-propeller | lldb/test/API/lang/cpp/alignas_base_class/TestAlignAsBaseClass.py | Python | apache-2.0 | 473 |
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import gevent
from gevent import monkey; monkey.patch_all()
import argparse
import os
import socket
import random
import math
import uuid
from netaddr import IPAddress
from pysandesh.sandesh_base import *
from pysandesh.util import UTCTimestampUsec
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, Module2NodeType, \
NodeTypeNames
from vrouter.sandesh.virtual_network.ttypes import UveVirtualNetworkAgent, \
InterVnStats, UveInterVnStats, UveVirtualNetworkAgentTrace
from vrouter.sandesh.virtual_machine.ttypes import VmInterfaceAgent, \
UveVirtualMachineAgent, UveVirtualMachineAgentTrace
from vrouter.vrouter.ttypes import VrouterStatsAgent, VrouterStats
from vrouter.cpuinfo import CpuInfoData
from vrouter.sandesh.flow.ttypes import *
class MockGenerator(object):
    """Emulates one vrouter agent: connects to a collector via sandesh
    and continuously publishes randomized UVE, CPU-info and flow messages
    for the slice of synthetic virtual networks assigned to it."""
    # Naming templates for the synthetic virtual networks / machines.
    _VN_PREFIX = 'default-domain:mock-gen-test:vn'
    _VM_PREFIX = 'vm'
    # Upper bounds used when randomizing fabricated traffic statistics.
    _BYTES_PER_PACKET = 1024
    _OTHER_VN_PKTS_PER_SEC = 1000
    # Pacing intervals and spawn jitter (all in seconds).
    _UVE_MSG_INTVL_IN_SEC = 10
    _GEVENT_SPAWN_DELAY_IN_SEC = 10
    _FLOW_GEVENT_SPAWN_DELAY_IN_SEC = 30
    # Flow messages sent back-to-back before sleeping _FLOW_MSG_INTVL_IN_SEC.
    _NUM_FLOWS_IN_ITERATION = 145 * 10
    _FLOW_MSG_INTVL_IN_SEC = 1
    _FLOW_PKTS_PER_SEC = 100
    def __init__(self, hostname, module_name, node_type_name, instance_id,
                 start_vn, end_vn, other_vn,
                 num_vns, vm_iterations, collectors, ip_vns, ip_start_index,
                 num_flows_per_vm):
        """Record this generator's identity and its assigned VN/VM slice.

        [start_vn, end_vn) is the range of VN indices this generator owns;
        other_vn seeds the rotating peer VN used for inter-VN traffic.
        ip_vns holds the base IPAddress of each VN and ip_start_index the
        first host offset this generator may use within each VN.
        The sandesh connection itself is established in run_generator().
        """
        self._module_name = module_name
        self._hostname = hostname
        self._node_type_name = node_type_name
        self._instance_id = instance_id
        self._start_vn = start_vn
        self._end_vn = end_vn
        self._num_vns = num_vns
        self._other_vn = other_vn
        self._ip_vns = ip_vns
        self._ip_start_index = ip_start_index
        self._vm_iterations = vm_iterations
        self._num_flows_per_vm = num_flows_per_vm
        self._sandesh_instance = Sandesh()
        if not isinstance(collectors, list):
            collectors = [collectors]
        self._collectors = collectors
    #end __init__
    def run_generator(self):
        """Initialize the sandesh client and spawn the three sender
        greenlets (UVEs, CPU info, flows).

        Spawn times are jittered so a fleet of generators does not send in
        lock-step.  Returns the greenlets so the caller can joinall() them.
        """
        self._sandesh_instance.init_generator(self._module_name, self._hostname,
            self._node_type_name, self._instance_id, self._collectors,
            '', -1, ['vrouter'])
        self._sandesh_instance.set_logging_params(enable_local_log = False,
                                                  level = SandeshLevel.SYS_EMERG)
        send_uve_task = gevent.spawn_later(
            random.randint(0, self._GEVENT_SPAWN_DELAY_IN_SEC),
            self._send_uve_sandesh)
        cpu_info_task = gevent.spawn_later(
            random.randint(0, self._GEVENT_SPAWN_DELAY_IN_SEC),
            self._send_cpu_info)
        send_flow_task = gevent.spawn_later(
            random.randint(5, self._FLOW_GEVENT_SPAWN_DELAY_IN_SEC),
            self._send_flow_sandesh)
        return [send_uve_task, cpu_info_task, send_flow_task]
    #end run_generator
    def _send_flow_sandesh(self):
        """Forever: lazily fabricate one flow record per (VN, VM, flow)
        tuple, then repeatedly bump each flow's counters and resend it,
        pausing after every _NUM_FLOWS_IN_ITERATION messages."""
        flows = []
        while True:
            # Populate flows if not done
            if len(flows) == 0:
                other_vn = self._other_vn
                for vn in range(self._start_vn, self._end_vn):
                    for nvm in range(self._vm_iterations):
                        for nflow in range(self._num_flows_per_vm):
                            init_packets = random.randint(1, \
                                self._FLOW_PKTS_PER_SEC)
                            init_bytes = init_packets * \
                                random.randint(1, self._BYTES_PER_PACKET)
                            # Source lives in vn, destination in the peer
                            # VN; both use the same host offset.
                            sourceip = int(self._ip_vns[vn] + \
                                self._ip_start_index + nvm)
                            destip = int(self._ip_vns[other_vn] + \
                                self._ip_start_index + nvm)
                            flows.append(FlowDataIpv4(
                                flowuuid = str(uuid.uuid1()),
                                direction_ing = random.randint(0, 1),
                                sourcevn = self._VN_PREFIX + str(vn),
                                destvn = self._VN_PREFIX + str(other_vn),
                                sourceip = sourceip,
                                destip = destip,
                                sport = random.randint(0, 65535),
                                dport = random.randint(0, 65535),
                                protocol = random.choice([6, 17, 1]),
                                setup_time = UTCTimestampUsec(),
                                packets = init_packets,
                                bytes = init_bytes,
                                diff_packets = init_packets,
                                diff_bytes = init_bytes))
                    # Rotate the peer VN per source VN.
                    other_vn = (other_vn + 1) % self._num_vns
            # Send the flows periodically
            flow_cnt = 0
            for flow_data in flows:
                # Accumulate totals; diff_* carries just this interval.
                new_packets = random.randint(1, self._FLOW_PKTS_PER_SEC)
                new_bytes = new_packets * \
                    random.randint(1, self._BYTES_PER_PACKET)
                flow_data.packets += new_packets
                flow_data.bytes += new_bytes
                flow_data.diff_packets = new_packets
                flow_data.diff_bytes = new_bytes
                flow_object = FlowDataIpv4Object(flowdata = flow_data,
                                 sandesh = self._sandesh_instance)
                flow_object.send(sandesh = self._sandesh_instance)
                flow_cnt += 1
                if flow_cnt == self._NUM_FLOWS_IN_ITERATION:
                    flow_cnt = 0
                    gevent.sleep(self._FLOW_MSG_INTVL_IN_SEC)
                else:
                    # Yield to other greenlets without delaying.
                    gevent.sleep(0)
    #end _send_flow_sandesh
    def _send_cpu_info(self):
        """Every 60 seconds publish this process's real CPU/memory usage
        as the mock vrouter's VrouterStats UVE."""
        vrouter_cpu_info = CpuInfoData()
        vrouter_stats = VrouterStatsAgent()
        vrouter_stats.name = self._hostname
        while True:
            vrouter_stats.cpu_info = vrouter_cpu_info.get_cpu_info(system = False)
            vrouter_stats.cpu_share = vrouter_stats.cpu_info.cpu_share
            vrouter_stats.virt_mem = vrouter_stats.cpu_info.meminfo.virt
            stats = VrouterStats(sandesh = self._sandesh_instance,
                                 data = vrouter_stats)
            stats.send(sandesh = self._sandesh_instance)
            gevent.sleep(60)
    #end _send_cpu_info
    def _populate_other_vn_stats(self, other_vn, intervn_list, vn, vn_stats,
                                 in_uve_intervn_list, out_uve_intervn_list):
        """Fabricate one interval of traffic between vn and other_vn.

        Appends the per-interval sample to intervn_list, and the running
        totals (accumulated across calls in the caller-owned vn_stats
        dict) to in_uve_intervn_list / out_uve_intervn_list.
        """
        other_vn_name = self._VN_PREFIX + str(other_vn)
        intervn = InterVnStats()
        intervn.other_vn = other_vn_name
        intervn.vrouter = self._hostname
        intervn.in_tpkts = random.randint(1, self._OTHER_VN_PKTS_PER_SEC * \
            self._num_vns * self._UVE_MSG_INTVL_IN_SEC)
        intervn.in_bytes = intervn.in_tpkts * random.randint(1, \
            self._BYTES_PER_PACKET)
        intervn.out_tpkts = random.randint(1, self._OTHER_VN_PKTS_PER_SEC * \
            self._num_vns * self._UVE_MSG_INTVL_IN_SEC)
        intervn.out_bytes = intervn.out_tpkts * random.randint(1, \
            self._BYTES_PER_PACKET)
        if vn in vn_stats:
            other_vn_stats = vn_stats[vn]
        else:
            other_vn_stats = None
        # Fold this interval's numbers into the cumulative per-peer totals.
        if other_vn_stats is None:
            other_vn_stats = {}
            other_vn_stats[other_vn] = (intervn.in_tpkts, intervn.in_bytes, \
                intervn.out_tpkts, intervn.out_bytes)
        else:
            if other_vn in other_vn_stats:
                prev_in_tpkts, prev_in_bytes, prev_out_tpkts, prev_out_bytes = \
                        other_vn_stats[other_vn]
                new_in_tpkts = prev_in_tpkts + intervn.in_tpkts
                new_in_bytes = prev_in_bytes + intervn.in_bytes
                new_out_tpkts = prev_out_tpkts + intervn.out_tpkts
                new_out_bytes = prev_out_bytes + intervn.out_bytes
                other_vn_stats[other_vn] = (new_in_tpkts, new_in_bytes, \
                    new_out_tpkts, new_out_bytes)
            else:
                other_vn_stats[other_vn] = (intervn.in_tpkts, \
                    intervn.in_bytes, intervn.out_tpkts, intervn.out_bytes)
        vn_stats[vn] = other_vn_stats
        in_uve_intervn = UveInterVnStats()
        in_uve_intervn.other_vn = other_vn_name
        out_uve_intervn = UveInterVnStats()
        out_uve_intervn.other_vn = other_vn_name
        in_uve_intervn.tpkts, in_uve_intervn.bytes, out_uve_intervn.tpkts, \
            out_uve_intervn.bytes = other_vn_stats[other_vn]
        in_uve_intervn_list.append(in_uve_intervn)
        out_uve_intervn_list.append(out_uve_intervn)
        intervn_list.append(intervn)
    #end _populate_other_vn_stats
    def _send_uve_sandesh(self):
        """Forever publish VN and VM UVEs for this generator's VN range,
        rotating the peer VN and pacing each send with a random sleep.

        The VN -> VM-name mapping is built during the first pass and the
        resulting virtualmachine_list UVEs are sent once on the second.
        """
        vn_stats = {}
        vn_vm_list = {}
        vn_vm_list_populated = False
        vn_vm_list_sent = False
        while True:
            # Send VM list if populated and not already sent
            if vn_vm_list_populated and not vn_vm_list_sent:
                for vn in range(self._start_vn, self._end_vn):
                    vn_agent = UveVirtualNetworkAgent(virtualmachine_list = \
                                   vn_vm_list[vn])
                    vn_agent.name = self._VN_PREFIX + str(vn)
                    uve_agent_vn = UveVirtualNetworkAgentTrace( \
                        data = vn_agent, sandesh = self._sandesh_instance)
                    uve_agent_vn.send(sandesh = self._sandesh_instance)
                    gevent.sleep(random.randint(0, self._UVE_MSG_INTVL_IN_SEC))
                vn_vm_list_sent = True
            other_vn = self._other_vn
            for vn in range(self._start_vn, self._end_vn):
                intervn_list = []
                in_uve_intervn_list = []
                out_uve_intervn_list = []
                # Populate inter-VN and UVE inter-VN stats for other_vn
                self._populate_other_vn_stats(other_vn, intervn_list, vn, \
                    vn_stats, in_uve_intervn_list, out_uve_intervn_list)
                # Populate inter-VN and UVE inter-VN stats for self - vn
                self._populate_other_vn_stats(vn, intervn_list, vn, \
                    vn_stats, in_uve_intervn_list, out_uve_intervn_list)
                vn_agent = UveVirtualNetworkAgent(vn_stats = intervn_list,
                               in_stats = in_uve_intervn_list,
                               out_stats = out_uve_intervn_list)
                vn_agent.name = self._VN_PREFIX + str(vn)
                uve_agent_vn = UveVirtualNetworkAgentTrace(data = vn_agent,
                                   sandesh = self._sandesh_instance)
                uve_agent_vn.send(sandesh = self._sandesh_instance)
                for nvm in range(self._vm_iterations):
                    vm_if = VmInterfaceAgent()
                    vm_if.name = 'p2p1'
                    vm_if.ip_address = str(self._ip_vns[vn] + \
                        self._ip_start_index + nvm)
                    vm_if.virtual_network = vn_agent.name
                    vm_agent = UveVirtualMachineAgent()
                    vm_name = vn_agent.name + ':' + self._hostname + ':' + \
                        self._VM_PREFIX + str(vn) + '-' + str(nvm)
                    vm_agent.name = vm_name
                    vm_agent.interface_list = []
                    vm_agent.interface_list.append(vm_if)
                    uve_agent_vm = UveVirtualMachineAgentTrace(data = vm_agent,
                                       sandesh = self._sandesh_instance)
                    uve_agent_vm.send(sandesh = self._sandesh_instance)
                    # Populate VN VM list
                    if not vn in vn_vm_list:
                        vn_vm_list[vn] = [vm_name]
                    else:
                        vm_list = vn_vm_list[vn]
                        vm_list.append(vm_name)
                    gevent.sleep(random.randint(0, self._UVE_MSG_INTVL_IN_SEC))
                other_vn += 1
            gevent.sleep(random.randint(0, self._UVE_MSG_INTVL_IN_SEC))
            vn_vm_list_populated = True
    #end _send_uve_sandesh
#end class MockGenerator
class MockGeneratorTest(object):
    """Builds a fleet of MockGenerator instances from command-line
    options, partitioning the virtual networks among them.

    NOTE(review): this module appears to target Python 2 (`basestring`,
    true-division-free arithmetic below) -- confirm before porting.
    """
    def __init__(self):
        self._parse_args()
    #end __init__
    def _parse_args(self):
        '''
        Eg. python mock_generator.py
        --num_generators 10
        --collectors 127.0.0.1:8086
        --num_instances_per_generator 10
        --num_networks 100
        --num_flows_per_instance 10
        --start_ip_address 1.0.0.1
        '''
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument("--num_generators", type=int,
            default=10,
            help="Number of mock generators")
        parser.add_argument("--num_instances_per_generator", type=int,
            default=10,
            help="Number of instances (virtual machines) per generator")
        parser.add_argument("--num_networks", type=int,
            default=100,
            help="Number of virtual networks")
        parser.add_argument("--collectors",
            default='127.0.0.1:8086',
            help="List of Collector IP addresses in ip:port format",
            nargs="+")
        parser.add_argument("--num_flows_per_instance", type=int,
            default=10,
            help="Number of flows per instance (virtual machine)")
        parser.add_argument("--start_ip_address",
            default="1.0.0.1",
            help="Start IP address to be used for instances")
        self._args = parser.parse_args()
        # A single collector may arrive as one space-separated string.
        if isinstance(self._args.collectors, basestring):
            self._args.collectors = self._args.collectors.split()
    #end _parse_args
    def setup(self):
        """Partition VNs/IPs across the generators and construct them.

        Returns False (after printing an error) when the configuration
        cannot be partitioned, True on success.
        """
        collectors = self._args.collectors
        ngens = self._args.num_generators
        pid = os.getpid()
        num_instances = self._args.num_instances_per_generator
        num_networks = self._args.num_networks
        module = Module.VROUTER_AGENT
        moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        # Embed the pid so parallel runs on one host get unique names.
        hostname = socket.gethostname() + '-' + str(pid)
        hostnames = [hostname + '-' + str(x) for x in range(ngens)]
        gen_factor = num_networks / num_instances
        if gen_factor == 0:
            print("Number of virtual networks(%d) should be "
                "greater than number of instances per generator(%d)" % \
                (num_networks, num_instances))
            return False
        # Each generator owns a contiguous [start, end) slice of VN ids.
        start_vns = [(x % gen_factor) * num_instances for x in range(ngens)]
        end_vns = [((x % gen_factor) + 1) * num_instances \
            for x in range(ngens)]
        # The peer VN starts half the VN space away from the slice start.
        other_vn_adj = num_networks / 2
        other_vns = [x - other_vn_adj if x >= other_vn_adj \
            else x + other_vn_adj for x in start_vns]
        instance_iterations = int(math.ceil(float(num_instances) / \
            num_networks))
        num_ips_per_vn = int(math.ceil(float(ngens * num_instances) / \
            num_networks))
        start_ip_address = IPAddress(self._args.start_ip_address)
        ip_vns = [start_ip_address + num_ips_per_vn * x for x in \
            range(num_networks)]
        start_ip_index = [x * num_instances / num_networks for x in \
            range(ngens)]
        # Collectors are assigned to generators round-robin.
        self._generators = [MockGenerator(hostnames[x], moduleid, \
            node_type_name, str(x), start_vns[x], end_vns[x], other_vns[x], \
            num_networks, instance_iterations, \
            collectors[x % len(collectors)], ip_vns, \
            start_ip_index[x], self._args.num_flows_per_instance) \
            for x in range(ngens)]
        return True
    #end setup
    def run(self):
        """Start every generator and block until all greenlets finish."""
        generator_run_tasks = [gen.run_generator() for gen in self._generators]
        generator_tasks = [gen_task for gen_task_sublist in \
            generator_run_tasks for gen_task in gen_task_sublist ]
        gevent.joinall(generator_tasks)
    #end run
#end class MockGeneratorTest
def main():
    """Entry point: build the mock generator fleet and run it forever."""
    test = MockGeneratorTest()
    if test.setup():
        test.run()
#end main


if __name__ == '__main__':
    main()
| Juniper/contrail-dev-controller | src/vnsw/agent/uve/mock_generator.py | Python | apache-2.0 | 16,590 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
import six
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
# Module-level config and logger handles shared by the tests below.
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
    """Exercises the compute image-list filters (status, name, server,
    type, limit, changes-since) in both summary and detailed form.

    Fixtures: three plain glance images, plus (when snapshots are
    enabled) two servers and three server snapshots.
    """
    @classmethod
    def skip_checks(cls):
        # The whole class depends on glance being deployed.
        super(ListImageFiltersTestJSON, cls).skip_checks()
        if not CONF.service_available.glance:
            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
    @classmethod
    def setup_clients(cls):
        super(ListImageFiltersTestJSON, cls).setup_clients()
        cls.client = cls.images_client
        cls.glance_client = cls.os.image_client
    @classmethod
    def resource_setup(cls):
        """Create the shared image/server/snapshot fixtures once per class."""
        super(ListImageFiltersTestJSON, cls).resource_setup()
        def _create_image():
            # Create a private raw image directly in glance and upload a
            # small payload so it reaches ACTIVE.
            name = data_utils.rand_name('image')
            body = cls.glance_client.create_image(name=name,
                                                  container_format='bare',
                                                  disk_format='raw',
                                                  is_public=False)['image']
            image_id = body['id']
            cls.images.append(image_id)
            # Wait 1 second between creation and upload to ensure a delta
            # between created_at and updated_at.
            time.sleep(1)
            image_file = six.StringIO(('*' * 1024))
            cls.glance_client.update_image(image_id, data=image_file)
            waiters.wait_for_image_status(cls.client, image_id, 'ACTIVE')
            body = cls.client.show_image(image_id)['image']
            return body
        # Create non-snapshot images via glance
        cls.image1 = _create_image()
        cls.image1_id = cls.image1['id']
        cls.image2 = _create_image()
        cls.image2_id = cls.image2['id']
        cls.image3 = _create_image()
        cls.image3_id = cls.image3['id']
        if not CONF.compute_feature_enabled.snapshot:
            return
        # Create instances and snapshots via nova
        cls.server1 = cls.create_test_server()
        cls.server2 = cls.create_test_server(wait_until='ACTIVE')
        # NOTE(sdague) this is faster than doing the sync wait_util on both
        waiters.wait_for_server_status(cls.servers_client,
                                       cls.server1['id'], 'ACTIVE')
        # Create images to be used in the filter tests
        cls.snapshot1 = cls.create_image_from_server(
            cls.server1['id'], wait_until='ACTIVE')
        cls.snapshot1_id = cls.snapshot1['id']
        # Servers have a hidden property for when they are being imaged
        # Performing back-to-back create image calls on a single
        # server will sometimes cause failures
        cls.snapshot3 = cls.create_image_from_server(
            cls.server2['id'], wait_until='ACTIVE')
        cls.snapshot3_id = cls.snapshot3['id']
        # Wait for the server to be active after the image upload
        cls.snapshot2 = cls.create_image_from_server(
            cls.server1['id'], wait_until='ACTIVE')
        cls.snapshot2_id = cls.snapshot2['id']
    @test.idempotent_id('a3f5b513-aeb3-42a9-b18e-f091ef73254d')
    def test_list_images_filter_by_status(self):
        # The list of images should contain only images with the
        # provided status
        params = {'status': 'ACTIVE'}
        images = self.client.list_images(**params)['images']
        self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
        self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
        self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
    @test.idempotent_id('33163b73-79f5-4d07-a7ea-9213bcc468ff')
    def test_list_images_filter_by_name(self):
        # List of all images should contain the expected images filtered
        # by name
        params = {'name': self.image1['name']}
        images = self.client.list_images(**params)['images']
        self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
        self.assertFalse(any([i for i in images if i['id'] == self.image2_id]))
        self.assertFalse(any([i for i in images if i['id'] == self.image3_id]))
    @test.idempotent_id('9f238683-c763-45aa-b848-232ec3ce3105')
    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                          'Snapshotting is not available.')
    def test_list_images_filter_by_server_id(self):
        # The images should contain images filtered by server id
        params = {'server': self.server1['id']}
        images = self.client.list_images(**params)['images']
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot1_id]),
                        "Failed to find image %s in images. Got images %s" %
                        (self.image1_id, images))
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot2_id]))
        self.assertFalse(any([i for i in images
                              if i['id'] == self.snapshot3_id]))
    @test.idempotent_id('05a377b8-28cf-4734-a1e6-2ab5c38bf606')
    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                          'Snapshotting is not available.')
    def test_list_images_filter_by_server_ref(self):
        # The list of servers should be filtered by server ref
        server_links = self.server2['links']
        # Try all server link types
        for link in server_links:
            params = {'server': link['href']}
            images = self.client.list_images(**params)['images']
            self.assertFalse(any([i for i in images
                                  if i['id'] == self.snapshot1_id]))
            self.assertFalse(any([i for i in images
                                  if i['id'] == self.snapshot2_id]))
            self.assertTrue(any([i for i in images
                                 if i['id'] == self.snapshot3_id]))
    @test.idempotent_id('e3356918-4d3e-4756-81d5-abc4524ba29f')
    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                          'Snapshotting is not available.')
    def test_list_images_filter_by_type(self):
        # The list of servers should be filtered by image type
        params = {'type': 'snapshot'}
        images = self.client.list_images(**params)['images']
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot1_id]))
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot2_id]))
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot3_id]))
        self.assertFalse(any([i for i in images
                              if i['id'] == self.image_ref]))
    @test.idempotent_id('3a484ca9-67ba-451e-b494-7fcf28d32d62')
    def test_list_images_limit_results(self):
        # Verify only the expected number of results are returned
        params = {'limit': '1'}
        images = self.client.list_images(**params)['images']
        self.assertEqual(1, len([x for x in images if 'id' in x]))
    @test.idempotent_id('18bac3ae-da27-436c-92a9-b22474d13aab')
    def test_list_images_filter_by_changes_since(self):
        # Verify only updated images are returned in the detailed list
        # Becoming ACTIVE will modify the updated time
        # Filter by the image's created time
        params = {'changes-since': self.image3['created']}
        images = self.client.list_images(**params)['images']
        found = any([i for i in images if i['id'] == self.image3_id])
        self.assertTrue(found)
    @test.idempotent_id('9b0ea018-6185-4f71-948a-a123a107988e')
    def test_list_images_with_detail_filter_by_status(self):
        # Detailed list of all images should only contain images
        # with the provided status
        params = {'status': 'ACTIVE'}
        images = self.client.list_images(detail=True, **params)['images']
        self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
        self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
        self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
    @test.idempotent_id('644ea267-9bd9-4f3b-af9f-dffa02396a17')
    def test_list_images_with_detail_filter_by_name(self):
        # Detailed list of all images should contain the expected
        # images filtered by name
        params = {'name': self.image1['name']}
        images = self.client.list_images(detail=True, **params)['images']
        self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
        self.assertFalse(any([i for i in images if i['id'] == self.image2_id]))
        self.assertFalse(any([i for i in images if i['id'] == self.image3_id]))
    @test.idempotent_id('ba2fa9a9-b672-47cc-b354-3b4c0600e2cb')
    def test_list_images_with_detail_limit_results(self):
        # Verify only the expected number of results (with full details)
        # are returned
        params = {'limit': '1'}
        images = self.client.list_images(detail=True, **params)['images']
        self.assertEqual(1, len(images))
    @test.idempotent_id('8c78f822-203b-4bf6-8bba-56ebd551cf84')
    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                          'Snapshotting is not available.')
    def test_list_images_with_detail_filter_by_server_ref(self):
        # Detailed list of servers should be filtered by server ref
        server_links = self.server2['links']
        # Try all server link types
        for link in server_links:
            params = {'server': link['href']}
            images = self.client.list_images(detail=True, **params)['images']
            self.assertFalse(any([i for i in images
                                  if i['id'] == self.snapshot1_id]))
            self.assertFalse(any([i for i in images
                                  if i['id'] == self.snapshot2_id]))
            self.assertTrue(any([i for i in images
                                 if i['id'] == self.snapshot3_id]))
    @test.idempotent_id('888c0cc0-7223-43c5-9db0-b125fd0a393b')
    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                          'Snapshotting is not available.')
    def test_list_images_with_detail_filter_by_type(self):
        # The detailed list of servers should be filtered by image type
        params = {'type': 'snapshot'}
        images = self.client.list_images(detail=True, **params)['images']
        self.client.show_image(self.image_ref)
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot1_id]))
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot2_id]))
        self.assertTrue(any([i for i in images
                             if i['id'] == self.snapshot3_id]))
        self.assertFalse(any([i for i in images
                              if i['id'] == self.image_ref]))
    @test.idempotent_id('7d439e18-ac2e-4827-b049-7e18004712c4')
    def test_list_images_with_detail_filter_by_changes_since(self):
        # Verify an update image is returned
        # Becoming ACTIVE will modify the updated time
        # Filter by the image's created time
        params = {'changes-since': self.image1['created']}
        images = self.client.list_images(detail=True, **params)['images']
        self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
| liucode/tempest-master | tempest/api/compute/images/test_list_image_filters.py | Python | apache-2.0 | 12,355 |
#!/usr/bin/env vpython3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for updating the active milestones for the chromium project.
To activate a new chromium branch, run the following from the root of
the repo (where MM is the milestone number and BBBB is the branch
number):
```
scripts/chromium/milestones.py activate --milestone MM --branch BBBB
./main.star
```
To deactivate a chromium branch, run the following from the root of the
repo (where MM is the milestone number):
```
scripts/chromium/milestones.py deactivate --milestone MM
./main.star
```
Usage:
milestones.py activate --milestone XX --branch YYYY
milestones.py deactivate --milestone XX
"""
import argparse
import itertools
import json
import os
import re
import sys
INFRA_CONFIG_DIR = os.path.abspath(os.path.join(__file__, '..', '..'))


def parse_args(args=None, *, parser_type=None):
    """Build the CLI and parse *args* (defaults to sys.argv).

    *parser_type* lets tests substitute an ArgumentParser subclass.
    Errors out if no sub-command was given.
    """
    parser = (parser_type or argparse.ArgumentParser)(
        description='Update the active milestones for the chromium project')
    parser.set_defaults(func=None)
    parser.add_argument('--milestones-json',
                        help='Path to the milestones.json file',
                        default=os.path.join(INFRA_CONFIG_DIR,
                                             'milestones.json'))

    subparsers = parser.add_subparsers()

    activate = subparsers.add_parser(
        'activate', help='Add an additional active milestone')
    activate.set_defaults(func=activate_cmd)
    activate.add_argument(
        '--milestone',
        required=True,
        help=('The milestone identifier '
              '(e.g. the milestone number for standard release channel)'))
    activate.add_argument(
        '--branch',
        required=True,
        help='The branch name, must correspond to a ref in refs/branch-heads')

    deactivate = subparsers.add_parser(
        'deactivate', help='Remove an active milestone')
    deactivate.set_defaults(func=deactivate_cmd)
    deactivate.add_argument(
        '--milestone',
        required=True,
        help=('The milestone identifier '
              '(e.g. the milestone number for standard release channel)'))

    parsed = parser.parse_args(args)
    if parsed.func is None:
        parser.error('no sub-command specified')
    return parsed
class MilestonesException(Exception):
  """Raised for user-facing errors while editing the milestones file."""
  pass
_NUMBER_RE = re.compile('([0-9]+)')


def numeric_sort_key(s):
  """Return a sort key for *s* that orders embedded numbers numerically.

  Each digit run becomes an (int, str) tuple so '99' sorts before '100'.
  """
  # re.split with a capture group yields text/number pieces alternately;
  # the numeric captures always sit at the odd indices.
  key = []
  for index, piece in enumerate(_NUMBER_RE.split(s)):
    if index % 2:
      key.append((int(piece), piece))
    else:
      key.append(piece)
  return key


def add_milestone(milestones, milestone, branch):
  """Return updated milestones.json text with *milestone* added.

  Raises MilestonesException if the milestone is already active.
  """
  if milestone in milestones:
    raise MilestonesException(
        f'there is already an active milestone with id {milestone!r}: '
        f'{milestones[milestone]}')
  milestones[milestone] = {
      'name': f'm{milestone}',
      'project': f'chromium-m{milestone}',
      'ref': f'refs/branch-heads/{branch}',
  }
  # Keep the file deterministically ordered by milestone number.
  ordered = sorted(milestones, key=numeric_sort_key)
  milestones = {key: milestones[key] for key in ordered}
  return json.dumps(milestones, indent=4) + '\n'
def activate_cmd(args):
  """Handler for the `activate` sub-command: add a milestone and rewrite
  the milestones.json file in place."""
  with open(args.milestones_json) as f:
    current = json.load(f)
  updated = add_milestone(current, args.milestone, args.branch)
  with open(args.milestones_json, 'w') as f:
    f.write(updated)
def remove_milestone(milestones, milestone):
  """Return updated milestones.json text with *milestone* removed.

  Raises MilestonesException if the milestone is not currently active.
  """
  if milestone not in milestones:
    raise MilestonesException(
        f'{milestone!r} does not refer to an active milestone: '
        f'{list(milestones.keys())}')
  del milestones[milestone]
  # Keep the file deterministically ordered by milestone number.
  ordered = sorted(milestones, key=numeric_sort_key)
  remaining = {key: milestones[key] for key in ordered}
  return json.dumps(remaining, indent=4) + '\n'
def deactivate_cmd(args):
  """Handler for the `deactivate` sub-command: drop a milestone and
  rewrite the milestones.json file in place."""
  with open(args.milestones_json) as f:
    current = json.load(f)
  updated = remove_milestone(current, args.milestone)
  with open(args.milestones_json, 'w') as f:
    f.write(updated)
def main():
  """Parse arguments and dispatch to the selected sub-command.

  User-facing errors exit with status 1 and the message on stderr.
  """
  args = parse_args()
  try:
    args.func(args)
  except MilestonesException as err:
    # sys.exit with a string prints it to stderr and exits with code 1.
    sys.exit(str(err))
if __name__ == '__main__':
main() | nwjs/chromium.src | infra/config/scripts/milestones.py | Python | bsd-3-clause | 4,423 |
'''tzinfo timezone information for Asia/Ashkhabad.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Ashkhabad(DstTzInfo):
    '''Asia/Ashkhabad timezone definition. See datetime.tzinfo for details'''
    zone = 'Asia/Ashkhabad'
    # Auto-generated zoneinfo data: UTC instants at which the local
    # offset/name changes (first entry is the pre-history sentinel).
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1924,5,1,20,6,28),
d(1930,6,20,20,0,0),
d(1981,3,31,19,0,0),
d(1981,9,30,18,0,0),
d(1982,3,31,19,0,0),
d(1982,9,30,18,0,0),
d(1983,3,31,19,0,0),
d(1983,9,30,18,0,0),
d(1984,3,31,19,0,0),
d(1984,9,29,21,0,0),
d(1985,3,30,21,0,0),
d(1985,9,28,21,0,0),
d(1986,3,29,21,0,0),
d(1986,9,27,21,0,0),
d(1987,3,28,21,0,0),
d(1987,9,26,21,0,0),
d(1988,3,26,21,0,0),
d(1988,9,24,21,0,0),
d(1989,3,25,21,0,0),
d(1989,9,23,21,0,0),
d(1990,3,24,21,0,0),
d(1990,9,29,21,0,0),
d(1991,3,30,21,0,0),
d(1991,9,28,22,0,0),
d(1991,10,26,20,0,0),
d(1992,1,18,22,0,0),
        ]
    # (utcoffset seconds, dst seconds, tzname) for each transition above.
    _transition_info = [
i(14040,0,'LMT'),
i(14400,0,'ASHT'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(18000,0,'ASHST'),
i(14400,0,'ASHT'),
i(14400,0,'TMT'),
i(18000,0,'TMT'),
        ]

# Module-level singleton instance, as pytz expects for zone lookups.
Ashkhabad = Ashkhabad()
| newvem/pytz | pytz/zoneinfo/Asia/Ashkhabad.py | Python | mit | 1,535 |
"""
Setup for the API
"""
import api
# Module-level logger obtained through the project's logging helper.
log = api.logger.use(__name__)
def index_mongo():
    """
    Ensure the mongo collections are indexed.
    """
    db = api.common.get_conn()

    log.debug("Ensuring mongo is indexed.")

    # Each entry is (collection name, indexed field, index options).
    index_specs = [
        ("users", "uid", {"unique": True, "name": "unique uid"}),
        ("users", "username", {"unique": True, "name": "unique username"}),
        ("groups", "gid", {"unique": True, "name": "unique gid"}),
        ("problems", "pid", {"unique": True, "name": "unique pid"}),
        ("submissions", "tid", {"name": "submission tids"}),
        ("ssh", "tid", {"unique": True, "name": "unique ssh tid"}),
        ("teams", "team_name", {"unique": True, "name": "unique team names"}),
        # TTL index: mongo purges cache entries once expireAt has passed.
        ("cache", "expireAt", {"expireAfterSeconds": 0}),
        ("cache", "kwargs", {"name": "kwargs"}),
        ("cache", "args", {"name": "args"}),
    ]
    for collection, field, options in index_specs:
        getattr(db, collection).ensure_index(field, **options)
| PATechmasters/techmaster-ctf | api/api/setup.py | Python | mit | 873 |
from __future__ import absolute_import
import os
import base64
from kombu.serialization import registry
from celery.exceptions import SecurityError
from celery.security.serialization import SecureSerializer, register_auth
from celery.security.certificate import Certificate, CertStore
from celery.security.key import PrivateKey
from . import CERT1, CERT2, KEY1, KEY2
from .case import SecurityCase
class test_SecureSerializer(SecurityCase):
    """Round-trip and tamper-rejection tests for the auth serializer."""

    def _get_s(self, key, cert, certs):
        """Build a serializer signing with ``key``/``cert`` that trusts
        every certificate in ``certs``."""
        store = CertStore()
        for trusted in certs:
            store.add_cert(Certificate(trusted))
        return SecureSerializer(PrivateKey(key), Certificate(cert), store)

    def test_serialize(self):
        # A message signed and verified by the same identity round-trips.
        s = self._get_s(KEY1, CERT1, [CERT1])
        self.assertEqual(s.deserialize(s.serialize('foo')), 'foo')

    def test_deserialize(self):
        # Garbage input must be rejected, not decoded.
        s = self._get_s(KEY1, CERT1, [CERT1])
        with self.assertRaises(SecurityError):
            s.deserialize('bad data')

    def test_unmatched_key_cert(self):
        # A key that does not match the signing certificate must fail
        # verification on the way back in.
        s = self._get_s(KEY1, CERT2, [CERT1, CERT2])
        with self.assertRaises(SecurityError):
            s.deserialize(s.serialize('foo'))

    def test_unknown_source(self):
        # Messages from a signer absent from the trust store are refused.
        untrusted = self._get_s(KEY1, CERT1, [CERT2])
        empty_store = self._get_s(KEY1, CERT1, [])
        with self.assertRaises(SecurityError):
            untrusted.deserialize(untrusted.serialize('foo'))
        with self.assertRaises(SecurityError):
            empty_store.deserialize(empty_store.serialize('foo'))

    def test_self_send(self):
        # Two endpoints sharing one identity can exchange messages.
        a = self._get_s(KEY1, CERT1, [CERT1])
        b = self._get_s(KEY1, CERT1, [CERT1])
        self.assertEqual(b.deserialize(a.serialize('foo')), 'foo')

    def test_separate_ends(self):
        # Distinct identities work when each end trusts the other's cert.
        sender = self._get_s(KEY1, CERT1, [CERT2])
        receiver = self._get_s(KEY2, CERT2, [CERT1])
        self.assertEqual(receiver.deserialize(sender.serialize('foo')), 'foo')

    def test_register_auth(self):
        # Registering the auth serializer exposes its content type to kombu.
        register_auth(KEY1, CERT1, '')
        self.assertIn('application/data', registry._decoders)

    def test_lots_of_sign(self):
        # Round trips must succeed for many random payloads.
        for _ in range(1000):
            payload = base64.urlsafe_b64encode(os.urandom(265))
            s = self._get_s(KEY1, CERT1, [CERT1])
            self.assertEqual(s.deserialize(s.serialize(payload)), payload)
| sunze/py_flask | venv/lib/python3.4/site-packages/celery/tests/security/test_serialization.py | Python | mit | 2,252 |
#######################################################################
#
# An example of creating Excel Scatter charts with Python and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter

workbook = xlsxwriter.Workbook('chart_scatter.xlsx')
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})

# Add the worksheet data that the charts will refer to.
headings = ['Number', 'Batch 1', 'Batch 2']
data = [
    [2, 3, 4, 5, 6, 7],
    [10, 40, 50, 20, 10, 50],
    [30, 60, 70, 50, 40, 30],
]

worksheet.write_row('A1', headings, bold)
worksheet.write_column('A2', data[0])
worksheet.write_column('B2', data[1])
worksheet.write_column('C2', data[2])

#######################################################################
#
# Create a new scatter chart (default sub-type, markers only).
#
chart1 = workbook.add_chart({'type': 'scatter'})

# Configure the first series.
chart1.add_series({
    'name':       '=Sheet1!$B$1',
    'categories': '=Sheet1!$A$2:$A$7',
    'values':     '=Sheet1!$B$2:$B$7',
})

# Configure second series. Note use of alternative syntax to define ranges:
# a [sheetname, first_row, first_col, last_row, last_col] list instead of an
# A1-style range string.
chart1.add_series({
    'name':       ['Sheet1', 0, 2],
    'categories': ['Sheet1', 1, 0, 6, 0],
    'values':     ['Sheet1', 1, 2, 6, 2],
})

# Add a chart title and some axis labels.
chart1.set_title({'name': 'Results of sample analysis'})
chart1.set_x_axis({'name': 'Test number'})
chart1.set_y_axis({'name': 'Sample length (mm)'})

# Set an Excel chart style.
chart1.set_style(11)

# Insert the chart into the worksheet (with an offset).
worksheet.insert_chart('D2', chart1, {'x_offset': 25, 'y_offset': 10})

#######################################################################
#
# The remaining four charts demonstrate the scatter sub-types. They differ
# only in sub-type, title, built-in style number and anchor cell, so they
# are built from a table of those parameters instead of four copy-pasted
# blocks.
#
subtype_charts = [
    # (subtype,                 title,                        style, cell)
    ('straight_with_markers',   'Straight line with markers', 12,    'D18'),
    ('straight',                'Straight line',              13,    'D34'),
    ('smooth_with_markers',     'Smooth line with markers',   14,    'D51'),
    ('smooth',                  'Smooth line',                15,    'D66'),
]

for subtype, title, style, cell in subtype_charts:
    chart = workbook.add_chart({'type': 'scatter', 'subtype': subtype})

    # Both data batches, plotted against the test number.
    chart.add_series({
        'name':       '=Sheet1!$B$1',
        'categories': '=Sheet1!$A$2:$A$7',
        'values':     '=Sheet1!$B$2:$B$7',
    })
    chart.add_series({
        'name':       '=Sheet1!$C$1',
        'categories': '=Sheet1!$A$2:$A$7',
        'values':     '=Sheet1!$C$2:$C$7',
    })

    chart.set_title({'name': title})
    chart.set_x_axis({'name': 'Test number'})
    chart.set_y_axis({'name': 'Sample length (mm)'})
    chart.set_style(style)

    # Insert the chart into the worksheet (with an offset).
    worksheet.insert_chart(cell, chart, {'x_offset': 25, 'y_offset': 10})

workbook.close()
| lewislone/mStocks | packets-analysis/lib/XlsxWriter-0.7.3/examples/chart_scatter.py | Python | mit | 5,416 |
import os
import sys
def is_active():
    """This platform target is enabled."""
    return True
def get_name():
    """Human-readable name of this platform."""
    return "X11"
def can_build():
    """Return True when the host can build the X11 target.

    Requires a POSIX host (macOS excluded), a working pkg-config, and the
    x11, openssl, xcursor and xinerama development packages. Each missing
    prerequisite prints a diagnostic and disables the platform.
    """
    if (os.name != "posix"):
        return False

    if sys.platform == "darwin":
        return False  # no x11 on mac for now

    # pkg-config itself must exist before we can probe anything else.
    if os.system("pkg-config --version > /dev/null"):
        print("pkg-config not found.. x11 disabled.")
        return False

    # Probe each required dev package in order; any miss disables x11.
    # (pkg-config exits non-zero when the package is unknown.)
    required = [
        ("x11", "X11"),
        ("openssl", "OpenSSL"),
        ("xcursor", "xcursor"),
        ("xinerama", "xinerama"),
    ]
    for package, label in required:
        if os.system("pkg-config " + package + " --modversion > /dev/null "):
            print(label + " not found.. x11 disabled.")
            return False

    return True  # X11 enabled
def get_opts():
    """Command-line build options specific to the X11 platform."""
    return [
        ('use_llvm', 'Use llvm compiler', 'no'),
        ('use_sanitizer', 'Use llvm compiler sanitize address', 'no'),
        ('use_leak_sanitizer', 'Use llvm compiler sanitize memory leaks', 'no'),
        ('pulseaudio', 'Detect & Use pulseaudio', 'yes'),
        ('new_wm_api', 'Use experimental window management API', 'no'),
        ('debug_release', 'Add debug symbols to release version', 'no'),
    ]
def get_flags():
    """Default build-flag overrides applied for this platform."""
    return [
        ('builtin_zlib', 'no'),
        ('openssl', 'yes'),
        ('theora', 'no'),
    ]
def configure(env):
	"""Mutate the SCons environment *env* for an X11 build.

	Selects bit-width, toolchain (gcc or clang), sanitizers, optimization
	level per target, and pulls compiler/linker flags for the required
	system libraries via pkg-config. Order matters: flags appended later
	can depend on earlier choices (e.g. is64 / env["bits"]).
	"""
	# Default bit-width follows the host interpreter's pointer size.
	is64=sys.maxsize > 2**32
	if (env["bits"]=="default"):
		if (is64):
			env["bits"]="64"
		else:
			env["bits"]="32"
	env.Append(CPPPATH=['#platform/x11'])
	# Optional clang toolchain; skipped if CXX already points at clang.
	if (env["use_llvm"]=="yes"):
		if 'clang++' not in env['CXX']:
			env["CC"]="clang"
			env["CXX"]="clang++"
			env["LD"]="clang++"
		env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
		env.extra_suffix=".llvm"
		# Colored diagnostics only when stdout is a terminal.
		if (env["colored"]=="yes"):
			if sys.stdout.isatty():
				env.Append(CXXFLAGS=["-fcolor-diagnostics"])
	# Both sanitizer options map to ASan flags; each adds an "s" suffix.
	if (env["use_sanitizer"]=="yes"):
		env.Append(CXXFLAGS=['-fsanitize=address','-fno-omit-frame-pointer'])
		env.Append(LINKFLAGS=['-fsanitize=address'])
		env.extra_suffix+="s"
	if (env["use_leak_sanitizer"]=="yes"):
		env.Append(CXXFLAGS=['-fsanitize=address','-fno-omit-frame-pointer'])
		env.Append(LINKFLAGS=['-fsanitize=address'])
		env.extra_suffix+="s"
	#if (env["tools"]=="no"):
	#	#no tools suffix
	#	env['OBJSUFFIX'] = ".nt"+env['OBJSUFFIX']
	#	env['LIBSUFFIX'] = ".nt"+env['LIBSUFFIX']
	# Optimization / debug flags per build target.
	if (env["target"]=="release"):
		if (env["debug_release"]=="yes"):
			env.Append(CCFLAGS=['-g2'])
		else:
			env.Append(CCFLAGS=['-O3','-ffast-math'])
	elif (env["target"]=="release_debug"):
		env.Append(CCFLAGS=['-O2','-ffast-math','-DDEBUG_ENABLED'])
	elif (env["target"]=="debug"):
		env.Append(CCFLAGS=['-g2', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
	# System library flags via pkg-config (can_build() verified these exist).
	env.ParseConfig('pkg-config x11 --cflags --libs')
	env.ParseConfig('pkg-config xinerama --cflags --libs')
	env.ParseConfig('pkg-config xcursor --cflags --libs')
	if (env["openssl"]=="yes"):
		env.ParseConfig('pkg-config openssl --cflags --libs')
	# freetype: "yes" uses the system library, "builtin" the bundled copy.
	if (env["freetype"]=="yes"):
		env.ParseConfig('pkg-config freetype2 --cflags --libs')
	if (env["freetype"]!="no"):
		env.Append(CCFLAGS=['-DFREETYPE_ENABLED'])
		if (env["freetype"]=="builtin"):
			env.Append(CPPPATH=['#tools/freetype'])
			env.Append(CPPPATH=['#tools/freetype/freetype/include'])
	env.Append(CPPFLAGS=['-DOPENGL_ENABLED','-DGLEW_ENABLED'])
	env.Append(CPPFLAGS=["-DALSA_ENABLED"])
	# PulseAudio is best-effort: enabled only if its dev package is present.
	if (env["pulseaudio"]=="yes"):
		if not os.system("pkg-config --exists libpulse-simple"):
			print("Enabling PulseAudio")
			env.Append(CPPFLAGS=["-DPULSEAUDIO_ENABLED"])
			env.ParseConfig('pkg-config --cflags --libs libpulse-simple')
		else:
			print("PulseAudio development libraries not found, disabling driver")
	env.Append(CPPFLAGS=['-DX11_ENABLED','-DUNIX_ENABLED','-DGLES2_ENABLED','-DGLES_OVER_GL'])
	env.Append(LIBS=['GL', 'GLU', 'pthread','asound','z']) #TODO detect linux/BSD!
	#env.Append(CPPFLAGS=['-DMPC_FIXED_POINT'])
	#host compiler is default..
	# Cross-width builds: 64-bit host targeting 32-bit and vice versa.
	if (is64 and env["bits"]=="32"):
		env.Append(CPPFLAGS=['-m32'])
		env.Append(LINKFLAGS=['-m32','-L/usr/lib/i386-linux-gnu'])
	elif (not is64 and env["bits"]=="64"):
		env.Append(CPPFLAGS=['-m64'])
		env.Append(LINKFLAGS=['-m64','-L/usr/lib/i686-linux-gnu'])
	import methods
	# Custom builders that turn .glsl sources into generated headers.
	env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
	env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
	env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
	#env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
	if(env["new_wm_api"]=="yes"):
		env.Append(CPPFLAGS=['-DNEW_WM_API'])
		env.ParseConfig('pkg-config xinerama --cflags --libs')
| hipgraphics/godot | platform/x11/detect.py | Python | mit | 5,057 |
"""
Forwards events to a HTTP call. The configuration used by this notifier
is as follows:
url
Full URL to contact with the event data. A POST request will be made to this
URL with the contents of the events in the body.
Eventually this should be enhanced to support authentication credentials as well.
"""
import base64
import httplib
import logging
import threading
from pulp.server.compat import json, json_util
TYPE_ID = 'http'
_logger = logging.getLogger(__name__)
def handle_event(notifier_config, event):
    """Serialize *event* and POST it to the configured URL.

    The HTTP call is made on a daemon thread so that the caller (the
    tasking subsystem) can never block or deadlock on a slow endpoint.
    """
    event_data = event.data()
    _logger.info(event_data)

    payload = json.dumps(event_data, default=json_util.default)
    worker = threading.Thread(target=_send_post, args=[notifier_config, payload])
    worker.setDaemon(True)
    worker.start()
def _send_post(notifier_config, body):
    """POST *body* (a serialized JSON document) to the configured URL.

    Runs on a worker thread; failures are logged rather than raised.

    :param notifier_config: notifier configuration; must contain a non-empty
        'url' key, and may contain 'username'/'password' for HTTP basic auth
    :param body: JSON request body as a string
    """
    # Basic headers
    headers = {'Accept': 'application/json',
               'Content-Type': 'application/json'}

    # Parse the URL for the pieces we need
    if 'url' not in notifier_config or not notifier_config['url']:
        _logger.warn('HTTP notifier configured without a URL; cannot fire event')
        return

    url = notifier_config['url']
    try:
        # e.g. "https://host/path" -> ("https:", "", "host", "path")
        scheme, empty, server, path = url.split('/', 3)
    except ValueError:
        _logger.warn('Improperly configured post_sync_url: %(u)s' % {'u': url})
        return

    connection = _create_connection(scheme, server)

    # Process authentication
    if 'username' in notifier_config and 'password' in notifier_config:
        raw = ':'.join((notifier_config['username'], notifier_config['password']))
        # NOTE(review): base64.encodestring is the Python 2 spelling; this
        # module targets py2 (httplib) — confirm before porting.
        encoded = base64.encodestring(raw)[:-1]
        headers['Authorization'] = 'Basic ' + encoded

    try:
        connection.request('POST', '/' + path, body=body, headers=headers)
        response = connection.getresponse()
        if response.status != httplib.OK:
            error_msg = response.read()
            _logger.warn('Error response from HTTP notifier: %(e)s' % {'e': error_msg})
    finally:
        # Always release the socket, even if the request or read raises;
        # previously an exception here leaked the connection.
        connection.close()
def _create_connection(scheme, server):
if scheme.startswith('https'):
connection = httplib.HTTPSConnection(server)
else:
connection = httplib.HTTPConnection(server)
return connection
| rbramwell/pulp | server/pulp/server/event/http.py | Python | gpl-2.0 | 2,330 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
# mappings for table dumps
# "remember to add indexes!"
# Each entry maps a DocType name — optionally suffixed with "[Report Name]",
# apparently to scope the mapping to a single report (TODO confirm against
# the consumer of this dict) — to the query pieces used when dumping its
# table:
#   columns     - SQL select expressions (aliases allowed)
#   conditions  - WHERE clauses, AND-ed together
#   order_by    - ORDER BY clause
#   links       - {local_field: [linked DocType, linked column]} relations
#   from        - explicit FROM clause when joining parent/child tables
#   force_index - index hint for large tables
data_map = {
	"Company": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"]
	},
	"Fiscal Year": {
		"columns": ["name", "year_start_date", "year_end_date"],
		"conditions": ["docstatus < 2"],
	},

	# Accounts
	"Account": {
		"columns": ["name", "parent_account", "lft", "rgt", "report_type",
			"company", "is_group"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft",
		"links": {
			"company": ["Company", "name"],
		}
	},
	"Cost Center": {
		"columns": ["name", "lft", "rgt"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"GL Entry": {
		"columns": ["name", "account", "posting_date", "cost_center", "debit", "credit",
			"is_opening", "company", "voucher_type", "voucher_no", "remarks"],
		"order_by": "posting_date, account",
		"links": {
			"account": ["Account", "name"],
			"company": ["Company", "name"],
			"cost_center": ["Cost Center", "name"]
		}
	},

	# Stock
	"Item": {
		"columns": ["name", "if(item_name=name, '', item_name) as item_name", "description",
			"item_group as parent_item_group", "stock_uom", "brand", "valuation_method"],
		# "conditions": ["docstatus < 2"],
		"order_by": "name",
		"links": {
			"parent_item_group": ["Item Group", "name"],
			"brand": ["Brand", "name"]
		}
	},
	"Item Group": {
		"columns": ["name", "parent_item_group"],
		# "conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"Brand": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Project": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Warehouse": {
		"columns": ["name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Stock Ledger Entry": {
		"columns": ["name", "posting_date", "posting_time", "item_code", "warehouse",
			"actual_qty as qty", "voucher_type", "voucher_no", "project",
			"incoming_rate as incoming_rate", "stock_uom", "serial_no",
			"qty_after_transaction", "valuation_rate"],
		"order_by": "posting_date, posting_time, creation",
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"],
			"project": ["Project", "name"]
		},
		"force_index": "posting_sort_index"
	},
	"Serial No": {
		"columns": ["name", "purchase_rate as incoming_rate"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Stock Entry": {
		"columns": ["name", "purpose"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date, posting_time, name",
	},
	"Material Request Item": {
		"columns": ["item.name as name", "item_code", "warehouse",
			"(qty - ordered_qty) as qty"],
		"from": "`tabMaterial Request Item` item, `tabMaterial Request` main",
		"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
			"ifnull(warehouse, '')!=''", "qty > ordered_qty"],
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"]
		},
	},
	"Purchase Order Item": {
		"columns": ["item.name as name", "item_code", "warehouse",
			"(qty - received_qty)*conversion_factor as qty"],
		"from": "`tabPurchase Order Item` item, `tabPurchase Order` main",
		"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
			"ifnull(warehouse, '')!=''", "qty > received_qty"],
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"]
		},
	},
	"Sales Order Item": {
		"columns": ["item.name as name", "item_code", "(qty - delivered_qty)*conversion_factor as qty", "warehouse"],
		"from": "`tabSales Order Item` item, `tabSales Order` main",
		"conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'",
			"ifnull(warehouse, '')!=''", "qty > delivered_qty"],
		"links": {
			"item_code": ["Item", "name"],
			"warehouse": ["Warehouse", "name"]
		},
	},

	# Sales
	"Customer": {
		"columns": ["name", "if(customer_name=name, '', customer_name) as customer_name",
			"customer_group as parent_customer_group", "territory as parent_territory"],
		"conditions": ["docstatus < 2"],
		"order_by": "name",
		"links": {
			"parent_customer_group": ["Customer Group", "name"],
			"parent_territory": ["Territory", "name"],
		}
	},
	"Customer Group": {
		"columns": ["name", "parent_customer_group"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"Territory": {
		"columns": ["name", "parent_territory"],
		"conditions": ["docstatus < 2"],
		"order_by": "lft"
	},
	"Sales Invoice": {
		"columns": ["name", "customer", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"customer": ["Customer", "name"],
			"company":["Company", "name"]
		}
	},
	"Sales Invoice Item": {
		"columns": ["name", "parent", "item_code", "stock_qty as qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Sales Invoice", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Sales Order": {
		"columns": ["name", "customer", "transaction_date as posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "transaction_date",
		"links": {
			"customer": ["Customer", "name"],
			"company":["Company", "name"]
		}
	},
	"Sales Order Item[Sales Analytics]": {
		"columns": ["name", "parent", "item_code", "stock_qty as qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Sales Order", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Delivery Note": {
		"columns": ["name", "customer", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"customer": ["Customer", "name"],
			"company":["Company", "name"]
		}
	},
	"Delivery Note Item[Sales Analytics]": {
		"columns": ["name", "parent", "item_code", "stock_qty as qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Delivery Note", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Supplier": {
		"columns": ["name", "if(supplier_name=name, '', supplier_name) as supplier_name",
			"supplier_group as parent_supplier_group"],
		"conditions": ["docstatus < 2"],
		"order_by": "name",
		"links": {
			"parent_supplier_group": ["Supplier Group", "name"],
		}
	},
	"Supplier Group": {
		"columns": ["name", "parent_supplier_group"],
		"conditions": ["docstatus < 2"],
		"order_by": "name"
	},
	"Purchase Invoice": {
		"columns": ["name", "supplier", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"supplier": ["Supplier", "name"],
			"company":["Company", "name"]
		}
	},
	"Purchase Invoice Item": {
		"columns": ["name", "parent", "item_code", "stock_qty as qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Purchase Invoice", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Purchase Order": {
		"columns": ["name", "supplier", "transaction_date as posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"supplier": ["Supplier", "name"],
			"company":["Company", "name"]
		}
	},
	"Purchase Order Item[Purchase Analytics]": {
		"columns": ["name", "parent", "item_code", "stock_qty as qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Purchase Order", "name"],
			"item_code": ["Item", "name"]
		}
	},
	"Purchase Receipt": {
		"columns": ["name", "supplier", "posting_date", "company"],
		"conditions": ["docstatus=1"],
		"order_by": "posting_date",
		"links": {
			"supplier": ["Supplier", "name"],
			"company":["Company", "name"]
		}
	},
	"Purchase Receipt Item[Purchase Analytics]": {
		"columns": ["name", "parent", "item_code", "stock_qty as qty", "base_net_amount"],
		"conditions": ["docstatus=1", "ifnull(parent, '')!=''"],
		"order_by": "parent",
		"links": {
			"parent": ["Purchase Receipt", "name"],
			"item_code": ["Item", "name"]
		}
	},
	# Support
	"Issue": {
		"columns": ["name","status","creation","resolution_date","first_responded_on"],
		"conditions": ["docstatus < 2"],
		"order_by": "creation"
	},

	# Manufacturing
	"Work Order": {
		"columns": ["name","status","creation","planned_start_date","planned_end_date","status","actual_start_date","actual_end_date", "modified"],
		"conditions": ["docstatus = 1"],
		"order_by": "creation"
	},

	#Medical
	"Patient": {
		"columns": ["name", "creation", "owner", "if(patient_name=name, '', patient_name) as patient_name"],
		"conditions": ["docstatus < 2"],
		"order_by": "name",
		"links": {
			"owner" : ["User", "name"]
		}
	},
	"Patient Appointment": {
		"columns": ["name", "appointment_type", "patient", "practitioner", "appointment_date", "department", "status", "company"],
		"order_by": "name",
		"links": {
			"practitioner": ["Healthcare Practitioner", "name"],
			"appointment_type": ["Appointment Type", "name"]
		}
	},
	"Healthcare Practitioner": {
		"columns": ["name", "department"],
		"order_by": "name",
		"links": {
			"department": ["Department", "name"],
		}
	},
	"Appointment Type": {
		"columns": ["name"],
		"order_by": "name"
	},
	"Medical Department": {
		"columns": ["name"],
		"order_by": "name"
	}
}
| Zlash65/erpnext | erpnext/startup/report_data_map.py | Python | gpl-3.0 | 9,520 |
# -*- coding: utf-8 -*-
# pylint: disable=protected-access
"""Test for Video Xmodule functional logic.
These test data read from xml, not from mongo.
We have a ModuleStoreTestCase class defined in
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py. You can
search for usages of this in the cms and lms tests for examples. You use
this so that it will do things like point the modulestore setting to mongo,
flush the contentstore before and after, load the templates, etc.
You can then use the CourseFactory and XModuleItemFactory as defined
in common/lib/xmodule/xmodule/modulestore/tests/factories.py to create
the course, section, subsection, unit, etc.
"""
import unittest
import datetime
from uuid import uuid4
from lxml import etree
from mock import ANY, Mock, patch
import ddt
from django.conf import settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
from xmodule.video_module import VideoDescriptor, create_youtube_string
from xmodule.video_module.transcripts_utils import download_youtube_subs, save_to_store
from . import LogicTest
from .test_import import DummySystem
SRT_FILEDATA = '''
0
00:00:00,270 --> 00:00:02,720
sprechen sie deutsch?
1
00:00:02,720 --> 00:00:05,430
Ja, ich spreche Deutsch
'''
CRO_SRT_FILEDATA = '''
0
00:00:00,270 --> 00:00:02,720
Dobar dan!
1
00:00:02,720 --> 00:00:05,430
Kako ste danas?
'''
YOUTUBE_SUBTITLES = (
"LILA FISHER: Hi, welcome to Edx. I'm Lila Fisher, an Edx fellow helping to put together these"
" courses. As you know, our courses are entirely online. So before we start learning about the"
" subjects that brought you here, let's learn about the tools that you will use to navigate through"
" the course material. Let's start with what is on your screen right now. You are watching a video"
" of me talking. You have several tools associated with these videos. Some of them are standard"
" video buttons, like the play Pause Button on the bottom left. Like most video players, you can see"
" how far you are into this particular video segment and how long the entire video segment is."
" Something that you might not be used to is the speed option. While you are going through the"
" videos, you can speed up or slow down the video player with these buttons. Go ahead and try that"
" now. Make me talk faster and slower. If you ever get frustrated by the pace of speech, you can"
" adjust it this way. Another great feature is the transcript on the side. This will follow along"
" with everything that I am saying as I am saying it, so you can read along if you like. You can"
" also click on any of the words, and you will notice that the video jumps to that word. The video"
" slider at the bottom of the video will let you navigate through the video quickly. If you ever"
" find the transcript distracting, you can toggle the captioning button in order to make it go away"
" or reappear. Now that you know about the video player, I want to point out the sequence navigator."
" Right now you're in a lecture sequence, which interweaves many videos and practice exercises. You"
" can see how far you are in a particular sequence by observing which tab you're on. You can"
" navigate directly to any video or exercise by clicking on the appropriate tab. You can also"
" progress to the next element by pressing the Arrow button, or by clicking on the next tab. Try"
" that now. The tutorial will continue in the next video."
)
def instantiate_descriptor(**field_data):
    """
    Build a VideoDescriptor for tests, seeding its fields from *field_data*.
    """
    descriptor_system = get_test_descriptor_system()
    usage_key = SlashSeparatedCourseKey(
        'org', 'course', 'run').make_usage_key('video', 'SampleProblem')
    scope_ids = ScopeIds(None, None, usage_key, usage_key)
    return descriptor_system.construct_xblock_from_class(
        VideoDescriptor,
        scope_ids=scope_ids,
        field_data=DictFieldData(field_data),
    )
# Because of the way xmodule.video_module.video_module imports edxval.api, we
# must mock the entire module, which requires making mock exception classes.
class _MockValVideoNotFoundError(Exception):
"""Mock ValVideoNotFoundError exception"""
pass
class _MockValCannotCreateError(Exception):
"""Mock ValCannotCreateError exception"""
pass
class VideoModuleTest(LogicTest):
    """Logic tests for Video Xmodule."""
    descriptor_class = VideoDescriptor

    raw_field_data = {
        'data': '<video />'
    }

    def test_parse_youtube(self):
        """Test parsing old-style Youtube ID strings into a dict."""
        parsed = VideoDescriptor._parse_youtube(
            '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'
        )
        self.assertEqual(parsed, {
            '0.75': 'jNCf2gIqpeE',
            '1.00': 'ZwkTiUPN0mg',
            '1.25': 'rsq9auxASqI',
            '1.50': 'kMyNdzVHHgg',
        })

    def test_parse_youtube_one_video(self):
        """
        Ensure that all keys are present and missing speeds map to the
        empty string.
        """
        parsed = VideoDescriptor._parse_youtube('0.75:jNCf2gIqpeE')
        self.assertEqual(parsed, {
            '0.75': 'jNCf2gIqpeE',
            '1.00': '',
            '1.25': '',
            '1.50': '',
        })

    def test_parse_youtube_invalid(self):
        """Ensure that ids that are invalid return an empty dict"""
        all_blank = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}

        # invalid id
        self.assertEqual(
            VideoDescriptor._parse_youtube('thisisaninvalidid'), all_blank)

        # another invalid id
        self.assertEqual(
            VideoDescriptor._parse_youtube(',::,:,,'), all_blank)

        # and another one, partially invalid
        self.assertEqual(
            VideoDescriptor._parse_youtube('0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,'),
            {'0.75': '',
             '1.00': 'AXdE34_U',
             '1.25': 'KLHF9K_Y',
             '1.50': 'VO3SxfeD'},
        )

    def test_parse_youtube_key_format(self):
        """
        Make sure that inconsistent speed keys are parsed correctly.
        """
        self.assertEqual(
            VideoDescriptor._parse_youtube('1.00:p2Q6BrNhdh8'),
            VideoDescriptor._parse_youtube('1.0:p2Q6BrNhdh8'),
        )

    def test_parse_youtube_empty(self):
        """
        Some courses have empty youtube attributes, so we should handle
        that well.
        """
        self.assertEqual(
            VideoDescriptor._parse_youtube(''),
            {'0.75': '', '1.00': '', '1.25': '', '1.50': ''},
        )
class VideoDescriptorTestBase(unittest.TestCase):
    """
    Base class for tests for VideoDescriptor
    """
    def setUp(self):
        super(VideoDescriptorTestBase, self).setUp()
        self.descriptor = instantiate_descriptor()

    def assertXmlEqual(self, expected, xml):
        """
        Assert that the given XML fragments have the same attributes, text, and
        (recursively) children
        """
        for attr in ('tag', 'attrib', 'text', 'tail'):
            self.assertEqual(getattr(expected, attr), getattr(xml, attr))
        # Same child tags, in the same order, then recurse pairwise.
        self.assertEqual([child.tag for child in expected],
                         [child.tag for child in xml])
        for expected_child, actual_child in zip(expected, xml):
            self.assertXmlEqual(expected_child, actual_child)
class TestCreateYoutubeString(VideoDescriptorTestBase):
    """
    Checks that create_youtube_string correctly extracts information from Video descriptor.
    """
    def test_create_youtube_string(self):
        """
        Test that Youtube ID strings are correctly created when writing back out to XML.
        """
        speed_ids = {
            'youtube_id_0_75': 'izygArpw-Qo',
            'youtube_id_1_0': 'p2Q6BrNhdh8',
            'youtube_id_1_25': '1EeWXzPdhSA',
            'youtube_id_1_5': 'rABDYkeK0x8',
        }
        for field_name, video_id in speed_ids.items():
            setattr(self.descriptor, field_name, video_id)
        self.assertEqual(
            create_youtube_string(self.descriptor),
            "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8",
        )

    def test_create_youtube_string_missing(self):
        """
        Test that Youtube IDs which aren't explicitly set aren't included in the output string.
        """
        self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
        self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
        self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
        self.assertEqual(
            create_youtube_string(self.descriptor),
            "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA",
        )
@ddt.ddt
class VideoDescriptorImportTestCase(unittest.TestCase):
"""
Make sure that VideoDescriptor can import an old XML-based video correctly.
"""
def assert_attributes_equal(self, video, attrs):
"""
Assert that `video` has the correct attributes. `attrs` is a map of {metadata_field: value}.
"""
for key, value in attrs.items():
self.assertEquals(getattr(video, key), value)
def test_constructor(self):
sample_xml = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="true"
download_video="true"
start_time="00:00:01"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ua" src="ukrainian_translation.srt" />
<transcript language="ge" src="german_translation.srt" />
</video>
'''
descriptor = instantiate_descriptor(data=sample_xml)
self.assert_attributes_equal(descriptor, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'download_video': True,
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg'],
'data': '',
'transcripts': {'ua': 'ukrainian_translation.srt', 'ge': 'german_translation.srt'}
})
def test_from_xml(self):
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="uk" src="ukrainian_translation.srt" />
<transcript language="de" src="german_translation.srt" />
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
'transcripts': {'uk': 'ukrainian_translation.srt', 'de': 'german_translation.srt'},
})
@ddt.data(
('course-v1:test_org+test_course+test_run',
'/asset-v1:test_org+test_course+test_run+type@asset+block@test.png'),
('test_org/test_course/test_run', '/c4x/test_org/test_course/asset/test.png')
)
@ddt.unpack
def test_from_xml_when_handout_is_course_asset(self, course_id_string, expected_handout_link):
"""
Test that if handout link is course_asset then it will contain targeted course_id in handout link.
"""
module_system = DummySystem(load_error_modules=True)
course_id = CourseKey.from_string(course_id_string)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="/asset-v1:test_org_1+test_course_1+test_run_1+type@asset+block@test.png"/>
<transcript language="uk" src="ukrainian_translation.srt" />
<transcript language="de" src="german_translation.srt" />
</video>
'''
id_generator = Mock()
id_generator.target_course_id = course_id
output = VideoDescriptor.from_xml(xml_data, module_system, id_generator)
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': expected_handout_link,
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
'transcripts': {'uk': 'ukrainian_translation.srt', 'de': 'german_translation.srt'},
})
def test_from_xml_missing_attributes(self):
"""
Ensure that attributes have the right values if they aren't
explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
show_captions="true">
<source src="http://www.example.com/source.mp4"/>
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
    def test_from_xml_missing_download_track(self):
        """
        Ensure that when a <track> element is present but the
        ``download_track`` attribute is not set explicitly, the field
        defaults to True.
        """
        module_system = DummySystem(load_error_modules=True)
        xml_data = '''
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
                   show_captions="true">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
            </video>
        '''
        output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
        self.assert_attributes_equal(output, {
            'youtube_id_0_75': '',
            'youtube_id_1_0': 'p2Q6BrNhdh8',
            'youtube_id_1_25': '1EeWXzPdhSA',
            'youtube_id_1_5': '',
            'show_captions': True,
            'start_time': datetime.timedelta(seconds=0.0),
            'end_time': datetime.timedelta(seconds=0.0),
            'track': 'http://www.example.com/track',
            # download_track is not in the XML; presence of <track> flips it on.
            'download_track': True,
            'download_video': True,
            'html5_sources': ['http://www.example.com/source.mp4'],
            'data': '',
            'transcripts': {},
        })
def test_from_xml_no_attributes(self):
"""
Make sure settings are correct if none are explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '<video></video>'
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': '3_yD_cEKoCk',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': False,
'html5_sources': [],
'data': '',
'transcripts': {},
})
    def test_from_xml_double_quotes(self):
        """
        Make sure we can handle the double-quoted string format (which was used for exporting for
        a few weeks).
        """
        module_system = DummySystem(load_error_modules=True)
        # Attribute values below carry an extra layer of &quot;-escaped quotes;
        # the importer must strip that layer on its way in.
        xml_data = '''
            <video display_name="&quot;display_name&quot;"
                   html5_sources="[&quot;source_1&quot;, &quot;source_2&quot;]"
                   show_captions="false"
                   download_video="true"
                   sub="&quot;html5_subtitles&quot;"
                   track="&quot;http://www.example.com/track&quot;"
                   handout="&quot;http://www.example.com/handout&quot;"
                   download_track="true"
                   youtube_id_0_75="&quot;OEoXaMPEzf65&quot;"
                   youtube_id_1_25="&quot;OEoXaMPEzf125&quot;"
                   youtube_id_1_5="&quot;OEoXaMPEzf15&quot;"
                   youtube_id_1_0="&quot;OEoXaMPEzf10&quot;"
            />
        '''
        output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
        self.assert_attributes_equal(output, {
            'youtube_id_0_75': 'OEoXaMPEzf65',
            'youtube_id_1_0': 'OEoXaMPEzf10',
            'youtube_id_1_25': 'OEoXaMPEzf125',
            'youtube_id_1_5': 'OEoXaMPEzf15',
            'show_captions': False,
            'start_time': datetime.timedelta(seconds=0.0),
            'end_time': datetime.timedelta(seconds=0.0),
            'track': 'http://www.example.com/track',
            'handout': 'http://www.example.com/handout',
            'download_track': True,
            'download_video': True,
            'html5_sources': ["source_1", "source_2"],
            'data': ''
        })
    def test_from_xml_double_quote_concatenated_youtube(self):
        """
        Ensure the extra &quot;-quoting inside a concatenated ``speed:id``
        youtube attribute is stripped on import.
        """
        module_system = DummySystem(load_error_modules=True)
        xml_data = '''
            <video display_name="Test Video"
                   youtube="1.0:&quot;p2Q6BrNhdh8&quot;,1.25:&quot;1EeWXzPdhSA&quot;">
            </video>
        '''
        output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
        self.assert_attributes_equal(output, {
            'youtube_id_0_75': '',
            'youtube_id_1_0': 'p2Q6BrNhdh8',
            'youtube_id_1_25': '1EeWXzPdhSA',
            'youtube_id_1_5': '',
            'show_captions': True,
            'start_time': datetime.timedelta(seconds=0.0),
            'end_time': datetime.timedelta(seconds=0.0),
            'track': '',
            'handout': None,
            'download_track': False,
            'download_video': False,
            'html5_sources': [],
            'data': ''
        })
    def test_old_video_format(self):
        """
        Test backwards compatibility with VideoModule's XML format.
        """
        module_system = DummySystem(load_error_modules=True)
        # Legacy format: times given as from/to, plus a duplicate `source`
        # attribute alongside the <source> child element.
        xml_data = """
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   source="http://www.example.com/source.mp4"
                   from="00:00:01"
                   to="00:01:00">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
            </video>
        """
        output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
        self.assert_attributes_equal(output, {
            'youtube_id_0_75': 'izygArpw-Qo',
            'youtube_id_1_0': 'p2Q6BrNhdh8',
            'youtube_id_1_25': '1EeWXzPdhSA',
            'youtube_id_1_5': 'rABDYkeK0x8',
            'show_captions': False,
            'start_time': datetime.timedelta(seconds=1),
            'end_time': datetime.timedelta(seconds=60),
            'track': 'http://www.example.com/track',
            # 'download_track': True,
            'html5_sources': ['http://www.example.com/source.mp4'],
            'data': '',
        })
    def test_old_video_data(self):
        """
        Ensure that Video is able to read VideoModule's model data.
        """
        module_system = DummySystem(load_error_modules=True)
        # Same legacy from/to timing attributes, but no duplicate `source`
        # attribute this time.
        xml_data = """
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   from="00:00:01"
                   to="00:01:00">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
            </video>
        """
        video = VideoDescriptor.from_xml(xml_data, module_system, Mock())
        self.assert_attributes_equal(video, {
            'youtube_id_0_75': 'izygArpw-Qo',
            'youtube_id_1_0': 'p2Q6BrNhdh8',
            'youtube_id_1_25': '1EeWXzPdhSA',
            'youtube_id_1_5': 'rABDYkeK0x8',
            'show_captions': False,
            'start_time': datetime.timedelta(seconds=1),
            'end_time': datetime.timedelta(seconds=60),
            'track': 'http://www.example.com/track',
            # 'download_track': True,
            'html5_sources': ['http://www.example.com/source.mp4'],
            'data': ''
        })
    def test_import_with_float_times(self):
        """
        Ensure that legacy float-valued ``from``/``to`` attributes (seconds)
        are parsed into timedeltas on import.
        """
        module_system = DummySystem(load_error_modules=True)
        xml_data = """
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   from="1.0"
                   to="60.0">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
            </video>
        """
        video = VideoDescriptor.from_xml(xml_data, module_system, Mock())
        self.assert_attributes_equal(video, {
            'youtube_id_0_75': 'izygArpw-Qo',
            'youtube_id_1_0': 'p2Q6BrNhdh8',
            'youtube_id_1_25': '1EeWXzPdhSA',
            'youtube_id_1_5': 'rABDYkeK0x8',
            'show_captions': False,
            'start_time': datetime.timedelta(seconds=1),
            'end_time': datetime.timedelta(seconds=60),
            'track': 'http://www.example.com/track',
            # 'download_track': True,
            'html5_sources': ['http://www.example.com/source.mp4'],
            'data': ''
        })
    @patch('xmodule.video_module.video_module.edxval_api')
    def test_import_val_data(self, mock_val_api):
        """
        Verify that a nested <video_asset> element is forwarded to edxval's
        import_from_xml together with the video id and target course id.
        """
        def mock_val_import(xml, edx_video_id, course_id):
            """Mock edxval.api.import_from_xml"""
            self.assertEqual(xml.tag, 'video_asset')
            self.assertEqual(dict(xml.items()), {'mock_attr': ''})
            self.assertEqual(edx_video_id, 'test_edx_video_id')
            self.assertEqual(course_id, 'test_course_id')

        mock_val_api.import_from_xml = Mock(wraps=mock_val_import)
        module_system = DummySystem(load_error_modules=True)

        # import new edx_video_id
        xml_data = """
            <video edx_video_id="test_edx_video_id">
                <video_asset mock_attr=""/>
            </video>
        """
        id_generator = Mock()
        id_generator.target_course_id = 'test_course_id'
        video = VideoDescriptor.from_xml(xml_data, module_system, id_generator)

        self.assert_attributes_equal(video, {'edx_video_id': 'test_edx_video_id'})
        mock_val_api.import_from_xml.assert_called_once_with(ANY, 'test_edx_video_id', course_id='test_course_id')
@patch('xmodule.video_module.video_module.edxval_api')
def test_import_val_data_invalid(self, mock_val_api):
mock_val_api.ValCannotCreateError = _MockValCannotCreateError
mock_val_api.import_from_xml = Mock(side_effect=mock_val_api.ValCannotCreateError)
module_system = DummySystem(load_error_modules=True)
# Negative duration is invalid
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="-1"/>
</video>
"""
with self.assertRaises(mock_val_api.ValCannotCreateError):
VideoDescriptor.from_xml(xml_data, module_system, id_generator=Mock())
class VideoExportTestCase(VideoDescriptorTestBase):
    """
    Make sure that VideoDescriptor can export itself to XML correctly.
    """
    @patch('xmodule.video_module.video_module.edxval_api')
    def test_export_to_xml(self, mock_val_api):
        """
        Test that we write the correct XML on export.
        """
        def mock_val_export(edx_video_id):
            """Mock edxval.api.export_to_xml"""
            return etree.Element(
                'video_asset',
                attrib={'export_edx_video_id': edx_video_id}
            )

        mock_val_api.export_to_xml = mock_val_export
        self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
        self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
        self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
        self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
        self.descriptor.show_captions = False
        self.descriptor.start_time = datetime.timedelta(seconds=1.0)
        self.descriptor.end_time = datetime.timedelta(seconds=60)
        self.descriptor.track = 'http://www.example.com/track'
        self.descriptor.handout = 'http://www.example.com/handout'
        self.descriptor.download_track = True
        self.descriptor.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']
        self.descriptor.download_video = True
        self.descriptor.transcripts = {'ua': 'ukrainian_translation.srt', 'ge': 'german_translation.srt'}
        self.descriptor.edx_video_id = 'test_edx_video_id'

        xml = self.descriptor.definition_to_xml(None)  # We don't use the `resource_fs` parameter
        parser = etree.XMLParser(remove_blank_text=True)
        xml_string = '''\
         <video url_name="SampleProblem" start_time="0:00:01" youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8" show_captions="false" end_time="0:01:00" download_video="true" download_track="true">
           <source src="http://www.example.com/source.mp4"/>
           <source src="http://www.example.com/source.ogg"/>
           <track src="http://www.example.com/track"/>
           <handout src="http://www.example.com/handout"/>
           <transcript language="ge" src="german_translation.srt" />
           <transcript language="ua" src="ukrainian_translation.srt" />
           <video_asset export_edx_video_id="test_edx_video_id"/>
         </video>
        '''
        expected = etree.XML(xml_string, parser=parser)
        self.assertXmlEqual(expected, xml)

    @patch('xmodule.video_module.video_module.edxval_api')
    def test_export_to_xml_val_error(self, mock_val_api):
        """
        Export should succeed without VAL data if the video does not exist.
        """
        mock_val_api.ValVideoNotFoundError = _MockValVideoNotFoundError
        mock_val_api.export_to_xml = Mock(side_effect=mock_val_api.ValVideoNotFoundError)
        self.descriptor.edx_video_id = 'test_edx_video_id'

        xml = self.descriptor.definition_to_xml(None)
        parser = etree.XMLParser(remove_blank_text=True)
        xml_string = '<video url_name="SampleProblem" download_video="false"/>'
        expected = etree.XML(xml_string, parser=parser)
        self.assertXmlEqual(expected, xml)

    def test_export_to_xml_empty_end_time(self):
        """
        An end_time of zero should be omitted from the exported XML.
        """
        self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
        self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
        self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
        self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
        self.descriptor.show_captions = False
        self.descriptor.start_time = datetime.timedelta(seconds=5.0)
        self.descriptor.end_time = datetime.timedelta(seconds=0.0)
        self.descriptor.track = 'http://www.example.com/track'
        self.descriptor.download_track = True
        self.descriptor.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']
        self.descriptor.download_video = True

        xml = self.descriptor.definition_to_xml(None)  # We don't use the `resource_fs` parameter
        parser = etree.XMLParser(remove_blank_text=True)
        xml_string = '''\
         <video url_name="SampleProblem" start_time="0:00:05" youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8" show_captions="false" download_video="true" download_track="true">
           <source src="http://www.example.com/source.mp4"/>
           <source src="http://www.example.com/source.ogg"/>
           <track src="http://www.example.com/track"/>
         </video>
        '''
        expected = etree.XML(xml_string, parser=parser)
        self.assertXmlEqual(expected, xml)

    def test_export_to_xml_empty_parameters(self):
        """
        Test XML export with defaults.
        """
        xml = self.descriptor.definition_to_xml(None)
        # Check that download_video field is also set to default (False) in xml for backward compatibility
        expected = '<video url_name="SampleProblem" download_video="false"/>\n'
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(expected, etree.tostring(xml, pretty_print=True))

    def test_export_to_xml_with_transcripts_as_none(self):
        """
        Test XML export with transcripts being overridden to None.
        """
        self.descriptor.transcripts = None
        xml = self.descriptor.definition_to_xml(None)
        expected = '<video url_name="SampleProblem" download_video="false"/>\n'
        self.assertEqual(expected, etree.tostring(xml, pretty_print=True))

    def test_export_to_xml_invalid_characters_in_attributes(self):
        """
        Test that XML export raises ValueError (from lxml) when an attribute
        contains characters that are illegal in XML, such as an ASCII
        control character.
        """
        self.descriptor.display_name = '\x1e'
        with self.assertRaises(ValueError):
            self.descriptor.definition_to_xml(None)

    def test_export_to_xml_unicode_characters(self):
        """
        Test XML export handles the unicode characters.
        """
        self.descriptor.display_name = '这是文'
        xml = self.descriptor.definition_to_xml(None)
        self.assertEqual(xml.get('display_name'), u'\u8fd9\u662f\u6587')
class VideoDescriptorIndexingTestCase(unittest.TestCase):
    """
    Make sure that VideoDescriptor can format data for indexing as expected.
    """

    def setUp(self):
        """
        Overrides YOUTUBE and CONTENTSTORE settings
        """
        super(VideoDescriptorIndexingTestCase, self).setUp()
        # Remember the original settings (None when unset) so cleanup() can
        # either restore them or delete the test-only overrides.
        self.youtube_setting = getattr(settings, "YOUTUBE", None)
        self.contentstore_setting = getattr(settings, "CONTENTSTORE", None)
        settings.YOUTUBE = {
            # YouTube JavaScript API
            'API': 'www.youtube.com/iframe_api',

            # URL to get YouTube metadata
            'METADATA_URL': 'www.googleapis.com/youtube/v3/videos/',

            # Current youtube api for requesting transcripts.
            # For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
            'TEXT_API': {
                'url': 'video.google.com/timedtext',
                'params': {
                    'lang': 'en',
                    'v': 'set_youtube_id_of_11_symbols_here',
                },
            },
        }

        settings.CONTENTSTORE = {
            'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
            'DOC_STORE_CONFIG': {
                'host': 'localhost',
                # Unique db name per run so concurrent test runs don't collide.
                'db': 'test_xcontent_%s' % uuid4().hex,
            },
            # allow for additional options that can be keyed on a name, e.g. 'trashcan'
            'ADDITIONAL_OPTIONS': {
                'trashcan': {
                    'bucket': 'trash_fs'
                }
            }
        }

        self.addCleanup(self.cleanup)

    def cleanup(self):
        """
        Returns YOUTUBE and CONTENTSTORE settings to a default value
        """
        # Restore the saved value when one existed before the test;
        # otherwise remove the attribute we added in setUp().
        if self.youtube_setting:
            settings.YOUTUBE = self.youtube_setting
            self.youtube_setting = None
        else:
            del settings.YOUTUBE

        if self.contentstore_setting:
            settings.CONTENTSTORE = self.contentstore_setting
            self.contentstore_setting = None
        else:
            del settings.CONTENTSTORE

    def test_video_with_no_subs_index_dictionary(self):
        """
        Test index dictionary of a video module without subtitles.
        """
        xml_data = '''
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   download_track="false"
                   start_time="00:00:01"
                   download_video="false"
                   end_time="00:01:00">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
              <handout src="http://www.example.com/handout"/>
            </video>
        '''
        descriptor = instantiate_descriptor(data=xml_data)
        # With no subtitles, only the display name is indexed.
        self.assertEqual(descriptor.index_dictionary(), {
            "content": {"display_name": "Test Video"},
            "content_type": "Video"
        })

    def test_video_with_youtube_subs_index_dictionary(self):
        """
        Test index dictionary of a video module with YouTube subtitles.
        """
        xml_data_sub = '''
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   download_track="false"
                   sub="OEoXaMPEzfM"
                   start_time="00:00:01"
                   download_video="false"
                   end_time="00:01:00">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
              <handout src="http://www.example.com/handout"/>
            </video>
        '''
        descriptor = instantiate_descriptor(data=xml_data_sub)
        # Fetch the subtitles referenced by the `sub` attribute into the store.
        download_youtube_subs('OEoXaMPEzfM', descriptor, settings)
        self.assertEqual(descriptor.index_dictionary(), {
            "content": {
                "display_name": "Test Video",
                "transcript_en": YOUTUBE_SUBTITLES
            },
            "content_type": "Video"
        })

    def test_video_with_subs_and_transcript_index_dictionary(self):
        """
        Test index dictionary of a video module with
        YouTube subtitles and German transcript uploaded by a user.
        """
        xml_data_sub_transcript = '''
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   download_track="false"
                   sub="OEoXaMPEzfM"
                   start_time="00:00:01"
                   download_video="false"
                   end_time="00:01:00">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
              <handout src="http://www.example.com/handout"/>
              <transcript language="ge" src="subs_grmtran1.srt" />
            </video>
        '''
        descriptor = instantiate_descriptor(data=xml_data_sub_transcript)
        download_youtube_subs('OEoXaMPEzfM', descriptor, settings)
        save_to_store(SRT_FILEDATA, "subs_grmtran1.srt", 'text/srt', descriptor.location)
        # Both the English YouTube subs and the uploaded German transcript
        # should appear in the index, keyed by language code.
        self.assertEqual(descriptor.index_dictionary(), {
            "content": {
                "display_name": "Test Video",
                "transcript_en": YOUTUBE_SUBTITLES,
                "transcript_ge": "sprechen sie deutsch? Ja, ich spreche Deutsch",
            },
            "content_type": "Video"
        })

    def test_video_with_multiple_transcripts_index_dictionary(self):
        """
        Test index dictionary of a video module with
        two transcripts uploaded by a user.
        """
        xml_data_transcripts = '''
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   download_track="false"
                   start_time="00:00:01"
                   download_video="false"
                   end_time="00:01:00">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
              <handout src="http://www.example.com/handout"/>
              <transcript language="ge" src="subs_grmtran1.srt" />
              <transcript language="hr" src="subs_croatian1.srt" />
            </video>
        '''
        descriptor = instantiate_descriptor(data=xml_data_transcripts)
        save_to_store(SRT_FILEDATA, "subs_grmtran1.srt", 'text/srt', descriptor.location)
        save_to_store(CRO_SRT_FILEDATA, "subs_croatian1.srt", 'text/srt', descriptor.location)
        self.assertEqual(descriptor.index_dictionary(), {
            "content": {
                "display_name": "Test Video",
                "transcript_ge": "sprechen sie deutsch? Ja, ich spreche Deutsch",
                "transcript_hr": "Dobar dan! Kako ste danas?"
            },
            "content_type": "Video"
        })

    def test_video_with_multiple_transcripts_translation_retrieval(self):
        """
        Test translation retrieval of a video module with
        multiple transcripts uploaded by a user.
        """
        xml_data_transcripts = '''
            <video display_name="Test Video"
                   youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
                   show_captions="false"
                   download_track="false"
                   start_time="00:00:01"
                   download_video="false"
                   end_time="00:01:00">
              <source src="http://www.example.com/source.mp4"/>
              <track src="http://www.example.com/track"/>
              <handout src="http://www.example.com/handout"/>
              <transcript language="ge" src="subs_grmtran1.srt" />
              <transcript language="hr" src="subs_croatian1.srt" />
            </video>
        '''
        descriptor = instantiate_descriptor(data=xml_data_transcripts)
        # verify_assets=False: we only care about declared languages here,
        # not whether the transcript files actually exist in the store.
        translations = descriptor.available_translations(descriptor.get_transcripts_info(), verify_assets=False)
        self.assertEqual(translations, ['hr', 'ge'])

    def test_video_with_no_transcripts_translation_retrieval(self):
        """
        Test translation retrieval of a video module with
        no transcripts uploaded by a user- ie, that retrieval
        does not throw an exception.
        """
        descriptor = instantiate_descriptor(data=None)
        translations = descriptor.available_translations(descriptor.get_transcripts_info(), verify_assets=False)
        # Falls back to the default English translation.
        self.assertEqual(translations, ['en'])
| JioEducation/edx-platform | common/lib/xmodule/xmodule/tests/test_video.py | Python | agpl-3.0 | 42,276 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import io
import os
import random
import re
import numpy as np
from tensorflow.core.profiler import profile_pb2
from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
from tensorflow.python.util import compat
# Shorthand used by every test below to assemble profiler options.
builder = option_builder.ProfileOptionBuilder
class PrintModelAnalysisTest(test.TestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def testDumpToFile(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = builder(builder.trainable_variables_parameter()
).with_file_output(outfile).build()
with session.Session(config=self._no_rewrite_session_config()) as sess:
_ = lib.BuildSmallModel()
model_analyzer.profile(sess.graph, options=opts)
with gfile.Open(outfile, 'r') as f:
self.assertEqual(u'node name | # parameters\n'
'_TFProfRoot (--/451 params)\n'
' DW (3x3x3x6, 162/162 params)\n'
' DW2 (2x2x6x12, 288/288 params)\n'
' ScalarW (1, 1/1 params)\n',
lib.CheckAndRemoveDoc(f.read()))
  def testSelectEverythingDetail(self):
    """Profile with every column selected and spot-check the Conv2D row.

    Also verifies that a profiler restored from the dumped profile file
    reproduces the exact same report.
    """
    ops.reset_default_graph()
    dev = '/device:GPU:0' if test.is_gpu_available() else '/device:CPU:0'
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    opts = (builder(builder.trainable_variables_parameter())
            .with_file_output(outfile)
            .with_accounted_types(['.*'])
            .select(['micros', 'bytes', 'params', 'float_ops', 'occurrence',
                     'device', 'op_types', 'input_shapes']).build())

    with profile_context.ProfileContext(test.get_temp_dir(),
                                        trace_steps=[],
                                        dump_steps=[]) as pctx:
      with session.Session(
          config=self._no_rewrite_session_config()) as sess, ops.device(dev):
        x = lib.BuildSmallModel()

        sess.run(variables.global_variables_initializer())
        pctx.trace_next_step()
        pctx.dump_next_step()
        _ = sess.run(x)

        pctx.profiler.profile_name_scope(options=opts)

        with gfile.Open(outfile, 'r') as f:
          # pylint: disable=line-too-long
          dump_str = lib.CheckAndRemoveDoc(f.read())
          outputs = dump_str.split('\n')

          self.assertEqual(outputs[0],
                           'node name | # parameters | # float_ops | requested bytes | total execution time | accelerator execution time | cpu execution time | assigned devices | op types | op count (run|defined) | input shapes')

          for o in outputs[1:]:
            if o.find('Conv2D ') > 0:
              # Metrics are the comma-separated values inside the parentheses
              # of the report row; indices follow the column order above.
              metrics = o[o.find('(') +1: o.find(')')].split(',')
              # Make sure time is profiled.
              gap = 1 if test.is_gpu_available() else 2
              for i in range(3, 6, gap):
                mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)', metrics[i])
                self.assertGreater(float(mat.group(1)), 0.0)
                self.assertGreater(float(mat.group(2)), 0.0)
              # Make sure device is profiled.
              if test.is_gpu_available():
                self.assertTrue(metrics[6].find('gpu') > 0)
                self.assertFalse(metrics[6].find('cpu') > 0)
              else:
                self.assertFalse(metrics[6].find('gpu') > 0)
                self.assertTrue(metrics[6].find('cpu') > 0)
              # Make sure float_ops is profiled.
              mat = re.search('(.*)k/(.*)k flops', metrics[1].strip())
              self.assertGreater(float(mat.group(1)), 0.0)
              self.assertGreater(float(mat.group(2)), 0.0)
              # Make sure op_count is profiled.
              self.assertEqual(metrics[8].strip(), '1/1|1/1')
              # Make sure input_shapes is profiled.
              self.assertEqual(metrics[9].strip(), '0:2x6x6x3|1:3x3x3x6')

            if o.find('DW (3x3x3x6') > 0:
              metrics = o[o.find('(') +1: o.find(')')].split(',')
              mat = re.search('(.*)/(.*) params', metrics[1].strip())
              self.assertGreater(float(mat.group(1)), 0.0)
              self.assertGreater(float(mat.group(2)), 0.0)
          # pylint: enable=line-too-long

      # Test that profiler restored from profile file gives the same result.
      gfile.Remove(outfile)
      profile_file = os.path.join(test.get_temp_dir(), 'profile_1')
      with lib.ProfilerFromFile(profile_file) as profiler:
        profiler.profile_name_scope(options=opts)
        with gfile.Open(outfile, 'r') as f:
          self.assertEqual(dump_str, lib.CheckAndRemoveDoc(f.read()))
def testSelectEverything(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
opts = (builder(builder.trainable_variables_parameter())
.with_file_output(outfile)
.with_accounted_types(['.*'])
.select(['params', 'float_ops', 'occurrence', 'device', 'op_types',
'input_shapes']).build())
with session.Session(config=self._no_rewrite_session_config()
) as sess, ops.device('/device:CPU:0'):
x = lib.BuildSmallModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.profile(
sess.graph, run_meta, options=opts)
  def testSimpleCodeView(self):
    """Code view should report bytes/params/float_ops grouped by code line."""
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    # TODO(xpan): Test 'micros'. Since the execution time changes each run,
    # it's a bit difficult to test it now.
    opts = (builder(builder.trainable_variables_parameter())
            .with_file_output(outfile)
            .with_accounted_types(['.*'])
            .with_node_names(show_name_regexes=['.*model_analyzer_testlib.*'])
            .account_displayed_op_only(False)
            .select(['bytes', 'params', 'float_ops', 'num_hidden_ops', 'device',
                     'input_shapes']).build())

    with session.Session(config=self._no_rewrite_session_config()) as sess:
      x = lib.BuildSmallModel()

      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(x,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE),
                   run_metadata=run_meta)

      model_analyzer.profile(
          sess.graph, run_meta, cmd='code', options=opts)

      with gfile.Open(outfile, 'r') as f:
        # pylint: disable=line-too-long
        # Only the header prefix is checked; per-run values vary.
        self.assertEqual(
            'node name | requested bytes | # parameters | # float_ops | assigned devices | in',
            lib.CheckAndRemoveDoc(f.read())[0:80])
        # pylint: enable=line-too-long
  def testComplexCodeView(self):
    """Code view over the full model: check totals and per-line child nodes."""
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    opts = (builder(builder.trainable_variables_parameter())
            .with_file_output(outfile)
            .with_accounted_types(['.*'])
            .with_node_names(show_name_regexes=
                             ['.*model_analyzer_testlib.py.*'])
            .account_displayed_op_only(False)
            .select(['params', 'float_ops']).build())

    with profile_context.ProfileContext(test.get_temp_dir(),
                                        trace_steps=[],
                                        dump_steps=[]) as pctx:
      with session.Session(config=self._no_rewrite_session_config()) as sess:
        x = lib.BuildFullModel()

        sess.run(variables.global_variables_initializer())
        pctx.trace_next_step()
        _ = sess.run(x)
        tfprof_node = pctx.profiler.profile_python(options=opts)

        # pylint: disable=line-too-long
        with gfile.Open(outfile, 'r') as f:
          lines = f.read().split('\n')
          self.assertGreater(len(lines), 5)
          result = '\n'.join([l[:min(len(l), 80)] for l in lines])
          self.assertTrue(
              compat.as_text(lib.CheckAndRemoveDoc(result))
              .startswith('node name | # parameters | # float_ops'))

        self.assertLess(0, tfprof_node.total_exec_micros)
        self.assertEqual(2844, tfprof_node.total_parameters)
        #The graph is modifed when MKL is enabled,total_float_ops will
        #be different
        if test_util.IsMklEnabled():
          self.assertLess(101600, tfprof_node.total_float_ops)
        else:
          self.assertLess(145660, tfprof_node.total_float_ops)
        # Children are the source lines of BuildFullModel (plus gradients),
        # in a fixed order.
        self.assertEqual(8, len(tfprof_node.children))
        self.assertEqual('_TFProfRoot', tfprof_node.name)
        self.assertEqual(
            'model_analyzer_testlib.py:63:BuildFullModel',
            tfprof_node.children[0].name)
        self.assertEqual(
            'model_analyzer_testlib.py:63:BuildFullModel (gradient)',
            tfprof_node.children[1].name)
        self.assertEqual(
            'model_analyzer_testlib.py:67:BuildFullModel',
            tfprof_node.children[2].name)
        self.assertEqual(
            'model_analyzer_testlib.py:67:BuildFullModel (gradient)',
            tfprof_node.children[3].name)
        self.assertEqual(
            'model_analyzer_testlib.py:69:BuildFullModel',
            tfprof_node.children[4].name)
        self.assertEqual(
            'model_analyzer_testlib.py:70:BuildFullModel',
            tfprof_node.children[5].name)
        self.assertEqual(
            'model_analyzer_testlib.py:70:BuildFullModel (gradient)',
            tfprof_node.children[6].name)
        self.assertEqual(
            'model_analyzer_testlib.py:72:BuildFullModel',
            tfprof_node.children[7].name)
        # pylint: enable=line-too-long
def testCodeViewLeafGraphNode(self):
ops.reset_default_graph()
opts = (builder(builder.trainable_variables_parameter())
.with_empty_output()
.with_accounted_types(['.*'])
.account_displayed_op_only(False)
.select(['bytes', 'params', 'float_ops', 'device']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
tfprof_node = model_analyzer.profile(
sess.graph, run_meta, cmd='code', options=opts)
leaf = tfprof_node
while leaf.children:
self.assertEqual(0, len(leaf.graph_nodes))
leaf = leaf.children[0]
self.assertEqual(1, len(leaf.graph_nodes))
def testTimeline(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'timeline')
opts = (builder(builder.trainable_variables_parameter())
.with_max_depth(100000)
.with_step(0)
.with_timeline_output(outfile)
.with_accounted_types(['.*']).build())
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
_ = model_analyzer.profile(
sess.graph, run_meta, cmd='graph', options=opts)
with gfile.Open(outfile + '_0', 'r') as f:
# Test that a json file is created.
# TODO(xpan): tfprof Timeline isn't quite correct on Windows.
# Investigate why.
if os.name != 'nt':
self.assertLess(1000, len(f.read()))
else:
self.assertLess(1, len(f.read()))
  def testOpView(self):
    """Op view: groups ops by type, ordered by occurrence, and verifies the
    selected columns plus timing/occurrence invariants of the result tree."""
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), 'dump')
    # Only account op types occurring at least 10 times; order the report by
    # number of occurrences.
    opts = (builder(builder.trainable_variables_parameter())
            .with_file_output(outfile)
            .with_accounted_types(['.*'])
            .with_min_occurrence(10)
            .order_by('occurrence')
            .select(['params', 'micros', 'bytes',
                     'peak_bytes', 'residual_bytes',
                     'output_bytes', 'occurrence', 'input_shapes']).build())
    with session.Session(config=self._no_rewrite_session_config()) as sess:
      x = lib.BuildFullModel()
      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(x,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE),
                   run_metadata=run_meta)
      tfprof_node = model_analyzer.profile(
          sess.graph, run_meta, cmd='op', options=opts)
      with gfile.Open(outfile, 'r') as f:
        # Check the report header prefix (whitespace-insensitive).
        # pylint: disable=line-too-long
        self.assertEqual(
            'nodename|requestedbytes|peakbytes|residualbytes|outputbytes|totalexecutiontime|acceleratorexecutiontime|cpuexecutiontime|#parameters|opoccurrence(run|defined)|inputshapes',
            lib.CheckAndRemoveDoc(f.read()).replace('\t',
                                                    '').replace(' ', '')[0:170])
        # pylint: enable=line-too-long
      # Walk the single-child chain of op groups checking two invariants:
      # total micros decompose exactly into child totals, and occurrence
      # counts are non-increasing (report is ordered by occurrence).
      total_children = 0
      last_occurrence = 1e32
      input_shapes = 0
      last_total_micros = tfprof_node.total_exec_micros
      last_micros = tfprof_node.exec_micros
      while tfprof_node.children:
        for gnode in tfprof_node.graph_nodes:
          input_shapes += len(gnode.input_shapes)
        self.assertEqual(len(tfprof_node.children), 1)
        tfprof_node = tfprof_node.children[0]
        self.assertEqual(
            last_total_micros, tfprof_node.total_exec_micros + last_micros)
        last_total_micros = tfprof_node.total_exec_micros
        last_micros = tfprof_node.exec_micros
        total_children += 1
        self.assertLessEqual(len(tfprof_node.graph_nodes), last_occurrence)
        last_occurrence = len(tfprof_node.graph_nodes)
      # At least one profiled op must have recorded input shapes.
      self.assertGreater(input_shapes, 0)
def testAdvisor(self):
ops.reset_default_graph()
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildFullModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(
x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
advice_pb = model_analyzer.advise(sess.graph, run_meta)
self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
self.assertTrue('OperationChecker' in advice_pb.checkers)
checker = advice_pb.checkers['AcceleratorUtilizationChecker']
if test.is_gpu_available():
self.assertGreater(len(checker.reports), 0)
else:
self.assertEqual(len(checker.reports), 0)
checker = advice_pb.checkers['ExpensiveOperationChecker']
self.assertGreater(len(checker.reports), 0)
  def pprof_test_helper(self, attribute, should_fail=False):
    """Profile the full model with pprof output for a single `attribute`.

    Args:
      attribute: profiler column to select (e.g. 'micros', 'bytes').
      should_fail: if True, assert that no pprof file is produced (the
        attribute is unsupported for pprof output).
    """
    ops.reset_default_graph()
    outfile = os.path.join(test.get_temp_dir(), attribute + '_pprof.pb.gz')
    # Frames matching 'ops.py.*' are trimmed out of the pprof call stacks.
    opts = (builder(builder.time_and_memory())
            .select([attribute])
            .with_max_depth(100000)
            .with_node_names(trim_name_regexes=['ops.py.*'])
            .with_pprof_output(outfile).build())
    with session.Session(config=self._no_rewrite_session_config()) as sess:
      x = lib.BuildFullModel()
      sess.run(variables.global_variables_initializer())
      run_meta = config_pb2.RunMetadata()
      _ = sess.run(
          x,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE),
          run_metadata=run_meta)
      _ = model_analyzer.profile(
          sess.graph, run_meta, cmd='code', options=opts)
      if should_fail:
        self.assertFalse(gfile.Exists(outfile))
        return
      # The output is a gzipped pprof Profile proto; decompress and parse.
      profile_pb = profile_pb2.Profile()
      with gfile.Open(outfile, 'rb') as f:
        with gzip.GzipFile(fileobj=io.BytesIO(f.read())) as gzipf:
          profile_pb.ParseFromString(gzipf.read())
      self.assertGreater(len(profile_pb.sample), 10)
      self.assertGreater(len(profile_pb.location), 10)
      self.assertGreater(len(profile_pb.function), 10)
      self.assertGreater(len(profile_pb.string_table), 30)
      # The model contains an RNN and a while loop, so both should show up
      # in the string table; trimmed 'ops.py' frames must not.
      has_rnn = False
      has_loop = False
      for s in profile_pb.string_table:
        if s.find('rnn') > 0:
          has_rnn = True
        if s.find('while') > 0:
          has_loop = True
        self.assertFalse(s.startswith('ops.py'))
      self.assertTrue(has_rnn)
      self.assertTrue(has_loop)
def testPprof(self):
for attr in ['micros', 'bytes', 'accelerator_micros', 'cpu_micros',
'params', 'float_ops']:
self.pprof_test_helper(attr)
for attr in ['op_types', 'device', 'input_shapes']:
self.pprof_test_helper(attr, True)
def testMinOption(self):
ops.reset_default_graph()
def check_min(nodes, mm=0, mam=0, mcm=0, mb=0, mpb=0, mrb=0, mob=0):
for n in nodes:
if mm > 0:
self.assertGreaterEqual(n.exec_micros, mm)
if mam > 0:
self.assertGreaterEqual(n.accelerator_exec_micros, mam)
if mcm > 0:
self.assertGreaterEqual(n.cpu_exec_micros, mcm)
if mb > 0:
self.assertGreaterEqual(n.requested_bytes, mb)
if mpb > 0:
self.assertGreaterEqual(n.peak_bytes, mpb)
if mrb > 0:
self.assertGreaterEqual(n.residual_bytes, mrb)
if mob > 0:
self.assertGreaterEqual(n.output_bytes, mob)
check_min(n.children, mm, mam, mcm, mb, mpb, mrb, mob)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
min_val = random.randint(0, 10000)
opts = builder(builder.time_and_memory(min_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mm=min_val)
opts = builder(builder.time_and_memory(min_accelerator_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mam=min_val)
opts = builder(builder.time_and_memory(min_cpu_micros=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mcm=min_val)
opts = builder(builder.time_and_memory(min_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mb=min_val)
opts = builder(builder.time_and_memory(min_peak_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mpb=min_val)
opts = builder(builder.time_and_memory(min_residual_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mrb=min_val)
opts = builder(builder.time_and_memory(min_output_bytes=min_val)
).with_empty_output().build()
tfprof_node = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_min(tfprof_node.children, mob=min_val)
def testSelectOption(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), 'dump')
def check_selection(selected, not_selected):
with gfile.Open(outfile, 'r') as f:
s = f.read()
for attr in selected:
self.assertTrue(s.find(attr) > 0, s)
for attr in not_selected:
self.assertFalse(s.find(attr) > 0, s)
with session.Session(config=self._no_rewrite_session_config()) as sess:
x = lib.BuildSmallModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
opts = builder(builder.time_and_memory()
).with_file_output(outfile).select(['micros']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['total execution time', 'accelerator execution time'],
['bytes'])
opts = builder(builder.time_and_memory()
).with_file_output(outfile).select(['bytes']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['requested bytes'],
['peak bytes', 'residual bytes', 'output bytes'])
opts = builder(builder.time_and_memory()).with_file_output(
outfile).select(
['peak_bytes', 'residual_bytes', 'output_bytes']).build()
_ = model_analyzer.profile(
sess.graph, run_meta=run_meta, options=opts)
check_selection(['peak bytes', 'residual bytes', 'output bytes'],
['requested_bytes'])
  def _trainLoop(self, train_op, train_steps, time_dir, time_step,
                 memory_dir, memory_step, profile_dir, dump_step):
    """Run `train_op` for `train_steps` steps, checking at every step that
    the auto-profiling outputs appear exactly at the configured steps.

    Args:
      train_op: op to run each step.
      train_steps: number of training steps to run.
      time_dir/time_step: directory and steps where timing profiles appear.
      memory_dir/memory_step: directory and steps where memory profiles appear.
      profile_dir/dump_step: directory and steps where profile dumps appear.
    """
    with session.Session(config=self._no_rewrite_session_config()) as sess:
      sess.run(variables.global_variables_initializer())
      # start from 1 because variable_initializer took one step.
      for i in range(1, train_steps + 1):
        _ = sess.run(train_op)
        # Timing profile: exactly one file on configured steps, none
        # otherwise. Files are removed after checking so the directory is
        # empty for the next step.
        if i in time_step:
          ret = gfile.ListDirectory(time_dir)
          self.assertEqual(len(ret), 1)
          self.assertTrue(
              gfile.Open(os.path.join(time_dir, ret[0]), 'r').read()
              .find('execution time') > 0)
          _ = [gfile.Remove(os.path.join(time_dir, x)) for x in ret]
        else:
          self.assertEqual(len(gfile.ListDirectory(time_dir)), 0)
        # Memory profile: same pattern as the timing profile above.
        if i in memory_step:
          ret = gfile.ListDirectory(memory_dir)
          self.assertEqual(len(ret), 1)
          self.assertTrue(
              gfile.Open(os.path.join(memory_dir, ret[0]), 'r').read()
              .find('requested bytes') > 0)
          _ = [gfile.Remove(os.path.join(memory_dir, x)) for x in ret]
        else:
          self.assertEqual(len(gfile.ListDirectory(memory_dir)), 0)
        # Profile dump: the dump directory is created lazily at the first
        # dump step, so before that it must not exist at all.
        if i in dump_step:
          ret = gfile.ListDirectory(profile_dir)
          self.assertAllEqual(ret, ['profile_%d' % i])
          _ = [gfile.Remove(os.path.join(profile_dir, x)) for x in ret]
        else:
          if i < dump_step[0]:
            self.assertFalse(gfile.Exists(profile_dir))
          else:
            self.assertEqual(len(gfile.ListDirectory(profile_dir)), 0)
def testAutoProfiling(self):
ops.reset_default_graph()
time_dir = os.path.join(test.get_temp_dir(), 'time')
memory_dir = os.path.join(test.get_temp_dir(), 'memory')
profile_dir = os.path.join(test.get_temp_dir(), 'dir/dir2/profile')
# TODO(xpan): Should we create parent directory for them?
gfile.MkDir(time_dir)
gfile.MkDir(memory_dir)
time_opts = (builder(builder.time_and_memory())
.with_file_output(os.path.join(time_dir, 'profile'))
.select(['micros']).build())
memory_opts = (builder(builder.time_and_memory())
.with_file_output(os.path.join(memory_dir, 'profile'))
.select(['bytes']).build())
time_steps = [2, 3]
memory_steps = [1, 3]
dump_steps = [3, 4]
x = lib.BuildSmallModel()
with profile_context.ProfileContext(profile_dir,
trace_steps=[1, 2, 3],
dump_steps=[3, 4]) as pctx:
pctx.add_auto_profiling('scope', time_opts, time_steps)
pctx.add_auto_profiling('scope', memory_opts, memory_steps)
self._trainLoop(x, 10, time_dir, time_steps,
memory_dir, memory_steps, profile_dir, dump_steps)
  def testOOM(self):
    """With report_tensor_allocations_upon_oom, the OOM error message names
    the tensors that requested memory (GPU only)."""
    if not test.is_gpu_available():
      return
    ops.reset_default_graph()
    # Deliberately allocate tensors too large to fit on one GPU.
    with ops.device('/device:GPU:0'):
      a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
      b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
      c = a * b
    try:
      with session.Session(config=self._no_rewrite_session_config()) as sess:
        sess.run(c, options=config_pb2.RunOptions(
            report_tensor_allocations_upon_oom=True))
    except Exception as e:  # pylint: disable=broad-except
      exception_str = '%s' % e
      # The OOM message reports the allocations from both random tensors.
      self.assertTrue(
          'OOM when allocating tensor with shape[30000,10000,20000]' in
          exception_str)
      mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
                      exception_str)
      self.assertGreater(float(mat.group(1)), 0.0)
      mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
                      exception_str)
      self.assertGreater(float(mat.group(1)), 0.0)
  def testDistributedOOM(self):
    """In a distributed OOM, only allocations on the OOMing worker's device
    are reported (GPU only)."""
    if not test.is_gpu_available():
      return
    ops.reset_default_graph()
    workers, _ = test_util.create_local_cluster(2, 0)
    # Place one oversized tensor on each worker; the multiply OOMs on
    # worker 1, where test_random2 lives.
    with ops.device('/job:worker/replica:0/task:0/gpu:0'):
      a = random_ops.random_normal([1, 10000, 20000], name='test_random1')
    with ops.device('/job:worker/replica:0/task:1/gpu:0'):
      b = random_ops.random_normal([30000, 10000, 1], name='test_random2')
      c = a * b
    try:
      with session.Session(workers[1].target) as sess:
        sess.run(c, options=config_pb2.RunOptions(
            report_tensor_allocations_upon_oom=True))
    except Exception as e:  # pylint: disable=broad-except
      exception_str = '%s' % e
      # test_random2 is reported because it's allocated in worker 1.
      self.assertTrue('Current usage from device: '
                      '/job:worker/replica:0/task:1/device:GPU:0, '
                      'allocator: GPU_0_bfc' in exception_str)
      mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',
                      exception_str)
      self.assertGreater(float(mat.group(1)), 0.0)
      # test_random1 is not reported because it's allocated in worker 0.
      mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',
                      exception_str)
      self.assertTrue(mat is None)
def testTrackPersistentBytes(self):
ops.reset_default_graph()
a = array_ops.constant(np.ones((100, 100)))
b = array_ops.constant(np.ones((100, 100)))
c = a * b
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.min_graph_nodes = -1
with session.Session(config=config) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
ret = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
run_metadata = config_pb2.RunMetadata()
sess.run(c, options=run_options, run_metadata=run_metadata)
ret2 = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
n = lib.SearchTFProfNode(ret, 'mul')
n2 = lib.SearchTFProfNode(ret2, 'mul')
self.assertGreater(n.peak_bytes, 0)
self.assertGreater(n.output_bytes, 0)
self.assertGreater(n.residual_bytes, 0)
self.assertEqual(n.peak_bytes, n2.peak_bytes)
self.assertEqual(n.output_bytes, n2.output_bytes)
self.assertEqual(n.residual_bytes, n2.residual_bytes)
def testTraceLoopBytes(self):
if not test.is_gpu_available(): return
ops.reset_default_graph()
steps = 100
with ops.device('/gpu:0'):
x = array_ops.ones((100, 100), dtype=dtypes.float32)
n = array_ops.constant(steps, dtype=dtypes.int32)
x1 = array_ops.ones((100, 100))
x *= x1
def loop_body(i, x):
x *= x
return i + 1, x
_, y = control_flow_ops.while_loop(
lambda i, x: i < n, loop_body,
[array_ops.constant(0), x])
grad = gradients.gradients(y, [x1])
with session.Session(config=self._no_rewrite_session_config()) as sess:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(grad, options=run_options, run_metadata=run_metadata)
options = option_builder.ProfileOptionBuilder.time_and_memory()
options['min_bytes'] = 0
options['min_micros'] = 0
options['select'] = ('bytes', 'peak_bytes', 'output_bytes',
'residual_bytes')
options['output'] = 'none'
ret_pb = model_analyzer.profile(
sess.graph, run_meta=run_metadata, cmd='scope', options=options)
self.assertGreater(ret_pb.total_requested_bytes, 1000000)
  def testEager(self):
    """Profiling works in eager mode via context run-metadata capture."""
    ops.reset_default_graph()
    with context.eager_mode():
      outfile = os.path.join(test.get_temp_dir(), 'dump')
      opts = builder(
          builder.time_and_memory()).with_file_output(outfile).build()
      # Capture run metadata while the model executes eagerly.
      context.enable_run_metadata()
      lib.BuildSmallModel()
      profiler = model_analyzer.Profiler()
      profiler.add_step(0, context.export_run_metadata())
      context.disable_run_metadata()
      profiler.profile_operations(opts)
      with gfile.Open(outfile, 'r') as f:
        out_str = f.read()
        self.assertTrue('Conv2D' in out_str)
        self.assertTrue('VarHandleOp' in out_str)
      # NOTE(review): hard-coded '/tmp/eager_profile' path, and the opened
      # file handle 'f' is never written to -- looks like leftover debugging;
      # confirm whether the write was intended.
      with gfile.Open('/tmp/eager_profile', 'wb') as f:
        profile_pb = tfprof_log_pb2.ProfileProto()
        profile_pb.ParseFromString(profiler.serialize_to_string())
        profile_pb_str = '%s' % profile_pb
        self.assertTrue('Conv2D' in profile_pb_str)
        self.assertTrue('VarHandleOp' in profile_pb_str)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| kobejean/tensorflow | tensorflow/python/profiler/model_analyzer_test.py | Python | apache-2.0 | 33,231 |
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
# When we hit an accept state in either the DFA or the ATN, we
# have to notify the character stream to start buffering characters
# via {@link IntStream#mark} and record the current state. The current sim state
# includes the current index into the input, the current line,
# and current character position in that line. Note that the Lexer is
# tracking the starting line and characterization of the token. These
# variables track the "state" of the simulator when it hits an accept state.
#
# <p>We track these variables separately for the DFA and ATN simulation
# because the DFA simulation often has to fail over to the ATN
# simulation. If the ATN simulation fails, we need the DFA to fall
# back to its previously accepted state, if any. If the ATN succeeds,
# then the ATN does the accept and the DFA simulator that invoked it
# can simply return the predicted token type.</p>
#/
from antlr4.PredictionContext import PredictionContextCache, SingletonPredictionContext, PredictionContext
from antlr4.InputStream import InputStream
from antlr4.Token import Token
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfig import LexerATNConfig
from antlr4.atn.ATNSimulator import ATNSimulator
from antlr4.atn.ATNConfigSet import ATNConfigSet, OrderedATNConfigSet
from antlr4.atn.ATNState import RuleStopState, ATNState
from antlr4.atn.LexerActionExecutor import LexerActionExecutor
from antlr4.atn.Transition import Transition
from antlr4.dfa.DFAState import DFAState
from antlr4.error.Errors import LexerNoViableAltException, UnsupportedOperationException
class SimState(object):
    """Snapshot of the simulator position (input index, line, column) and the
    DFA state recorded when an accept state is reached, so the lexer can fall
    back to the most recently accepted position."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear any previously recorded accept state."""
        self.index, self.line, self.column = -1, 0, -1
        self.dfaState = None
# Forward declarations so the annotations below can name these types before
# the real classes exist. LexerATNSimulator is replaced when the class below
# is defined. NOTE(review): 'Lexer' is only ever imported locally inside
# methods, so this module-level name remains None for the module's lifetime.
Lexer = None
LexerATNSimulator = None
class LexerATNSimulator(ATNSimulator):
    """ATN simulator specialized for lexing: predicts token types by
    simulating the ATN, caching results in per-mode DFAs."""

    # Verbose tracing of the ATN and DFA simulation, respectively.
    debug = False
    dfa_debug = False

    # Only symbols in [MIN_DFA_EDGE, MAX_DFA_EDGE] get cached DFA edges;
    # anything outside this range always goes through the ATN.
    MIN_DFA_EDGE = 0
    MAX_DFA_EDGE = 127 # forces unicode to stay in ATN

    # Sentinel DFA state meaning "no viable transition" (assigned externally).
    ERROR = None

    # Instrumentation counter: number of calls to match().
    match_calls = 0
    def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
        """Create a lexer ATN simulator.

        recog -- the Lexer this simulator serves (may be None).
        atn -- the augmented transition network to simulate.
        decisionToDFA -- per-mode DFA cache, indexed by lexer mode.
        sharedContextCache -- shared PredictionContext cache.
        """
        super().__init__(atn, sharedContextCache)
        self.decisionToDFA = decisionToDFA
        self.recog = recog
        # The current token's starting index into the character stream.
        # Shared across DFA to ATN simulation in case the ATN fails and the
        # DFA did not have a previous accept state. In this case, we use the
        # ATN-generated exception object.
        self.startIndex = -1
        # line number 1..n within the input
        self.line = 1
        # The index of the character relative to the beginning of the line 0..n-1
        self.column = 0
        # Imported locally to break the circular import with antlr4.Lexer.
        # NOTE(review): this binds 'Lexer' only in this function's scope; the
        # module-level forward declaration 'Lexer = None' is not updated.
        from antlr4.Lexer import Lexer
        self.mode = Lexer.DEFAULT_MODE
        # Used during DFA/ATN exec to record the most recent accept configuration info
        self.prevAccept = SimState()
def copyState(self, simulator:LexerATNSimulator ):
self.column = simulator.column
self.line = simulator.line
self.mode = simulator.mode
self.startIndex = simulator.startIndex
def match(self, input:InputStream , mode:int):
self.match_calls += 1
self.mode = mode
mark = input.mark()
try:
self.startIndex = input.index
self.prevAccept.reset()
dfa = self.decisionToDFA[mode]
if dfa.s0 is None:
return self.matchATN(input)
else:
return self.execATN(input, dfa.s0)
finally:
input.release(mark)
def reset(self):
self.prevAccept.reset()
self.startIndex = -1
self.line = 1
self.column = 0
self.mode = Lexer.DEFAULT_MODE
def matchATN(self, input:InputStream):
startState = self.atn.modeToStartState[self.mode]
if self.debug:
print("matchATN mode " + str(self.mode) + " start: " + str(startState))
old_mode = self.mode
s0_closure = self.computeStartState(input, startState)
suppressEdge = s0_closure.hasSemanticContext
s0_closure.hasSemanticContext = False
next = self.addDFAState(s0_closure)
if not suppressEdge:
self.decisionToDFA[self.mode].s0 = next
predict = self.execATN(input, next)
if self.debug:
print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))
return predict
def execATN(self, input:InputStream, ds0:DFAState):
if self.debug:
print("start state closure=" + str(ds0.configs))
if ds0.isAcceptState:
# allow zero-length tokens
self.captureSimState(self.prevAccept, input, ds0)
t = input.LA(1)
s = ds0 # s is current/from DFA state
while True: # while more work
if self.debug:
print("execATN loop starting closure: %s\n", s.configs)
# As we move src->trg, src->trg, we keep track of the previous trg to
# avoid looking up the DFA state again, which is expensive.
# If the previous target was already part of the DFA, we might
# be able to avoid doing a reach operation upon t. If s!=null,
# it means that semantic predicates didn't prevent us from
# creating a DFA state. Once we know s!=null, we check to see if
# the DFA state has an edge already for t. If so, we can just reuse
# it's configuration set; there's no point in re-computing it.
# This is kind of like doing DFA simulation within the ATN
# simulation because DFA simulation is really just a way to avoid
# computing reach/closure sets. Technically, once we know that
# we have a previously added DFA state, we could jump over to
# the DFA simulator. But, that would mean popping back and forth
# a lot and making things more complicated algorithmically.
# This optimization makes a lot of sense for loops within DFA.
# A character will take us back to an existing DFA state
# that already has lots of edges out of it. e.g., .* in comments.
# print("Target for:" + str(s) + " and:" + str(t))
target = self.getExistingTargetState(s, t)
# print("Existing:" + str(target))
if target is None:
target = self.computeTargetState(input, s, t)
# print("Computed:" + str(target))
if target == self.ERROR:
break
# If this is a consumable input element, make sure to consume before
# capturing the accept state so the input index, line, and char
# position accurately reflect the state of the interpreter at the
# end of the token.
if t != Token.EOF:
self.consume(input)
if target.isAcceptState:
self.captureSimState(self.prevAccept, input, target)
if t == Token.EOF:
break
t = input.LA(1)
s = target # flip; current DFA target becomes new src/from state
return self.failOrAccept(self.prevAccept, input, s.configs, t)
# Get an existing target state for an edge in the DFA. If the target state
# for the edge has not yet been computed or is otherwise not available,
# this method returns {@code null}.
#
# @param s The current DFA state
# @param t The next input symbol
# @return The existing target DFA state for the given input symbol
# {@code t}, or {@code null} if the target state for this edge is not
# already cached
def getExistingTargetState(self, s:DFAState, t:int):
if s.edges is None or t < self.MIN_DFA_EDGE or t > self.MAX_DFA_EDGE:
return None
target = s.edges[t - self.MIN_DFA_EDGE]
if self.debug and target is not None:
print("reuse state "+s.stateNumber+ " edge to "+target.stateNumber)
return target
# Compute a target state for an edge in the DFA, and attempt to add the
# computed state and corresponding edge to the DFA.
#
# @param input The input stream
# @param s The current DFA state
# @param t The next input symbol
#
# @return The computed target DFA state for the given input symbol
# {@code t}. If {@code t} does not lead to a valid DFA state, this method
# returns {@link #ERROR}.
def computeTargetState(self, input:InputStream, s:DFAState, t:int):
reach = OrderedATNConfigSet()
# if we don't find an existing DFA state
# Fill reach starting from closure, following t transitions
self.getReachableConfigSet(input, s.configs, reach, t)
if len(reach)==0: # we got nowhere on t from s
if not reach.hasSemanticContext:
# we got nowhere on t, don't throw out this knowledge; it'd
# cause a failover from DFA later.
self. addDFAEdge(s, t, self.ERROR)
# stop when we can't match any more char
return self.ERROR
# Add an edge from s to target DFA found/created for reach
return self.addDFAEdge(s, t, cfgs=reach)
def failOrAccept(self, prevAccept:SimState , input:InputStream, reach:ATNConfigSet, t:int):
if self.prevAccept.dfaState is not None:
lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor
self.accept(input, lexerActionExecutor, self.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
return prevAccept.dfaState.prediction
else:
# if no accept and EOF is first char, return EOF
if t==Token.EOF and input.index==self.startIndex:
return Token.EOF
raise LexerNoViableAltException(self.recog, input, self.startIndex, reach)
# Given a starting configuration set, figure out all ATN configurations
# we can reach upon input {@code t}. Parameter {@code reach} is a return
# parameter.
def getReachableConfigSet(self, input:InputStream, closure:ATNConfigSet, reach:ATNConfigSet, t:int):
# this is used to skip processing for configs which have a lower priority
# than a config that already reached an accept state for the same rule
skipAlt = ATN.INVALID_ALT_NUMBER
for cfg in closure:
currentAltReachedAcceptState = ( cfg.alt == skipAlt )
if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
continue
if self.debug:
print("testing %s at %s\n", self.getTokenName(t), cfg.toString(self.recog, True))
for trans in cfg.state.transitions: # for each transition
target = self.getReachableTarget(trans, t)
if target is not None:
lexerActionExecutor = cfg.lexerActionExecutor
if lexerActionExecutor is not None:
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - self.startIndex)
treatEofAsEpsilon = (t == Token.EOF)
config = LexerATNConfig(state=target, lexerActionExecutor=lexerActionExecutor, config=cfg)
if self.closure(input, config, reach, currentAltReachedAcceptState, True, treatEofAsEpsilon):
# any remaining configs for this alt have a lower priority than
# the one that just reached an accept state.
skipAlt = cfg.alt
def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int):
if self.debug:
print("ACTION %s\n", lexerActionExecutor)
# seek to after last char in token
input.seek(index)
self.line = line
self.column = charPos
if lexerActionExecutor is not None and self.recog is not None:
lexerActionExecutor.execute(self.recog, input, startIndex)
def getReachableTarget(self, trans:Transition, t:int):
if trans.matches(t, 0, 0xFFFE):
return trans.target
else:
return None
def computeStartState(self, input:InputStream, p:ATNState):
initialContext = PredictionContext.EMPTY
configs = OrderedATNConfigSet()
for i in range(0,len(p.transitions)):
target = p.transitions[i].target
c = LexerATNConfig(state=target, alt=i+1, context=initialContext)
self.closure(input, c, configs, False, False, False)
return configs
# Since the alternatives within any lexer decision are ordered by
# preference, this method stops pursuing the closure as soon as an accept
# state is reached. After the first accept state is reached by depth-first
# search from {@code config}, all other (potentially reachable) states for
# this rule would have a lower priority.
#
# @return {@code true} if an accept state is reached, otherwise
# {@code false}.
def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
speculative:bool, treatEofAsEpsilon:bool):
if self.debug:
print("closure("+config.toString(self.recog, True)+")")
if isinstance( config.state, RuleStopState ):
if self.debug:
if self.recog is not None:
print("closure at %s rule stop %s\n", self.recog.getRuleNames()[config.state.ruleIndex], config)
else:
print("closure at rule stop %s\n", config)
if config.context is None or config.context.hasEmptyPath():
if config.context is None or config.context.isEmpty():
configs.add(config)
return True
else:
configs.add(LexerATNConfig(state=config.state, config=config, context=PredictionContext.EMPTY))
currentAltReachedAcceptState = True
if config.context is not None and not config.context.isEmpty():
for i in range(0,len(config.context)):
if config.context.getReturnState(i) != PredictionContext.EMPTY_RETURN_STATE:
newContext = config.context.getParent(i) # "pop" return state
returnState = self.atn.states[config.context.getReturnState(i)]
c = LexerATNConfig(state=returnState, config=config, context=newContext)
currentAltReachedAcceptState = self.closure(input, c, configs,
currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
return currentAltReachedAcceptState
# optimization
if not config.state.epsilonOnlyTransitions:
if not currentAltReachedAcceptState or not config.passedThroughNonGreedyDecision:
configs.add(config)
for t in config.state.transitions:
c = self.getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon)
if c is not None:
currentAltReachedAcceptState = self.closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
return currentAltReachedAcceptState
# side-effect: can alter configs.hasSemanticContext
    def getEpsilonTarget(self, input:InputStream, config:LexerATNConfig, t:Transition, configs:ATNConfigSet,
                         speculative:bool, treatEofAsEpsilon:bool):
        """Follow one transition ``t`` from ``config`` during closure.

        Returns the resulting LexerATNConfig, or None when the transition
        cannot be taken as epsilon.  Side effect: may set
        ``configs.hasSemanticContext`` when a predicate is traversed.
        """
        c = None
        if t.serializationType==Transition.RULE:
            # Rule invocation: push the follow state onto the context.
            newContext = SingletonPredictionContext.create(config.context, t.followState.stateNumber)
            c = LexerATNConfig(state=t.target, config=config, context=newContext)

        elif t.serializationType==Transition.PRECEDENCE:
            raise UnsupportedOperationException("Precedence predicates are not supported in lexers.")

        elif t.serializationType==Transition.PREDICATE:
            # Track traversing semantic predicates. If we traverse,
            # we cannot add a DFA state for this "reach" computation
            # because the DFA would not test the predicate again in the
            # future. Rather than creating collections of semantic predicates
            # like v3 and testing them on prediction, v4 will test them on the
            # fly all the time using the ATN not the DFA. This is slower but
            # semantically it's not used that often. One of the key elements to
            # this predicate mechanism is not adding DFA states that see
            # predicates immediately afterwards in the ATN. For example,

            # a : ID {p1}? | ID {p2}? ;

            # should create the start state for rule 'a' (to save start state
            # competition), but should not create target of ID state. The
            # collection of ATN states the following ID references includes
            # states reached by traversing predicates. Since this is when we
            # test them, we cannot cash the DFA state target of ID.

            if self.debug:
                print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
            configs.hasSemanticContext = True
            if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
                c = LexerATNConfig(state=t.target, config=config)

        elif t.serializationType==Transition.ACTION:
            if config.context is None or config.context.hasEmptyPath():
                # execute actions anywhere in the start rule for a token.
                #
                # TODO: if the entry rule is invoked recursively, some
                # actions may be executed during the recursive call. The
                # problem can appear when hasEmptyPath() is true but
                # isEmpty() is false. In this case, the config needs to be
                # split into two contexts - one with just the empty path
                # and another with everything but the empty path.
                # Unfortunately, the current algorithm does not allow
                # getEpsilonTarget to return two configurations, so
                # additional modifications are needed before we can support
                # the split operation.
                lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor,
                                self.atn.lexerActions[t.actionIndex])
                c = LexerATNConfig(state=t.target, config=config, lexerActionExecutor=lexerActionExecutor)

            else:
                # ignore actions in referenced rules
                c = LexerATNConfig(state=t.target, config=config)

        elif t.serializationType==Transition.EPSILON:
            c = LexerATNConfig(state=t.target, config=config)

        elif t.serializationType in [ Transition.ATOM, Transition.RANGE, Transition.SET ]:
            # Normally a non-epsilon edge; only traversable here when EOF is
            # being treated as epsilon and the transition matches EOF.
            if treatEofAsEpsilon:
                if t.matches(Token.EOF, 0, 0xFFFF):
                    c = LexerATNConfig(state=t.target, config=config)

        return c
# Evaluate a predicate specified in the lexer.
#
# <p>If {@code speculative} is {@code true}, this method was called before
# {@link #consume} for the matched character. This method should call
# {@link #consume} before evaluating the predicate to ensure position
# sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine},
# and {@link Lexer#getcolumn}, properly reflect the current
# lexer state. This method should restore {@code input} and the simulator
# to the original state before returning (i.e. undo the actions made by the
# call to {@link #consume}.</p>
#
# @param input The input stream.
# @param ruleIndex The rule containing the predicate.
# @param predIndex The index of the predicate within the rule.
# @param speculative {@code true} if the current index in {@code input} is
# one character before the predicate's location.
#
# @return {@code true} if the specified predicate evaluates to
# {@code true}.
#/
    def evaluatePredicate(self, input:InputStream, ruleIndex:int, predIndex:int, speculative:bool):
        """Evaluate lexer predicate ``ruleIndex``:``predIndex``.

        When *speculative*, the pending character is consumed first so that
        position-sensitive predicates see the correct state, and the input
        position plus line/column bookkeeping are restored afterwards.
        """
        # assume true if no recognizer was provided
        if self.recog is None:
            return True

        if not speculative:
            return self.recog.sempred(None, ruleIndex, predIndex)

        # Save position state so it can be restored after the speculative
        # consume, whether or not the predicate raises.
        savedcolumn = self.column
        savedLine = self.line
        index = input.index
        marker = input.mark()
        try:
            self.consume(input)
            return self.recog.sempred(None, ruleIndex, predIndex)
        finally:
            self.column = savedcolumn
            self.line = savedLine
            input.seek(index)
            input.release(marker)
def captureSimState(self, settings:SimState, input:InputStream, dfaState:DFAState):
settings.index = input.index
settings.line = self.line
settings.column = self.column
settings.dfaState = dfaState
    def addDFAEdge(self, from_:DFAState, tk:int, to:DFAState=None, cfgs:ATNConfigSet=None) -> DFAState:
        """Add an edge labelled with character ``tk`` from ``from_`` to ``to``.

        When called with ``cfgs`` instead of ``to``, the target DFA state is
        first created (or found) from those configurations; if the configs
        carry a semantic context the static edge is suppressed (see below).
        Returns the target DFA state.
        """
        if to is None and cfgs is not None:
            # leading to this call, ATNConfigSet.hasSemanticContext is used as a
            # marker indicating dynamic predicate evaluation makes this edge
            # dependent on the specific input sequence, so the static edge in the
            # DFA should be omitted. The target DFAState is still created since
            # execATN has the ability to resynchronize with the DFA state cache
            # following the predicate evaluation step.
            #
            # TJP notes: next time through the DFA, we see a pred again and eval.
            # If that gets us to a previously created (but dangling) DFA
            # state, we can continue in pure DFA mode from there.
            #/
            suppressEdge = cfgs.hasSemanticContext
            cfgs.hasSemanticContext = False

            to = self.addDFAState(cfgs)

            if suppressEdge:
                return to

        # add the edge
        if tk < self.MIN_DFA_EDGE or tk > self.MAX_DFA_EDGE:
            # Only track edges within the DFA bounds
            return to

        if self.debug:
            print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))

        if from_.edges is None:
            # make room for tokens 1..n and -1 masquerading as index 0
            from_.edges = [ None ] * (self.MAX_DFA_EDGE - self.MIN_DFA_EDGE + 1)

        from_.edges[tk - self.MIN_DFA_EDGE] = to # connect
        return to
# Add a new DFA state if there isn't one with this set of
# configurations already. This method also detects the first
# configuration containing an ATN rule stop state. Later, when
# traversing the DFA, we will know which rule to accept.
def addDFAState(self, configs:ATNConfigSet) -> DFAState:
proposed = DFAState(configs=configs)
firstConfigWithRuleStopState = None
for c in configs:
if isinstance(c.state, RuleStopState):
firstConfigWithRuleStopState = c
break
if firstConfigWithRuleStopState is not None:
proposed.isAcceptState = True
proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
proposed.prediction = self.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]
dfa = self.decisionToDFA[self.mode]
existing = dfa.states.get(proposed, None)
if existing is not None:
return existing
newState = proposed
newState.stateNumber = len(dfa.states)
configs.setReadonly(True)
newState.configs = configs
dfa.states[newState] = newState
return newState
    def getDFA(self, mode:int):
        """Return the DFA used for the given lexer mode."""
        return self.decisionToDFA[mode]
# Get the text matched so far for the current token.
    def getText(self, input:InputStream):
        """Return the text matched so far for the current token."""
        # index is first lookahead char, don't include.
        return input.getText(self.startIndex, input.index-1)
def consume(self, input:InputStream):
curChar = input.LA(1)
if curChar==ord('\n'):
self.line += 1
self.column = 0
else:
self.column += 1
input.consume()
def getTokenName(self, t:int):
if t==-1:
return "EOF"
else:
return "'" + chr(t) + "'"
LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())
del Lexer | sidhart/antlr4 | runtime/Python3/src/antlr4/atn/LexerATNSimulator.py | Python | bsd-3-clause | 26,465 |
import pytest
from pandas import interval_range
import pandas._testing as tm
@pytest.mark.parametrize(
    "kwargs",
    [
        {"start": 0, "periods": 4},
        {"start": 1, "periods": 5},
        {"start": 5, "end": 10, "closed": "left"},
    ],
)
def test_interval_array_equal(kwargs):
    """An IntervalArray must always compare equal to itself, however the
    underlying interval_range was constructed."""
    arr = interval_range(**kwargs).values
    tm.assert_interval_array_equal(arr, arr)
def test_interval_array_equal_closed_mismatch():
    """Arrays identical except for `closed` must fail with the attribute diff."""
    kwargs = {"start": 0, "periods": 5}
    arr1 = interval_range(closed="left", **kwargs).values
    arr2 = interval_range(closed="right", **kwargs).values

    # pandas' raise_assert_detail emits "<obj> are different\n\n<detail>...";
    # the blank line after the first line is part of the message, so the
    # regex passed to pytest.raises(match=...) must contain it too.
    msg = """\
IntervalArray are different

Attribute "closed" are different
\\[left\\]: left
\\[right\\]: right"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)
def test_interval_array_equal_periods_mismatch():
    """Arrays of different length must fail with a shape diff."""
    kwargs = {"start": 0}
    arr1 = interval_range(periods=5, **kwargs).values
    arr2 = interval_range(periods=6, **kwargs).values

    # Blank line after the header is part of pandas' raise_assert_detail
    # message format and must appear in the match regex.
    msg = """\
IntervalArray.left are different

IntervalArray.left shapes are different
\\[left\\]: \\(5,\\)
\\[right\\]: \\(6,\\)"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)
def test_interval_array_equal_end_mismatch():
    """Same length but different endpoints must fail with a values diff."""
    kwargs = {"start": 0, "periods": 5}
    arr1 = interval_range(end=10, **kwargs).values
    arr2 = interval_range(end=20, **kwargs).values

    # Blank line after the header is part of pandas' raise_assert_detail
    # message format and must appear in the match regex.
    msg = """\
IntervalArray.left are different

IntervalArray.left values are different \\(80.0 %\\)
\\[left\\]: \\[0, 2, 4, 6, 8\\]
\\[right\\]: \\[0, 4, 8, 12, 16\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)
def test_interval_array_equal_start_mismatch():
    """Shifted start points must fail with a 100% values diff."""
    kwargs = {"periods": 4}
    arr1 = interval_range(start=0, **kwargs).values
    arr2 = interval_range(start=1, **kwargs).values

    # Blank line after the header is part of pandas' raise_assert_detail
    # message format and must appear in the match regex.
    msg = """\
IntervalArray.left are different

IntervalArray.left values are different \\(100.0 %\\)
\\[left\\]: \\[0, 1, 2, 3\\]
\\[right\\]: \\[1, 2, 3, 4\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)
| dsm054/pandas | pandas/tests/util/test_assert_interval_array_equal.py | Python | bsd-3-clause | 2,158 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import subprocess
import sys
_enable_style_format = None
_clang_format_command_path = None
_gn_command_path = None
def init(root_src_dir, enable_style_format=True):
    """Initialize this module's formatter paths.

    Must be called exactly once before any of the *_format functions; it
    locates the clang-format and gn binaries under //buildtools/<platform>/.
    """
    assert isinstance(root_src_dir, str)
    assert isinstance(enable_style_format, bool)

    global _enable_style_format
    global _clang_format_command_path
    global _gn_command_path
    # Guard against double initialization.
    assert _enable_style_format is None
    assert _clang_format_command_path is None
    assert _gn_command_path is None

    _enable_style_format = enable_style_format

    root_src_dir = os.path.abspath(root_src_dir)

    # Determine //buildtools/<platform>/ directory
    if sys.platform.startswith("linux"):
        platform, exe_suffix = "linux64", ""
    elif sys.platform.startswith("darwin"):
        platform, exe_suffix = "mac", ""
    elif sys.platform.startswith(("cygwin", "win")):
        platform, exe_suffix = "win", ".exe"
    else:
        assert False, "Unknown platform: {}".format(sys.platform)

    buildtools_platform_dir = os.path.join(root_src_dir, "buildtools",
                                           platform)

    # //buildtools/<platform>/clang-format
    _clang_format_command_path = os.path.join(
        buildtools_platform_dir, "clang-format{}".format(exe_suffix))

    # //buildtools/<platform>/gn
    _gn_command_path = os.path.join(buildtools_platform_dir,
                                    "gn{}".format(exe_suffix))
def auto_format(contents, filename):
    """Format *contents* with the formatter appropriate for *filename*:
    gn format for .gn/.gni files, clang-format for everything else."""
    assert isinstance(filename, str)

    _, ext = os.path.splitext(filename)
    formatter = gn_format if ext in (".gn", ".gni") else clang_format
    return formatter(contents, filename)
def clang_format(contents, filename=None):
    """Run clang-format over *contents*, returning a StyleFormatResult.

    *filename* is only used for style inference via -assume-filename."""
    command_line = [_clang_format_command_path]
    if filename is not None:
        command_line += ['-assume-filename={}'.format(filename)]
    return _invoke_format_command(command_line, filename, contents)
def gn_format(contents, filename=None):
    """Run 'gn format --stdin' over *contents*, returning a StyleFormatResult."""
    command_line = [_gn_command_path, "format", "--stdin"]
    if filename is not None:
        command_line += ['-assume-filename={}'.format(filename)]
    return _invoke_format_command(command_line, filename, contents)
def _invoke_format_command(command_line, filename, contents):
    """Feed *contents* to the formatter *command_line* via stdin and wrap
    the outcome in a StyleFormatResult."""
    # When formatting is disabled, pass the input through untouched.
    if not _enable_style_format:
        return StyleFormatResult(stdout_output=contents,
                                 stderr_output="",
                                 exit_code=0,
                                 filename=filename)

    popen_kwargs = {}
    # On Python 3 an encoding is needed so the pipes carry str, not bytes.
    if sys.version_info.major != 2:
        popen_kwargs['encoding'] = 'utf-8'
    process = subprocess.Popen(command_line,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               **popen_kwargs)
    out, err = process.communicate(input=contents)

    return StyleFormatResult(stdout_output=out,
                             stderr_output=err,
                             exit_code=process.wait(),
                             filename=filename)
class StyleFormatResult(object):
    """Outcome of running a formatter: captured output, exit code and the
    filename (if any) the input was associated with."""

    def __init__(self, stdout_output, stderr_output, exit_code, filename):
        self._stdout = stdout_output
        self._stderr = stderr_output
        self._exit_code = exit_code
        self._filename = filename

    @property
    def did_succeed(self):
        # Formatters follow the Unix convention: status 0 means success.
        return self._exit_code == 0

    @property
    def contents(self):
        # The formatted text is only meaningful when the formatter succeeded.
        assert self.did_succeed
        return self._stdout

    @property
    def error_message(self):
        return self._stderr

    @property
    def filename(self):
        return self._filename
| scheib/chromium | third_party/blink/renderer/bindings/scripts/bind_gen/style_format.py | Python | bsd-3-clause | 3,817 |
# Copyright (c) 2011 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import command
import re
import os
import series
import subprocess
import sys
import terminal
import checkpatch
import settings
def CountCommitsToBranch():
    """Count commits that sit on top of the current branch's upstream.

    Runs 'git log @{upstream}..' piped into 'wc -l' to work out how many
    commits have been added since the tracking branch.

    Return:
        Number of patches that exist on top of the branch
    """
    pipe = [['git', 'log', '--no-color', '--oneline', '--no-decorate',
             '@{upstream}..'],
            ['wc', '-l']]
    result = command.RunPipe(pipe, capture=True, oneline=True)
    return int(result.stdout)
def GetUpstream(git_dir, branch):
    """Returns the name of the upstream for a branch

    Args:
        git_dir: Git directory containing repo
        branch: Name of branch

    Returns:
        Name of upstream branch (e.g. 'upstream/master') or None if none

    Raises:
        ValueError: if the remote/merge config is inconsistent
    """
    try:
        remote = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
                                       'branch.%s.remote' % branch)
        merge = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
                                      'branch.%s.merge' % branch)
    except:
        # No tracking information configured for this branch.
        return None

    if remote == '.':
        return merge
    elif remote and merge:
        leaf = merge.split('/')[-1]
        return '%s/%s' % (remote, leaf)
    else:
        # raise E(msg) works on both Python 2 and 3; the old 'raise E, msg'
        # statement form is a syntax error on Python 3.
        raise ValueError("Cannot determine upstream branch for branch "
                "'%s' remote='%s', merge='%s'" % (branch, remote, merge))
def GetRangeInBranch(git_dir, branch, include_upstream=False):
    """Returns an expression for the commits in the given branch.

    Args:
        git_dir: Directory containing git repo
        branch: Name of branch
        include_upstream: True to also include the upstream commit itself
    Return:
        Expression in the form 'upstream..branch' which can be used to
        access the commits. If the branch does not exist, returns None.
    """
    upstream = GetUpstream(git_dir, branch)
    if not upstream:
        return None
    suffix = '~' if include_upstream else ''
    return '%s%s..%s' % (upstream, suffix, branch)
def CountCommitsInBranch(git_dir, branch, include_upstream=False):
    """Returns the number of commits in the given branch.

    Args:
        git_dir: Directory containing git repo
        branch: Name of branch
        include_upstream: True to also count the upstream commit itself
    Return:
        Number of patches that exist on top of the branch, or None if the
        branch does not exist.
    """
    range_expr = GetRangeInBranch(git_dir, branch, include_upstream)
    if not range_expr:
        # Branch does not exist.
        return None
    pipe = [['git', '--git-dir', git_dir, 'log', '--oneline', '--no-decorate',
             range_expr],
            ['wc', '-l']]
    result = command.RunPipe(pipe, capture=True, oneline=True)
    return int(result.stdout)
def CountCommits(commit_range):
    """Returns the number of commits in the given range.

    Args:
        commit_range: Range of commits to count (e.g. 'HEAD..base')
    Return:
        Number of commits in the range
    """
    pipe = [['git', 'log', '--oneline', '--no-decorate', commit_range],
            ['wc', '-l']]
    result = command.RunPipe(pipe, capture=True, oneline=True)
    return int(result.stdout)
def Checkout(commit_hash, git_dir=None, work_tree=None, force=False):
    """Checkout the selected commit for this build

    Args:
        commit_hash: Commit hash to check out
        git_dir: Optional --git-dir argument for git
        work_tree: Optional --work-tree argument for git
        force: True to pass -f to 'git checkout'

    Raises:
        OSError: if the checkout failed
    """
    pipe = ['git']
    if git_dir:
        pipe.extend(['--git-dir', git_dir])
    if work_tree:
        pipe.extend(['--work-tree', work_tree])
    pipe.append('checkout')
    if force:
        pipe.append('-f')
    pipe.append(commit_hash)
    result = command.RunPipe([pipe], capture=True, raise_on_error=False)
    if result.return_code != 0:
        # raise E(msg) works on both Python 2 and 3, unlike 'raise E, msg'.
        raise OSError('git checkout (%s): %s' % (pipe, result.stderr))
def Clone(git_dir, output_dir):
    """Clone a repository into an output directory.

    Args:
        git_dir: Repository to clone from
        output_dir: Existing directory to clone into

    Raises:
        OSError: if the clone failed
    """
    pipe = ['git', 'clone', git_dir, '.']
    # Disable RunPipe's own error exception so the check below is reachable
    # and reports a consistent OSError, matching Checkout()/Fetch().
    result = command.RunPipe([pipe], capture=True, cwd=output_dir,
                             raise_on_error=False)
    if result.return_code != 0:
        # raise E(msg) works on both Python 2 and 3, unlike 'raise E, msg'.
        raise OSError('git clone: %s' % result.stderr)
def Fetch(git_dir=None, work_tree=None):
    """Fetch from the origin repo

    Args:
        git_dir: Optional --git-dir argument for git
        work_tree: Optional --work-tree argument for git

    Raises:
        OSError: if the fetch failed
    """
    pipe = ['git']
    if git_dir:
        pipe.extend(['--git-dir', git_dir])
    if work_tree:
        pipe.extend(['--work-tree', work_tree])
    pipe.append('fetch')
    # Disable RunPipe's own error exception so the check below is reachable
    # and reports a consistent OSError, matching Checkout().
    result = command.RunPipe([pipe], capture=True, raise_on_error=False)
    if result.return_code != 0:
        # raise E(msg) works on both Python 2 and 3, unlike 'raise E, msg'.
        raise OSError('git fetch: %s' % result.stderr)
def CreatePatches(start, count, series):
    """Create a series of patches from the top of the current branch.

    The patch files are written to the current directory using
    git format-patch.

    Args:
        start: Commit to start from: 0=HEAD, 1=next one, etc.
        count: number of commits to include
        series: Series object holding patch metadata ('cover', prefix)
    Return:
        Filename of cover letter (None when there is no cover letter)
        List of filenames of patch files
    """
    # (A previously-assigned local built from series['version'] was never
    # used, so it has been dropped.)
    cmd = ['git', 'format-patch', '-M', '--signoff']
    if series.get('cover'):
        cmd.append('--cover-letter')
    prefix = series.GetPatchPrefix()
    if prefix:
        cmd += ['--subject-prefix=%s' % prefix]
    cmd += ['HEAD~%d..HEAD~%d' % (start + count, start)]

    stdout = command.RunList(cmd)
    files = stdout.splitlines()

    # We have an extra file if there is a cover letter
    if series.get('cover'):
       return files[0], files[1:]
    else:
       return None, files
def ApplyPatch(verbose, fname):
    """Apply a patch with git am to test it

    TODO: Convert these to use command, with stderr option

    Args:
        verbose: True to print git's stderr output verbatim
        fname: filename of patch file to apply

    Returns:
        Tuple (True if 'git am' succeeded, its stdout output)
    """
    col = terminal.Color()
    cmd = ['git', 'am', fname]
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = pipe.communicate()
    # Scan stderr for 'error: patch failed: <file>:<line>' so failures can
    # be reported in checkpatch's warning format.
    re_error = re.compile('^error: patch failed: (.+):(\d+)')
    for line in stderr.splitlines():
        if verbose:
            print line
        match = re_error.match(line)
        if match:
            print checkpatch.GetWarningMsg(col, 'warning', match.group(1),
                    int(match.group(2)), 'Patch failed')
    return pipe.returncode == 0, stdout
def ApplyPatches(verbose, args, start_point):
    """Apply the patches with git am to make sure all is well

    Args:
        verbose: Print out 'git am' output verbatim
        args: List of patch files to apply
        start_point: Number of commits back from HEAD to start applying.
            Normally this is len(args), but it can be larger if a start
            offset was given.

    Returns:
        True if every patch applied cleanly, else False
    """
    error_count = 0
    col = terminal.Color()

    # Figure out our current position
    cmd = ['git', 'name-rev', 'HEAD', '--name-only']
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout, stderr = pipe.communicate()
    if pipe.returncode:
        # NOTE(review): 'str' shadows the builtin here and below.
        str = 'Could not find current commit name'
        print col.Color(col.RED, str)
        print stdout
        return False
    old_head = stdout.splitlines()[0]

    # Checkout the required start point
    cmd = ['git', 'checkout', 'HEAD~%d' % start_point]
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = pipe.communicate()
    if pipe.returncode:
        str = 'Could not move to commit before patch series'
        print col.Color(col.RED, str)
        print stdout, stderr
        return False

    # Apply all the patches
    for fname in args:
        ok, stdout = ApplyPatch(verbose, fname)
        if not ok:
            print col.Color(col.RED, 'git am returned errors for %s: will '
                    'skip this patch' % fname)
            if verbose:
                print stdout
            error_count += 1
            # Skip the failed patch so the remaining ones can still be tried
            cmd = ['git', 'am', '--skip']
            pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            stdout, stderr = pipe.communicate()
            if pipe.returncode != 0:
                print col.Color(col.RED, 'Unable to skip patch! Aborting...')
                print stdout
                break

    # Return to our previous position
    cmd = ['git', 'checkout', old_head]
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = pipe.communicate()
    if pipe.returncode:
        print col.Color(col.RED, 'Could not move back to head commit')
        print stdout, stderr
    return error_count == 0
def BuildEmailList(in_list, tag=None, alias=None, raise_on_error=True):
    """Build a list of email addresses based on an input list.

    Takes a list of email addresses and aliases, and turns this into a list
    of only email address, by resolving any aliases that are present.

    If the tag is given, then each email address is prepended with this
    tag and a space. If the tag starts with a minus sign (indicating a
    command line parameter) then the email address is quoted.

    Args:
        in_list: List of aliases/email addresses
        tag: Text to put before each address
        alias: Alias dictionary
        raise_on_error: True to raise an error when an alias fails to match,
                False to just print a message.

    Returns:
        List of email addresses

    >>> alias = {}
    >>> alias['fred'] = ['f.bloggs@napier.co.nz']
    >>> alias['john'] = ['j.bloggs@napier.co.nz']
    >>> alias['mary'] = ['Mary Poppins <m.poppins@cloud.net>']
    >>> alias['boys'] = ['fred', ' john']
    >>> alias['all'] = ['fred ', 'john', ' mary ']
    >>> BuildEmailList(['john', 'mary'], None, alias)
    ['j.bloggs@napier.co.nz', 'Mary Poppins <m.poppins@cloud.net>']
    >>> BuildEmailList(['john', 'mary'], '--to', alias)
    ['--to "j.bloggs@napier.co.nz"', \
'--to "Mary Poppins <m.poppins@cloud.net>"']
    >>> BuildEmailList(['john', 'mary'], 'Cc', alias)
    ['Cc j.bloggs@napier.co.nz', 'Cc Mary Poppins <m.poppins@cloud.net>']
    """
    quote = '"' if tag and tag[0] == '-' else ''
    raw = []
    for item in in_list:
        # Each entry may expand (recursively) to several addresses
        raw += LookupEmail(item, alias, raise_on_error=raise_on_error)
    result = []
    # De-duplicate while preserving first-seen order
    for item in raw:
        if not item in result:
            result.append(item)
    if tag:
        return ['%s %s%s%s' % (tag, quote, email, quote) for email in result]
    return result
def EmailPatches(series, cover_fname, args, dry_run, raise_on_error, cc_fname,
        self_only=False, alias=None, in_reply_to=None):
    """Email a patch series.

    Args:
        series: Series object containing destination info
        cover_fname: filename of cover letter
        args: list of filenames of patch files
        dry_run: Just return the command that would be run
        raise_on_error: True to raise an error when an alias fails to match,
                False to just print a message.
        cc_fname: Filename of Cc file for per-commit Cc
        self_only: True to just email to yourself as a test
        in_reply_to: If set we'll pass this to git as --in-reply-to.
            Should be a message ID that this is in reply to.

    Returns:
        Git command that was/would be run

    # For the duration of this doctest pretend that we ran patman with ./patman
    >>> _old_argv0 = sys.argv[0]
    >>> sys.argv[0] = './patman'
    >>> alias = {}
    >>> alias['fred'] = ['f.bloggs@napier.co.nz']
    >>> alias['john'] = ['j.bloggs@napier.co.nz']
    >>> alias['mary'] = ['m.poppins@cloud.net']
    >>> alias['boys'] = ['fred', ' john']
    >>> alias['all'] = ['fred ', 'john', ' mary ']
    >>> alias[os.getenv('USER')] = ['this-is-me@me.com']
    >>> series = series.Series()
    >>> series.to = ['fred']
    >>> series.cc = ['mary']
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
False, alias)
    'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
"m.poppins@cloud.net" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
    >>> EmailPatches(series, None, ['p1'], True, True, 'cc-fname', False, \
alias)
    'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
"m.poppins@cloud.net" --cc-cmd "./patman --cc-cmd cc-fname" p1'
    >>> series.cc = ['all']
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
True, alias)
    'git send-email --annotate --to "this-is-me@me.com" --cc-cmd "./patman \
--cc-cmd cc-fname" cover p1 p2'
    >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
False, alias)
    'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
"f.bloggs@napier.co.nz" --cc "j.bloggs@napier.co.nz" --cc \
"m.poppins@cloud.net" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'

    # Restore argv[0] since we clobbered it.
    >>> sys.argv[0] = _old_argv0
    """
    to = BuildEmailList(series.get('to'), '--to', alias, raise_on_error)
    if not to:
        print ("No recipient, please add something like this to a commit\n"
            "Series-to: Fred Bloggs <f.blogs@napier.co.nz>")
        return
    cc = BuildEmailList(series.get('cc'), '--cc', alias, raise_on_error)
    if self_only:
        # Test mode: send only to the current user, with no Cc list
        to = BuildEmailList([os.getenv('USER')], '--to', alias, raise_on_error)
        cc = []
    cmd = ['git', 'send-email', '--annotate']
    if in_reply_to:
        cmd.append('--in-reply-to="%s"' % in_reply_to)

    cmd += to
    cmd += cc
    # git invokes this command to work out the per-patch Cc list
    cmd += ['--cc-cmd', '"%s --cc-cmd %s"' % (sys.argv[0], cc_fname)]
    if cover_fname:
        cmd.append(cover_fname)
    cmd += args
    # NOTE(review): 'str' shadows the builtin
    str = ' '.join(cmd)
    if not dry_run:
        os.system(str)
    return str
def LookupEmail(lookup_name, alias=None, raise_on_error=True, level=0):
"""If an email address is an alias, look it up and return the full name
TODO: Why not just use git's own alias feature?
Args:
lookup_name: Alias or email address to look up
alias: Dictionary containing aliases (None to use settings default)
raise_on_error: True to raise an error when an alias fails to match,
False to just print a message.
Returns:
tuple:
list containing a list of email addresses
Raises:
OSError if a recursive alias reference was found
ValueError if an alias was not found
>>> alias = {}
>>> alias['fred'] = ['f.bloggs@napier.co.nz']
>>> alias['john'] = ['j.bloggs@napier.co.nz']
>>> alias['mary'] = ['m.poppins@cloud.net']
>>> alias['boys'] = ['fred', ' john', 'f.bloggs@napier.co.nz']
>>> alias['all'] = ['fred ', 'john', ' mary ']
>>> alias['loop'] = ['other', 'john', ' mary ']
>>> alias['other'] = ['loop', 'john', ' mary ']
>>> LookupEmail('mary', alias)
['m.poppins@cloud.net']
>>> LookupEmail('arthur.wellesley@howe.ro.uk', alias)
['arthur.wellesley@howe.ro.uk']
>>> LookupEmail('boys', alias)
['f.bloggs@napier.co.nz', 'j.bloggs@napier.co.nz']
>>> LookupEmail('all', alias)
['f.bloggs@napier.co.nz', 'j.bloggs@napier.co.nz', 'm.poppins@cloud.net']
>>> LookupEmail('odd', alias)
Traceback (most recent call last):
...
ValueError: Alias 'odd' not found
>>> LookupEmail('loop', alias)
Traceback (most recent call last):
...
OSError: Recursive email alias at 'other'
>>> LookupEmail('odd', alias, raise_on_error=False)
\033[1;31mAlias 'odd' not found\033[0m
[]
>>> # In this case the loop part will effectively be ignored.
>>> LookupEmail('loop', alias, raise_on_error=False)
\033[1;31mRecursive email alias at 'other'\033[0m
\033[1;31mRecursive email alias at 'john'\033[0m
\033[1;31mRecursive email alias at 'mary'\033[0m
['j.bloggs@napier.co.nz', 'm.poppins@cloud.net']
"""
if not alias:
alias = settings.alias
lookup_name = lookup_name.strip()
if '@' in lookup_name: # Perhaps a real email address
return [lookup_name]
lookup_name = lookup_name.lower()
col = terminal.Color()
out_list = []
if level > 10:
msg = "Recursive email alias at '%s'" % lookup_name
if raise_on_error:
raise OSError, msg
else:
print col.Color(col.RED, msg)
return out_list
if lookup_name:
if not lookup_name in alias:
msg = "Alias '%s' not found" % lookup_name
if raise_on_error:
raise ValueError, msg
else:
print col.Color(col.RED, msg)
return out_list
for item in alias[lookup_name]:
todo = LookupEmail(item, alias, raise_on_error, level + 1)
for new_item in todo:
if not new_item in out_list:
out_list.append(new_item)
#print "No match for alias '%s'" % lookup_name
return out_list
def GetTopLevel():
    """Return name of top-level directory for this git repo.

    Returns:
        Full path to git top-level directory

    This test makes sure that we are running tests in the right subdir
    >>> os.path.realpath(os.path.dirname(__file__)) == \
            os.path.join(GetTopLevel(), 'tools', 'patman')
    True
    """
    # Let git work out the top level rather than searching for .git ourselves
    return command.OutputOneLine('git', 'rev-parse', '--show-toplevel')
def GetAliasFile():
    """Gets the name of the git alias file.

    Reads git's 'sendemail.aliasesfile' setting; relative paths are
    resolved against the repository's top-level directory.

    Returns:
        Filename of git alias file, or None if none
    """
    fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile',
            raise_on_error=False)
    if fname:
        return os.path.join(GetTopLevel(), fname.strip())
    return fname
def GetDefaultUserName():
    """Gets the user.name from .gitconfig file.

    Returns:
        User name found in .gitconfig file, or None if none
    """
    return command.OutputOneLine('git', 'config', '--global', 'user.name')
def GetDefaultUserEmail():
    """Gets the user.email from the global .gitconfig file.

    Returns:
        User's email found in .gitconfig file, or None if none
    """
    return command.OutputOneLine('git', 'config', '--global', 'user.email')
def Setup():
    """Set up git utils, by reading the alias files."""
    # Check for a git alias file also (sendemail.aliasesfile); aliases from
    # it supplement those already known to patman's settings module.
    alias_fname = GetAliasFile()
    if alias_fname:
        settings.ReadGitAliases(alias_fname)
def GetHead():
    """Get the hash of the current HEAD

    Returns:
        Hash of HEAD
    """
    # 'git show -s' prints only the commit, formatted here as the bare hash
    return command.OutputOneLine('git', 'show', '-s', '--pretty=format:%H')
if __name__ == "__main__":
    # Run this module's doctests when executed directly
    import doctest

    doctest.testmod()
| lkylei/ten_thousand | roms/u-boot/tools/patman/gitutil.py | Python | gpl-2.0 | 18,813 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services."
required: false
default: null
port:
description:
- "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
required: false
default: null
rich_rule:
description:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
source:
description:
- 'The source/network you would like to add/remove to/from firewalld'
required: false
default: null
version_added: "2.0"
interface:
description:
- 'The interface you would like to add/remove to/from a zone in firewalld'
required: false
default: null
version_added: "2.1"
zone:
description:
- 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- "Should this configuration be in the running firewalld configuration or persist across reboots."
required: false
default: null
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
required: false
default: false
version_added: "1.9"
state:
description:
- "Should this port accept(enabled) or reject(disabled) connections."
required: true
choices: [ "enabled", "disabled" ]
timeout:
description:
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
masquerade:
description:
- 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
required: false
default: null
version_added: "2.1"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, who may not be installed by default if the distribution switched to python 3
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
- firewalld: service=https permanent=true state=enabled
- firewalld: port=8081/tcp permanent=true state=disabled
- firewalld: port=161-162/udp permanent=true state=enabled
- firewalld: zone=dmz service=http permanent=true state=enabled
- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
- firewalld: source='192.168.1.0/24' zone=internal state=enabled
- firewalld: zone=trusted interface=eth2 permanent=true state=enabled
- firewalld: masquerade=yes state=enabled permanent=true zone=dmz
'''
import os
import re
try:
    import firewall.config
    FW_VERSION = firewall.config.VERSION
    from firewall.client import Rich_Rule
    from firewall.client import FirewallClient
    # One module-level client connection, shared by every helper below.
    fw = FirewallClient()
    if not fw.connected:
        HAS_FIREWALLD = False
    else:
        HAS_FIREWALLD = True
except ImportError:
    HAS_FIREWALLD = False
#####################
# masquerade handling
#
def get_masquerade_enabled(zone):
    """Return True if masquerading is active in the runtime config of zone."""
    # queryMasquerade already answers the question; collapse the manual
    # if/else-over-a-boolean into a single bool() conversion.
    return bool(fw.queryMasquerade(zone))
def get_masquerade_enabled_permanent(zone):
    """Return True if masquerading is enabled in zone's permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # getMasquerade() returns a boolean-ish dbus value; avoid the verbose
    # `if x == True: return True` branch.
    return bool(fw_settings.getMasquerade())
def set_masquerade_enabled(zone):
    # Runtime-only change; lost on firewalld reload unless also made permanent.
    fw.addMasquerade(zone)
def set_masquerade_disabled(zone):
    # Runtime-only change; the permanent configuration is untouched.
    fw.removeMasquerade(zone)
def set_masquerade_permanent(zone, masquerade):
    """Persist the given masquerade flag into the permanent config of zone."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.setMasquerade(masquerade)
    zone_obj.update(zone_settings)
################
# port handling
#
def get_port_enabled(zone, port_proto):
    """Return True if the [port, protocol] pair is open in zone's runtime config."""
    # `in` already yields a bool; the explicit if/else was redundant.
    return port_proto in fw.getPorts(zone)
def set_port_enabled(zone, port, protocol, timeout):
    # Runtime-only; a timeout (seconds) of 0 means "until reload/reboot".
    fw.addPort(zone, port, protocol, timeout)
def set_port_disabled(zone, port, protocol):
    # Runtime-only removal of an open port.
    fw.removePort(zone, port, protocol)
def get_port_enabled_permanent(zone, port_proto):
    """Return True if the [port, protocol] pair is in zone's permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # getPorts() returns tuples while callers pass a list; normalize first.
    return tuple(port_proto) in fw_settings.getPorts()
def set_port_enabled_permanent(zone, port, protocol):
    """Open port/protocol in the permanent configuration of zone."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.addPort(port, protocol)
    zone_obj.update(zone_settings)
def set_port_disabled_permanent(zone, port, protocol):
    """Close port/protocol in the permanent configuration of zone."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.removePort(port, protocol)
    zone_obj.update(zone_settings)
####################
# source handling
#
def get_source(zone, source):
    """Return True if source (IP/CIDR) is bound to zone's permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Membership test already yields a bool; no if/else branch needed.
    return source in fw_settings.getSources()
def add_source(zone, source):
    """Bind a source network/address to zone in the permanent configuration."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.addSource(source)
    zone_obj.update(zone_settings)
def remove_source(zone, source):
    """Unbind a source network/address from zone in the permanent configuration."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.removeSource(source)
    zone_obj.update(zone_settings)
####################
# interface handling
#
def get_interface(zone, interface):
    """Return True if interface is bound to zone in the permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Membership test already yields a bool; no if/else branch needed.
    return interface in fw_settings.getInterfaces()
def add_interface(zone, interface):
    """Bind interface to zone in the permanent configuration."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.addInterface(interface)
    zone_obj.update(zone_settings)
def remove_interface(zone, interface):
    """Unbind interface from zone in the permanent configuration."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.removeInterface(interface)
    zone_obj.update(zone_settings)
####################
# service handling
#
def get_service_enabled(zone, service):
    """Return True if service is enabled in zone's runtime config."""
    # Membership test already yields a bool; no if/else branch needed.
    return service in fw.getServices(zone)
def set_service_enabled(zone, service, timeout):
    # Runtime-only; a timeout (seconds) of 0 means "until reload/reboot".
    fw.addService(zone, service, timeout)
def set_service_disabled(zone, service):
    # Runtime-only removal; the permanent configuration is untouched.
    fw.removeService(zone, service)
def get_service_enabled_permanent(zone, service):
    """Return True if service is enabled in zone's permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Membership test already yields a bool; no if/else branch needed.
    return service in fw_settings.getServices()
def set_service_enabled_permanent(zone, service):
    """Enable service in the permanent configuration of zone."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.addService(service)
    zone_obj.update(zone_settings)
def set_service_disabled_permanent(zone, service):
    """Disable service in the permanent configuration of zone."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.removeService(service)
    zone_obj.update(zone_settings)
####################
# rich rule handling
#
def get_rich_rule_enabled(zone, rule):
    """Return True if the rich rule is present in zone's runtime config."""
    # Normalize the user's rule text to firewalld's canonical string form
    # before comparing, since spacing/ordering may differ.
    rule = str(Rich_Rule(rule_str=rule))
    return rule in fw.getRichRules(zone)
def set_rich_rule_enabled(zone, rule, timeout):
    # Runtime-only; rule is the raw rich-rule string, timeout in seconds.
    fw.addRichRule(zone, rule, timeout)
def set_rich_rule_disabled(zone, rule):
    # Runtime-only removal of a rich rule.
    fw.removeRichRule(zone, rule)
def get_rich_rule_enabled_permanent(zone, rule):
    """Return True if the rich rule is present in zone's permanent config."""
    fw_zone = fw.config().getZoneByName(zone)
    fw_settings = fw_zone.getSettings()
    # Normalize to firewalld's canonical rule string before comparing.
    rule = str(Rich_Rule(rule_str=rule))
    return rule in fw_settings.getRichRules()
def set_rich_rule_enabled_permanent(zone, rule):
    """Add the rich rule to the permanent configuration of zone."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.addRichRule(rule)
    zone_obj.update(zone_settings)
def set_rich_rule_disabled_permanent(zone, rule):
    """Remove the rich rule from the permanent configuration of zone."""
    zone_obj = fw.config().getZoneByName(zone)
    zone_settings = zone_obj.getSettings()
    zone_settings.removeRichRule(rule)
    zone_obj.update(zone_settings)
def main():
    """Ansible entry point: reconcile one firewalld item with desired state.

    Exactly one of service/port/rich_rule/interface/masquerade may be
    supplied per task (plus the separately handled source).  The change is
    applied to the permanent and/or runtime configuration depending on the
    `permanent` and `immediate` parameters, honoring check mode throughout.
    """
    module = AnsibleModule(
        argument_spec=dict(
            service=dict(required=False, default=None),
            port=dict(required=False, default=None),
            rich_rule=dict(required=False, default=None),
            zone=dict(required=False, default=None),
            immediate=dict(type='bool', default=False),
            source=dict(required=False, default=None),
            permanent=dict(type='bool', required=False, default=None),
            state=dict(choices=['enabled', 'disabled'], required=True),
            timeout=dict(type='int', required=False, default=0),
            interface=dict(required=False, default=None),
            masquerade=dict(required=False, default=None),
        ),
        supports_check_mode=True
    )
    ## Parameter sanity checking
    # source operations are always permanent, so `permanent` is mandatory
    # only for everything else.
    if module.params['source'] is None and module.params['permanent'] is None:
        module.fail_json(msg='permanent is a required parameter')
    if module.params['interface'] is not None and module.params['zone'] is None:
        # BUG FIX: this used to call module.fail(), which does not exist on
        # AnsibleModule and raised AttributeError instead of a clean error.
        module.fail_json(msg='zone is a required parameter')
    if not HAS_FIREWALLD:
        module.fail_json(msg='firewalld and its python 2 module are required for this module')
    ## Pre-run version checking
    if FW_VERSION < "0.2.11":
        # BUG FIX: the message used to claim ">= 2.0.11" while the actual
        # requirement (and the check above) is firewalld >= 0.2.11.
        module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')
    ## Global Vars
    changed = False
    msgs = []
    service = module.params['service']
    rich_rule = module.params['rich_rule']
    source = module.params['source']
    if module.params['port'] is not None:
        # BUG FIX: the old two-element unpack raised an uncaught ValueError
        # for input like "8080" (no '/'), making the friendly error below
        # unreachable.  Validate the split explicitly instead.
        port_proto = module.params['port'].split('/')
        if len(port_proto) != 2 or not port_proto[1]:
            module.fail_json(msg='improper port format (missing protocol?)')
        port, protocol = port_proto
    else:
        port = None
    if module.params['zone'] is not None:
        zone = module.params['zone']
    else:
        zone = fw.getDefaultZone()
    permanent = module.params['permanent']
    desired_state = module.params['state']
    immediate = module.params['immediate']
    timeout = module.params['timeout']
    interface = module.params['interface']
    masquerade = module.params['masquerade']
    ## Check for firewalld running
    try:
        if not fw.connected:
            module.fail_json(msg='firewalld service must be running')
    except AttributeError:
        # Very old clients lack the `connected` attribute entirely.
        # BUG FIX: version in the message corrected from 2.0.11 to 0.2.11.
        module.fail_json(msg="firewalld connection can't be established,"
                             " version likely too old. Requires firewalld >= 0.2.11")
    # Only one kind of target may be modified per task invocation.
    modification_count = 0
    for param in (service, port, rich_rule, interface, masquerade):
        if param is not None:
            modification_count += 1
    if modification_count > 1:
        # CONSISTENCY FIX: masquerade is counted above, so mention it here too.
        module.fail_json(msg='can only operate on port, service, rich_rule, '
                             'interface or masquerade at once')
    if service is not None:
        if permanent:
            is_enabled = get_service_enabled_permanent(zone, service)
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_enabled_permanent(zone, service)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_disabled_permanent(zone, service)
                    changed = True
        if immediate or not permanent:
            is_enabled = get_service_enabled(zone, service)
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_enabled(zone, service, timeout)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_service_disabled(zone, service)
                    changed = True
        if changed:
            msgs.append("Changed service %s to %s" % (service, desired_state))
    if source is not None:
        # Source binding is always a permanent-config operation.
        is_enabled = get_source(zone, source)
        if desired_state == "enabled":
            if not is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                add_source(zone, source)
                changed = True
                msgs.append("Added %s to zone %s" % (source, zone))
        elif desired_state == "disabled":
            if is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                remove_source(zone, source)
                changed = True
                msgs.append("Removed %s from zone %s" % (source, zone))
    if port is not None:
        if permanent:
            is_enabled = get_port_enabled_permanent(zone, [port, protocol])
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_enabled_permanent(zone, port, protocol)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_disabled_permanent(zone, port, protocol)
                    changed = True
        if immediate or not permanent:
            is_enabled = get_port_enabled(zone, [port, protocol])
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_enabled(zone, port, protocol, timeout)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_port_disabled(zone, port, protocol)
                    changed = True
        if changed:
            msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol),
                                                   desired_state))
    if rich_rule is not None:
        if permanent:
            is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule)
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_enabled_permanent(zone, rich_rule)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_disabled_permanent(zone, rich_rule)
                    changed = True
        if immediate or not permanent:
            is_enabled = get_rich_rule_enabled(zone, rich_rule)
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_enabled(zone, rich_rule, timeout)
                    changed = True
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_rich_rule_disabled(zone, rich_rule)
                    changed = True
        if changed:
            msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
    if interface is not None:
        # Interface binding is applied to the permanent configuration.
        is_enabled = get_interface(zone, interface)
        if desired_state == "enabled":
            if not is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                add_interface(zone, interface)
                changed = True
                msgs.append("Added %s to zone %s" % (interface, zone))
        elif desired_state == "disabled":
            if is_enabled:
                if module.check_mode:
                    module.exit_json(changed=True)
                remove_interface(zone, interface)
                changed = True
                msgs.append("Removed %s from zone %s" % (interface, zone))
    if masquerade is not None:
        if permanent:
            is_enabled = get_masquerade_enabled_permanent(zone)
            msgs.append('Permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_permanent(zone, True)
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_permanent(zone, False)
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))
        if immediate or not permanent:
            is_enabled = get_masquerade_enabled(zone)
            msgs.append('Non-permanent operation')
            if desired_state == "enabled":
                if not is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_enabled(zone)
                    changed = True
                    msgs.append("Added masquerade to zone %s" % (zone))
            elif desired_state == "disabled":
                if is_enabled:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    set_masquerade_disabled(zone)
                    changed = True
                    msgs.append("Removed masquerade from zone %s" % (zone))
    module.exit_json(changed=changed, msg=', '.join(msgs))
#################################################
# import module snippets
# Old-style Ansible module boilerplate: the star import injects AnsibleModule
# (and friends) into this namespace, then the module runs as a script.
from ansible.module_utils.basic import *
main()
| alxgu/ansible-modules-extras | system/firewalld.py | Python | gpl-3.0 | 20,455 |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""add custom head tags
add setting for custom tags (HTML) in <head> section
added: 2012-02-13 (v0.10dev)
previously migrate script v054
Revision ID: 280565a54124
Revises: 4d27ff5680e5
Create Date: 2013-05-14 22:38:02.552230
"""
# revision identifiers, used by Alembic.
revision = '280565a54124'
down_revision = '4d27ff5680e5'
from alembic.op import execute, inline_literal
from sqlalchemy import Integer, Unicode, UnicodeText
from sqlalchemy import Column, MetaData, Table
# -- table definition ---------------------------------------------------------
# Minimal stand-in for the live `settings` table: migrations must not import
# application models, so the columns this script touches are redeclared here.
metadata = MetaData()
settings = Table('settings', metadata,
    Column('id', Integer, autoincrement=True, primary_key=True),
    Column('key', Unicode(255), nullable=False, unique=True),
    Column('value', UnicodeText),
    mysql_engine='InnoDB',
    mysql_charset='utf8',
)
# -- helpers ------------------------------------------------------------------
def insert_setting(key, value):
    """Add one row to the settings table with the given key/value pair."""
    stmt = settings.insert().values({
        'key': inline_literal(key),
        'value': inline_literal(value),
    })
    execute(stmt)
def delete_setting(key):
    """Delete the settings row whose key matches exactly."""
    stmt = settings.delete().where(
        settings.c.key == inline_literal(key))
    execute(stmt)
# -----------------------------------------------------------------------------
# (key, default value) pairs managed by this migration.
SETTINGS = [
    (u'appearance_custom_head_tags', u''),
]
def upgrade():
    """Insert every setting introduced by this migration."""
    for entry in SETTINGS:
        insert_setting(*entry)
def downgrade():
    """Remove the settings introduced by this migration (values are ignored)."""
    for key, _value in SETTINGS:
        delete_setting(key)
| rbu/mediadrop | mediadrop/migrations/versions/004-280565a54124-add_custom_head_tags.py | Python | gpl-3.0 | 1,908 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Types for specifying saving and loading behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class SaveSpec(object):
  """Describes a single tensor (or tensor slice) that should be checkpointed."""

  def __init__(self, tensor, slice_spec, name, dtype=None, device=None):
    """Creates a `SaveSpec` object.

    Args:
      tensor: the tensor to save or callable that produces a tensor to save.
        If the value is `None`, the `SaveSpec` is ignored.
      slice_spec: the slice to be saved. See `Variable.SaveSliceInfo`.
      name: the name to save the tensor under.
      dtype: The data type of the Tensor. Required if `tensor` is callable.
        Used for error checking in the restore op.
      device: The device generating and consuming this tensor. Required if
        `tensor` is callable. Used to group objects to save by device.
    """
    self._tensor = tensor
    self.slice_spec = slice_spec
    self.name = name
    if not callable(tensor):
      # A concrete tensor carries its own dtype/device; an explicit device
      # argument still wins when one was supplied.
      self.dtype = tensor.dtype
      self.device = device if device is not None else tensor.device
    else:
      # A deferred tensor cannot be inspected yet, so both attributes are
      # mandatory arguments in this case.
      if dtype is None or device is None:
        raise AssertionError(
            "When passing a callable `tensor` to a SaveSpec, an explicit "
            "dtype and device must be provided.")
      self.dtype = dtype
      self.device = device

  @property
  def tensor(self):
    """The concrete tensor, invoking the producer callable if necessary."""
    producer = self._tensor
    return producer() if callable(producer) else producer
class SaveableObject(object):
  """Base class for objects the saver knows how to write and read back."""

  def __init__(self, op, specs, name):
    """Creates a `SaveableObject` object.

    Args:
      op: the "producer" object that this class wraps; it produces a list of
        tensors to save. E.g., a "Variable" object saving its backing tensor.
      specs: a list of SaveSpec, each element of which describes one tensor to
        save under this object. All Tensors must be on the same device.
      name: the name to save the object under.
    """
    self.op = op
    self.specs = specs
    self.name = name

  @property
  def optional_restore(self):
    """Whether restore assertions may treat this object as optional (default: no)."""
    return False

  @property
  def device(self):
    """Device of the SaveSpec tensors, taken from the first spec."""
    return self.specs[0].device

  def restore(self, restored_tensors, restored_shapes):
    """Restores this object from 'restored_tensors'; subclasses must override.

    Args:
      restored_tensors: the tensors that were loaded from a checkpoint
      restored_shapes: the shapes this object should conform to after
        restore, or None.

    Returns:
      An operation that restores the state of the object.

    Raises:
      ValueError: always, since the base class has no restore behavior.
    """
    del restored_tensors, restored_shapes  # unused in the abstract base
    raise ValueError("Calling an abstract method.")
| karllessard/tensorflow | tensorflow/python/training/saving/saveable_object.py | Python | apache-2.0 | 3,646 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from nova.api.openstack import extensions
class Multiple_create(extensions.ExtensionDescriptor):
    """Allow multiple create in the Create Server v1.1 API."""
    # Metadata read by Nova's extension loader; `alias` is the URL-facing id.
    name = "MultipleCreate"
    alias = "os-multiple-create"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "multiplecreate/api/v1.1")
    updated = "2012-08-07T00:00:00+00:00"
| zestrada/nova-cs498cc | nova/api/openstack/compute/contrib/multiple_create.py | Python | apache-2.0 | 1,025 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import pydoc
import six
import sys
import warnings
try:
# Since python 3
import collections.abc as collections_abc
except ImportError:
# Won't work after python 3.8
import collections as collections_abc
try:
import unittest2 as unittest # PY26
except ImportError:
import unittest
try:
cmp # Python 2
except NameError:
cmp = lambda x, y: (x > y) - (x < y) # Python 3
from google.protobuf import map_proto2_unittest_pb2
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import encoder
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import packed_field_test_pb2
from google.protobuf.internal import test_util
from google.protobuf.internal import test_proto3_optional_pb2
from google.protobuf.internal import testing_refleaks
from google.protobuf import message
from google.protobuf.internal import _parameterized
# Highest code point on a "narrow" (UCS-2) Python build; kept as a module
# constant so string tests can stay within the BMP on such builds.
UCS2_MAXUNICODE = 65535
if six.PY3:
  # Python 3 has no `long`; alias it so tests below run on both majors.
  long = int
# Older interpreters targeted by this file do not reliably provide
# math.isnan()/isinf(), so tiny arithmetic-based equivalents live here.
def isnan(val):
  """Return True iff val is a NaN (the only value not equal to itself)."""
  return not (val == val)
def isinf(val):
  """Return True iff val is +/- infinity.

  A finite number times zero is zero, while infinity times zero is NaN;
  NaN input is screened out first since NaN * 0 is also NaN.
  """
  scaled = val * 0
  return (val == val) and (scaled != scaled)
def IsPosInf(val):
  # Positive infinity is the only infinite value greater than zero.
  return isinf(val) and (val > 0)
def IsNegInf(val):
  # Negative infinity is the only infinite value less than zero.
  return isinf(val) and (val < 0)
# Promote DeprecationWarning to an error so deprecated API use fails loudly.
warnings.simplefilter('error', DeprecationWarning)
@_parameterized.named_parameters(
('_proto2', unittest_pb2),
('_proto3', unittest_proto3_arena_pb2))
@testing_refleaks.TestCase
class MessageTest(unittest.TestCase):
  def testBadUtf8String(self, message_module):
    """Malformed UTF-8 in a string field should raise UnicodeDecodeError."""
    if api_implementation.Type() != 'python':
      self.skipTest("Skipping testBadUtf8String, currently only the python "
                    "api implementation raises UnicodeDecodeError when a "
                    "string field contains bad utf-8.")
    bad_utf8_data = test_util.GoldenFileData('bad_utf8_string')
    with self.assertRaises(UnicodeDecodeError) as context:
      message_module.TestAllTypes.FromString(bad_utf8_data)
    # The error message should identify which field held the bad bytes.
    self.assertIn('TestAllTypes.optional_string', str(context.exception))
  def testGoldenMessage(self, message_module):
    """Parsing the golden message and re-serializing must round-trip exactly."""
    # Proto3 doesn't have the "default_foo" members or foreign enums,
    # and doesn't preserve unknown fields, so for proto3 we use a golden
    # message that doesn't have these fields set.
    if message_module is unittest_pb2:
      golden_data = test_util.GoldenFileData(
          'golden_message_oneof_implemented')
    else:
      golden_data = test_util.GoldenFileData('golden_message_proto3')
    golden_message = message_module.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    if message_module is unittest_pb2:
      test_util.ExpectAllFieldsSet(self, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    # A deep copy must serialize to the identical bytes as well.
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())
  def testGoldenPackedMessage(self, message_module):
    """Packed golden data parses fully and re-serializes byte-identically."""
    golden_data = test_util.GoldenFileData('golden_packed_fields_message')
    golden_message = message_module.TestPackedTypes()
    parsed_bytes = golden_message.ParseFromString(golden_data)
    all_set = message_module.TestPackedTypes()
    test_util.SetAllPackedFields(all_set)
    # ParseFromString returns the number of bytes it consumed.
    self.assertEqual(parsed_bytes, len(golden_data))
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())
  def testParseErrors(self, message_module):
    """FromString rejects non-bytes input and malformed wire data."""
    msg = message_module.TestAllTypes()
    self.assertRaises(TypeError, msg.FromString, 0)
    self.assertRaises(Exception, msg.FromString, '0')
    # TODO(jieluo): Fix cpp extension to raise error instead of warning.
    # b/27494216
    end_tag = encoder.TagBytes(1, 4)
    if api_implementation.Type() == 'python':
      with self.assertRaises(message.DecodeError) as context:
        msg.FromString(end_tag)
      self.assertEqual('Unexpected end-group tag.', str(context.exception))
    # Field number 0 is illegal.
    self.assertRaises(message.DecodeError, msg.FromString, b'\3\4')
  def testDeterminismParameters(self, message_module):
    """SerializeToString's `deterministic` arg accepts None/False/True and
    rejects values that raise when converted to bool."""
    # This message is always deterministically serialized, even if determinism
    # is disabled, so we can use it to verify that all the determinism
    # parameters work correctly.
    golden_data = (b'\xe2\x02\nOne string'
                   b'\xe2\x02\nTwo string'
                   b'\xe2\x02\nRed string'
                   b'\xe2\x02\x0bBlue string')
    golden_message = message_module.TestAllTypes()
    golden_message.repeated_string.extend([
        'One string',
        'Two string',
        'Red string',
        'Blue string',
    ])
    self.assertEqual(golden_data,
                     golden_message.SerializeToString(deterministic=None))
    self.assertEqual(golden_data,
                     golden_message.SerializeToString(deterministic=False))
    self.assertEqual(golden_data,
                     golden_message.SerializeToString(deterministic=True))
    # Helper types whose truthiness always raises, to prove the argument is
    # actually bool-converted (both Python 2's __nonzero__ and 3's __bool__).
    class BadArgError(Exception):
      pass
    class BadArg(object):
      def __nonzero__(self):
        raise BadArgError()
      def __bool__(self):
        raise BadArgError()
    with self.assertRaises(BadArgError):
      golden_message.SerializeToString(deterministic=BadArg())
  def testPickleSupport(self, message_module):
    """A parsed message survives a pickle/unpickle round trip unchanged."""
    golden_data = test_util.GoldenFileData('golden_message')
    golden_message = message_module.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    pickled_message = pickle.dumps(golden_message)
    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)
  def testPickleNestedMessage(self, message_module):
    """A nested message class instance pickles and unpickles correctly."""
    golden_message = message_module.TestPickleNestedMessage.NestedMessage(bb=1)
    pickled_message = pickle.dumps(golden_message)
    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)
  def testPickleNestedNestedMessage(self, message_module):
    """A doubly-nested message class instance pickles and unpickles correctly."""
    cls = message_module.TestPickleNestedMessage.NestedMessage
    golden_message = cls.NestedNestedMessage(cc=1)
    pickled_message = pickle.dumps(golden_message)
    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)
  def testPositiveInfinity(self, message_module):
    """+inf round-trips through parse/serialize in float and double fields."""
    if message_module is unittest_pb2:
      golden_data = (b'\x5D\x00\x00\x80\x7F'
                     b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
                     b'\xCD\x02\x00\x00\x80\x7F'
                     b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
    else:
      # Proto3 repeated numeric fields are packed (length-delimited wire form).
      golden_data = (b'\x5D\x00\x00\x80\x7F'
                     b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
                     b'\xCA\x02\x04\x00\x00\x80\x7F'
                     b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
    golden_message = message_module.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsPosInf(golden_message.optional_float))
    self.assertTrue(IsPosInf(golden_message.optional_double))
    self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
    self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())
  def testNegativeInfinity(self, message_module):
    """-inf round-trips through parse/serialize in float and double fields."""
    if message_module is unittest_pb2:
      golden_data = (b'\x5D\x00\x00\x80\xFF'
                     b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
                     b'\xCD\x02\x00\x00\x80\xFF'
                     b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
    else:
      # Proto3 repeated numeric fields are packed (length-delimited wire form).
      golden_data = (b'\x5D\x00\x00\x80\xFF'
                     b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
                     b'\xCA\x02\x04\x00\x00\x80\xFF'
                     b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
    golden_message = message_module.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsNegInf(golden_message.optional_float))
    self.assertTrue(IsNegInf(golden_message.optional_double))
    self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
    self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())
  def testNotANumber(self, message_module):
    """NaN parses correctly and any serialized NaN encoding re-parses to NaN."""
    golden_data = (b'\x5D\x00\x00\xC0\x7F'
                   b'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
                   b'\xCD\x02\x00\x00\xC0\x7F'
                   b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
    golden_message = message_module.TestAllTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(isnan(golden_message.optional_float))
    self.assertTrue(isnan(golden_message.optional_double))
    self.assertTrue(isnan(golden_message.repeated_float[0]))
    self.assertTrue(isnan(golden_message.repeated_double[0]))
    # The protocol buffer may serialize to any one of multiple different
    # representations of a NaN. Rather than verify a specific representation,
    # verify the serialized string can be converted into a correctly
    # behaving protocol buffer.
    serialized = golden_message.SerializeToString()
    # NOTE(review): this local deliberately shadows the imported `message`
    # module for the rest of the method.
    message = message_module.TestAllTypes()
    message.ParseFromString(serialized)
    self.assertTrue(isnan(message.optional_float))
    self.assertTrue(isnan(message.optional_double))
    self.assertTrue(isnan(message.repeated_float[0]))
    self.assertTrue(isnan(message.repeated_double[0]))
  def testPositiveInfinityPacked(self, message_module):
    """+inf round-trips in explicitly packed float and double fields."""
    golden_data = (b'\xA2\x06\x04\x00\x00\x80\x7F'
                   b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
    golden_message = message_module.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsPosInf(golden_message.packed_float[0]))
    self.assertTrue(IsPosInf(golden_message.packed_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())
  def testNegativeInfinityPacked(self, message_module):
    """-inf round-trips in explicitly packed float and double fields."""
    golden_data = (b'\xA2\x06\x04\x00\x00\x80\xFF'
                   b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
    golden_message = message_module.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(IsNegInf(golden_message.packed_float[0]))
    self.assertTrue(IsNegInf(golden_message.packed_double[0]))
    self.assertEqual(golden_data, golden_message.SerializeToString())
  def testNotANumberPacked(self, message_module):
    """NaN parses from packed fields; its serialization re-parses to NaN."""
    golden_data = (b'\xA2\x06\x04\x00\x00\xC0\x7F'
                   b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
    golden_message = message_module.TestPackedTypes()
    golden_message.ParseFromString(golden_data)
    self.assertTrue(isnan(golden_message.packed_float[0]))
    self.assertTrue(isnan(golden_message.packed_double[0]))
    # NaN may serialize to any valid NaN bit pattern, so only check that the
    # round-tripped values are still NaN.
    serialized = golden_message.SerializeToString()
    # NOTE(review): this local deliberately shadows the imported `message`
    # module for the rest of the method.
    message = message_module.TestPackedTypes()
    message.ParseFromString(serialized)
    self.assertTrue(isnan(message.packed_float[0]))
    self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self, message_module):
  """Serialize/parse round trips for boundary float32 values.

  Covers the extremes of the float exponent range (positive and negative),
  the maximum finite float32, overflow to +/-inf when a too-large double is
  assigned, and a subnormal-range value.
  """
  message = message_module.TestAllTypes()
  # Most positive exponent, no significand bits set.
  kMostPosExponentNoSigBits = math.pow(2, 127)
  message.optional_float = kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
  # Most positive exponent, one significand bit set.
  kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
  message.optional_float = kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
  # Repeat last two cases with values of same magnitude, but negative.
  message.optional_float = -kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
  message.optional_float = -kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
  # Most negative exponent, no significand bits set.
  kMostNegExponentNoSigBits = math.pow(2, -127)
  message.optional_float = kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
  # Most negative exponent, one significand bit set.
  kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
  message.optional_float = kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
  # Repeat last two cases with values of the same magnitude, but negative.
  message.optional_float = -kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
  message.optional_float = -kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
  # Max 4 bytes float value (largest finite float32).
  max_float = float.fromhex('0x1.fffffep+127')
  message.optional_float = max_float
  self.assertAlmostEqual(message.optional_float, max_float)
  serialized_data = message.SerializeToString()
  message.ParseFromString(serialized_data)
  self.assertAlmostEqual(message.optional_float, max_float)
  # Test set double to float field.
  # A double beyond float32 range must overflow to infinity, and the
  # infinity must survive serialization.
  message.optional_float = 3.4028235e+39
  self.assertEqual(message.optional_float, float('inf'))
  serialized_data = message.SerializeToString()
  message.ParseFromString(serialized_data)
  self.assertEqual(message.optional_float, float('inf'))
  message.optional_float = -3.4028235e+39
  self.assertEqual(message.optional_float, float('-inf'))
  # A tiny value below the normal float32 range is kept (approximately).
  message.optional_float = 1.4028235e-39
  self.assertAlmostEqual(message.optional_float, 1.4028235e-39)
def testExtremeDoubleValues(self, message_module):
  """Serialize/parse round trips for boundary float64 values.

  Mirrors testExtremeFloatValues for the double field: extremes of the
  double exponent range, positive and negative, with and without a
  significand bit set.
  """
  message = message_module.TestAllTypes()
  # Most positive exponent, no significand bits set.
  kMostPosExponentNoSigBits = math.pow(2, 1023)
  message.optional_double = kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
  # Most positive exponent, one significand bit set.
  kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
  message.optional_double = kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
  # Repeat last two cases with values of same magnitude, but negative.
  message.optional_double = -kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
  message.optional_double = -kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
  # Most negative exponent, no significand bits set.
  kMostNegExponentNoSigBits = math.pow(2, -1023)
  message.optional_double = kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
  # Most negative exponent, one significand bit set.
  kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
  message.optional_double = kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
  # Repeat last two cases with values of the same magnitude, but negative.
  message.optional_double = -kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
  message.optional_double = -kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testFloatPrinting(self, message_module):
  """Text-format rendering of a simple float field value."""
  msg = message_module.TestAllTypes()
  msg.optional_float = 2.0
  self.assertEqual('optional_float: 2.0\n', str(msg))
def testHighPrecisionFloatPrinting(self, message_module):
  # NOTE(review): despite the name, this does not check str() output; it
  # verifies that the float32-truncated value stored in the field survives
  # a serialize/parse round trip exactly.
  msg = message_module.TestAllTypes()
  msg.optional_float = 0.12345678912345678
  old_float = msg.optional_float  # value after float32 truncation
  msg.ParseFromString(msg.SerializeToString())
  self.assertEqual(old_float, msg.optional_float)
def testHighPrecisionDoublePrinting(self, message_module):
  """Text format prints full double precision on Python 3.

  Python 2's repr() of floats rounds to fewer digits, hence the branch.
  """
  msg = message_module.TestAllTypes()
  msg.optional_double = 0.12345678912345678
  if sys.version_info >= (3,):
    self.assertEqual(str(msg), 'optional_double: 0.12345678912345678\n')
  else:
    self.assertEqual(str(msg), 'optional_double: 0.123456789123\n')
def testUnknownFieldPrinting(self, message_module):
  """Unknown fields are not rendered by str()."""
  populated = message_module.TestAllTypes()
  test_util.SetAllNonLazyFields(populated)
  # Parsing a fully-populated message into an empty message type turns
  # every field into an unknown field.
  empty = message_module.TestEmptyMessage()
  empty.ParseFromString(populated.SerializeToString())
  self.assertEqual(str(empty), '')
def testAppendRepeatedCompositeField(self, message_module):
  """append() copies submessages in and rejects non-message values.

  Fix: the original wrapped the bad append in ``try/except TypeError:
  pass``, which passes silently even if no TypeError is raised; use
  assertRaises to make the expectation explicit.
  """
  msg = message_module.TestAllTypes()
  msg.repeated_nested_message.append(
      message_module.TestAllTypes.NestedMessage(bb=1))
  nested = message_module.TestAllTypes.NestedMessage(bb=2)
  msg.repeated_nested_message.append(nested)
  # Appending a non-message value must raise and leave the field unchanged.
  with self.assertRaises(TypeError):
    msg.repeated_nested_message.append(1)
  self.assertEqual(2, len(msg.repeated_nested_message))
  self.assertEqual([1, 2],
                   [m.bb for m in msg.repeated_nested_message])
def testInsertRepeatedCompositeField(self, message_module):
  """insert() at positive, negative, and out-of-range indices.

  Out-of-range indices clamp (list semantics): 99 appends, -1000 prepends.
  Fix: the original wrapped the bad insert in ``try/except TypeError:
  pass``, which passes silently even if no TypeError is raised; use
  assertRaises to make the expectation explicit.
  """
  msg = message_module.TestAllTypes()
  msg.repeated_nested_message.insert(
      -1, message_module.TestAllTypes.NestedMessage(bb=1))
  sub_msg = msg.repeated_nested_message[0]
  msg.repeated_nested_message.insert(
      0, message_module.TestAllTypes.NestedMessage(bb=2))
  msg.repeated_nested_message.insert(
      99, message_module.TestAllTypes.NestedMessage(bb=3))
  msg.repeated_nested_message.insert(
      -2, message_module.TestAllTypes.NestedMessage(bb=-1))
  msg.repeated_nested_message.insert(
      -1000, message_module.TestAllTypes.NestedMessage(bb=-1000))
  # Inserting a non-message value must raise and leave the field unchanged.
  with self.assertRaises(TypeError):
    msg.repeated_nested_message.insert(1, 999)
  self.assertEqual(5, len(msg.repeated_nested_message))
  self.assertEqual([-1000, 2, -1, 1, 3],
                   [m.bb for m in msg.repeated_nested_message])
  self.assertEqual(str(msg),
                   'repeated_nested_message {\n'
                   '  bb: -1000\n'
                   '}\n'
                   'repeated_nested_message {\n'
                   '  bb: 2\n'
                   '}\n'
                   'repeated_nested_message {\n'
                   '  bb: -1\n'
                   '}\n'
                   'repeated_nested_message {\n'
                   '  bb: 1\n'
                   '}\n'
                   'repeated_nested_message {\n'
                   '  bb: 3\n'
                   '}\n')
  # The handle obtained before the later inserts still refers to the same
  # element.
  self.assertEqual(sub_msg.bb, 1)
def testMergeFromRepeatedField(self, message_module):
  """Container-level MergeFrom appends the other container's elements."""
  msg = message_module.TestAllTypes()
  msg.repeated_int32.append(1)
  msg.repeated_int32.append(3)
  msg.repeated_nested_message.add(bb=1)
  msg.repeated_nested_message.add(bb=2)
  other_msg = message_module.TestAllTypes()
  other_msg.repeated_nested_message.add(bb=3)
  other_msg.repeated_nested_message.add(bb=4)
  other_msg.repeated_int32.append(5)
  other_msg.repeated_int32.append(7)
  # Scalar container merge: existing elements are kept, new ones appended.
  msg.repeated_int32.MergeFrom(other_msg.repeated_int32)
  self.assertEqual(4, len(msg.repeated_int32))
  # Composite container merge: same append semantics.
  msg.repeated_nested_message.MergeFrom(other_msg.repeated_nested_message)
  self.assertEqual([1, 2, 3, 4],
                   [m.bb for m in msg.repeated_nested_message])
def testAddWrongRepeatedNestedField(self, message_module):
  """add() with bad arguments raises and leaves the field empty.

  Fix: the original wrapped each bad call in ``try/except ...: pass``,
  which passes silently even if no exception is raised; use assertRaises
  to make the expectations explicit.
  """
  msg = message_module.TestAllTypes()
  # add() takes only keyword arguments; a positional value is a TypeError.
  with self.assertRaises(TypeError):
    msg.repeated_nested_message.add('wrong')
  # An unknown field name in the keyword arguments is a ValueError.
  with self.assertRaises(ValueError):
    msg.repeated_nested_message.add(value_field='wrong')
  # Failed add() calls must not leave a partially-constructed element.
  self.assertEqual(len(msg.repeated_nested_message), 0)
def testRepeatedContains(self, message_module):
  """`in` works for both scalar and composite repeated fields."""
  msg = message_module.TestAllTypes()
  msg.repeated_int32.extend([1, 2, 3])
  self.assertIn(2, msg.repeated_int32)
  self.assertNotIn(0, msg.repeated_int32)
  msg.repeated_nested_message.add(bb=1)
  sub_msg1 = msg.repeated_nested_message[0]
  # sub_msg2/sub_msg3 are appended/inserted by value; containment is
  # checked via message equality, not identity.
  sub_msg2 = message_module.TestAllTypes.NestedMessage(bb=2)
  sub_msg3 = message_module.TestAllTypes.NestedMessage(bb=3)
  msg.repeated_nested_message.append(sub_msg2)
  msg.repeated_nested_message.insert(0, sub_msg3)
  self.assertIn(sub_msg1, msg.repeated_nested_message)
  self.assertIn(sub_msg2, msg.repeated_nested_message)
  self.assertIn(sub_msg3, msg.repeated_nested_message)
def testRepeatedScalarIterable(self, message_module):
  """A repeated scalar field supports plain iteration."""
  msg = message_module.TestAllTypes()
  msg.repeated_int32.extend([1, 2, 3])
  total = 0
  for value in msg.repeated_int32:
    total += value
  self.assertEqual(total, 6)
def testRepeatedNestedFieldIteration(self, message_module):
  """Forward, reversed() and [::-1] iteration over a composite field."""
  msg = message_module.TestAllTypes()
  for value in (1, 2, 3, 4):
    msg.repeated_nested_message.add(bb=value)
  forward = [1, 2, 3, 4]
  self.assertEqual(forward,
                   [m.bb for m in msg.repeated_nested_message])
  self.assertEqual(forward[::-1],
                   [m.bb for m in reversed(msg.repeated_nested_message)])
  self.assertEqual(forward[::-1],
                   [m.bb for m in msg.repeated_nested_message[::-1]])
def testSortingRepeatedScalarFieldsDefaultComparator(self, message_module):
  """Check some different types with the default comparator."""
  message = message_module.TestAllTypes()
  # TODO(mattp): would testing more scalar types strengthen test?
  message.repeated_int32.append(1)
  message.repeated_int32.append(3)
  message.repeated_int32.append(2)
  message.repeated_int32.sort()
  self.assertEqual(message.repeated_int32[0], 1)
  self.assertEqual(message.repeated_int32[1], 2)
  self.assertEqual(message.repeated_int32[2], 3)
  # str() of the container matches str() of an equivalent list.
  self.assertEqual(str(message.repeated_int32), str([1, 2, 3]))
  message.repeated_float.append(1.1)
  message.repeated_float.append(1.3)
  message.repeated_float.append(1.2)
  message.repeated_float.sort()
  # Floats: assertAlmostEqual because the stored values are float32.
  self.assertAlmostEqual(message.repeated_float[0], 1.1)
  self.assertAlmostEqual(message.repeated_float[1], 1.2)
  self.assertAlmostEqual(message.repeated_float[2], 1.3)
  message.repeated_string.append('a')
  message.repeated_string.append('c')
  message.repeated_string.append('b')
  message.repeated_string.sort()
  self.assertEqual(message.repeated_string[0], 'a')
  self.assertEqual(message.repeated_string[1], 'b')
  self.assertEqual(message.repeated_string[2], 'c')
  self.assertEqual(str(message.repeated_string), str([u'a', u'b', u'c']))
  message.repeated_bytes.append(b'a')
  message.repeated_bytes.append(b'c')
  message.repeated_bytes.append(b'b')
  message.repeated_bytes.sort()
  self.assertEqual(message.repeated_bytes[0], b'a')
  self.assertEqual(message.repeated_bytes[1], b'b')
  self.assertEqual(message.repeated_bytes[2], b'c')
  self.assertEqual(str(message.repeated_bytes), str([b'a', b'b', b'c']))
def testSortingRepeatedScalarFieldsCustomComparator(self, message_module):
  """Sorting scalar fields with a custom key function."""
  message = message_module.TestAllTypes()
  for value in (-3, -2, -1):
    message.repeated_int32.append(value)
  # Sorting by absolute value orders -1 before -2 before -3.
  message.repeated_int32.sort(key=abs)
  self.assertEqual(message.repeated_int32[0], -1)
  self.assertEqual(message.repeated_int32[1], -2)
  self.assertEqual(message.repeated_int32[2], -3)
  for text in ('aaa', 'bb', 'c'):
    message.repeated_string.append(text)
  # Sorting by length orders shortest first.
  message.repeated_string.sort(key=len)
  self.assertEqual(message.repeated_string[0], 'c')
  self.assertEqual(message.repeated_string[1], 'bb')
  self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self, message_module):
  """Check passing a custom comparator to sort a repeated composite field."""
  message = message_module.TestAllTypes()
  message.repeated_nested_message.add().bb = 1
  message.repeated_nested_message.add().bb = 3
  message.repeated_nested_message.add().bb = 2
  message.repeated_nested_message.add().bb = 6
  message.repeated_nested_message.add().bb = 5
  message.repeated_nested_message.add().bb = 4
  # Sort submessages by their 'bb' field.
  message.repeated_nested_message.sort(key=operator.attrgetter('bb'))
  self.assertEqual(message.repeated_nested_message[0].bb, 1)
  self.assertEqual(message.repeated_nested_message[1].bb, 2)
  self.assertEqual(message.repeated_nested_message[2].bb, 3)
  self.assertEqual(message.repeated_nested_message[3].bb, 4)
  self.assertEqual(message.repeated_nested_message[4].bb, 5)
  self.assertEqual(message.repeated_nested_message[5].bb, 6)
  # Exact text rendering of the sorted container.
  self.assertEqual(str(message.repeated_nested_message),
                   '[bb: 1\n, bb: 2\n, bb: 3\n, bb: 4\n, bb: 5\n, bb: 6\n]')
def testSortingRepeatedCompositeFieldsStable(self, message_module):
  """Sorting a repeated composite field must be stable.

  The key (bb // 10) produces ties; elements with equal keys must keep
  their original insertion order.
  """
  message = message_module.TestAllTypes()
  message.repeated_nested_message.add().bb = 21
  message.repeated_nested_message.add().bb = 20
  message.repeated_nested_message.add().bb = 13
  message.repeated_nested_message.add().bb = 33
  message.repeated_nested_message.add().bb = 11
  message.repeated_nested_message.add().bb = 24
  message.repeated_nested_message.add().bb = 10
  message.repeated_nested_message.sort(key=lambda z: z.bb // 10)
  self.assertEqual(
      [13, 11, 10, 21, 20, 24, 33],
      [n.bb for n in message.repeated_nested_message])
  # Make sure that for the C++ implementation, the underlying fields
  # are actually reordered.
  pb = message.SerializeToString()
  message.Clear()
  message.MergeFromString(pb)
  self.assertEqual(
      [13, 11, 10, 21, 20, 24, 33],
      [n.bb for n in message.repeated_nested_message])
def testRepeatedCompositeFieldSortArguments(self, message_module):
  """Check sorting a repeated composite field using list.sort() arguments."""
  message = message_module.TestAllTypes()
  get_bb = operator.attrgetter('bb')
  # cmp()-style comparator; only exercised on Python 2 (see guard below).
  cmp_bb = lambda a, b: cmp(a.bb, b.bb)
  message.repeated_nested_message.add().bb = 1
  message.repeated_nested_message.add().bb = 3
  message.repeated_nested_message.add().bb = 2
  message.repeated_nested_message.add().bb = 6
  message.repeated_nested_message.add().bb = 5
  message.repeated_nested_message.add().bb = 4
  message.repeated_nested_message.sort(key=get_bb)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [1, 2, 3, 4, 5, 6])
  message.repeated_nested_message.sort(key=get_bb, reverse=True)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [6, 5, 4, 3, 2, 1])
  if sys.version_info >= (3,): return  # No cmp sorting in PY3.
  # Legacy protobuf-specific 'sort_function' kwarg and list.sort's 'cmp'.
  message.repeated_nested_message.sort(sort_function=cmp_bb)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [1, 2, 3, 4, 5, 6])
  message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [6, 5, 4, 3, 2, 1])
def testRepeatedScalarFieldSortArguments(self, message_module):
  """Check sorting a scalar field using list.sort() arguments."""
  message = message_module.TestAllTypes()
  message.repeated_int32.append(-3)
  message.repeated_int32.append(-2)
  message.repeated_int32.append(-1)
  message.repeated_int32.sort(key=abs)
  self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
  message.repeated_int32.sort(key=abs, reverse=True)
  self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
  if sys.version_info < (3,):  # No cmp sorting in PY3.
    # Legacy 'sort_function' kwarg and list.sort's 'cmp', Python 2 only.
    abs_cmp = lambda a, b: cmp(abs(a), abs(b))
    message.repeated_int32.sort(sort_function=abs_cmp)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
  message.repeated_string.append('aaa')
  message.repeated_string.append('bb')
  message.repeated_string.append('c')
  message.repeated_string.sort(key=len)
  self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
  message.repeated_string.sort(key=len, reverse=True)
  self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
  if sys.version_info < (3,):  # No cmp sorting in PY3.
    len_cmp = lambda a, b: cmp(len(a), len(b))
    message.repeated_string.sort(sort_function=len_cmp)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(cmp=len_cmp, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testRepeatedFieldsComparable(self, message_module):
  """Python 2 cmp()/ordering support for messages and repeated fields."""
  m1 = message_module.TestAllTypes()
  m2 = message_module.TestAllTypes()
  m1.repeated_int32.append(0)
  m1.repeated_int32.append(1)
  m1.repeated_int32.append(2)
  m2.repeated_int32.append(0)
  m2.repeated_int32.append(1)
  m2.repeated_int32.append(2)
  m1.repeated_nested_message.add().bb = 1
  m1.repeated_nested_message.add().bb = 2
  m1.repeated_nested_message.add().bb = 3
  m2.repeated_nested_message.add().bb = 1
  m2.repeated_nested_message.add().bb = 2
  m2.repeated_nested_message.add().bb = 3
  if sys.version_info >= (3,): return  # No cmp() in PY3.
  # These comparisons should not raise errors.
  _ = m1 < m2
  _ = m1.repeated_nested_message < m2.repeated_nested_message
  # Make sure cmp always works. If it wasn't defined, these would be
  # id() comparisons and would all fail.
  self.assertEqual(cmp(m1, m2), 0)
  self.assertEqual(cmp(m1.repeated_int32, m2.repeated_int32), 0)
  # A scalar container compares equal to a plain list with the same values.
  self.assertEqual(cmp(m1.repeated_int32, [0, 1, 2]), 0)
  self.assertEqual(cmp(m1.repeated_nested_message,
                       m2.repeated_nested_message), 0)
  with self.assertRaises(TypeError):
    # Can't compare repeated composite containers to lists.
    cmp(m1.repeated_nested_message, m2.repeated_nested_message[:])

# TODO(anuraag): Implement extensiondict comparison in C++ and then add test
def testRepeatedFieldsAreSequences(self, message_module):
  """Repeated containers implement the MutableSequence ABC."""
  msg = message_module.TestAllTypes()
  for container in (msg.repeated_int32, msg.repeated_nested_message):
    self.assertIsInstance(container, collections_abc.MutableSequence)
def testRepeatedFieldsNotHashable(self, message_module):
  """hash() must be rejected for repeated containers."""
  msg = message_module.TestAllTypes()
  for container in (msg.repeated_int32, msg.repeated_nested_message):
    with self.assertRaises(TypeError):
      hash(container)
def testRepeatedFieldInsideNestedMessage(self, message_module):
  """Even a no-op extend() on a nested repeated field marks the parent
  submessage as present."""
  msg = message_module.NestedTestAllTypes()
  msg.payload.repeated_int32.extend([])
  self.assertTrue(msg.HasField('payload'))
def testMergeFrom(self, message_module):
  """MergeFrom re-points previously obtained lazy submessage handles."""
  m1 = message_module.TestAllTypes()
  m2 = message_module.TestAllTypes()
  # Cpp extension will lazily create a sub message which is immutable.
  nested = m1.optional_nested_message
  self.assertEqual(0, nested.bb)
  m2.optional_nested_message.bb = 1
  # Make sure cmessage pointing to a mutable message after merge instead of
  # the lazily created message.
  m1.MergeFrom(m2)
  self.assertEqual(1, nested.bb)
  # Test more nested sub message.
  msg1 = message_module.NestedTestAllTypes()
  msg2 = message_module.NestedTestAllTypes()
  nested = msg1.child.payload.optional_nested_message
  self.assertEqual(0, nested.bb)
  msg2.child.payload.optional_nested_message.bb = 1
  msg1.MergeFrom(msg2)
  self.assertEqual(1, nested.bb)
  # Test repeated field.
  # NOTE(review): this compares the container to itself, which is trivially
  # true — possibly msg2's container was intended; confirm upstream intent.
  self.assertEqual(msg1.payload.repeated_nested_message,
                   msg1.payload.repeated_nested_message)
  nested = msg2.payload.repeated_nested_message.add()
  nested.bb = 1
  msg1.MergeFrom(msg2)
  self.assertEqual(1, len(msg1.payload.repeated_nested_message))
  self.assertEqual(1, nested.bb)
def testMergeFromString(self, message_module):
  """MergeFromString replaces a lazily created submessage with a mutable one."""
  m1 = message_module.TestAllTypes()
  m2 = message_module.TestAllTypes()
  # Cpp extension will lazily create a sub message which is immutable.
  self.assertEqual(0, m1.optional_nested_message.bb)
  m2.optional_nested_message.bb = 1
  # Make sure cmessage pointing to a mutable message after merge instead of
  # the lazily created message.
  m1.MergeFromString(m2.SerializeToString())
  self.assertEqual(1, m1.optional_nested_message.bb)
@unittest.skipIf(six.PY2, 'memoryview objects are not supported on py2')
def testMergeFromStringUsingMemoryViewWorksInPy3(self, message_module):
  """FromString accepts a memoryview on Python 3 and copies out of it."""
  m2 = message_module.TestAllTypes()
  m2.optional_string = 'scalar string'
  m2.repeated_string.append('repeated string')
  m2.optional_bytes = b'scalar bytes'
  m2.repeated_bytes.append(b'repeated bytes')
  serialized = m2.SerializeToString()
  memview = memoryview(serialized)
  m1 = message_module.TestAllTypes.FromString(memview)
  self.assertEqual(m1.optional_bytes, b'scalar bytes')
  self.assertEqual(m1.repeated_bytes, [b'repeated bytes'])
  self.assertEqual(m1.optional_string, 'scalar string')
  self.assertEqual(m1.repeated_string, ['repeated string'])
  # Make sure that the memoryview was correctly converted to bytes, and
  # that a sub-sliced memoryview is not being used.
  self.assertIsInstance(m1.optional_bytes, bytes)
  self.assertIsInstance(m1.repeated_bytes[0], bytes)
  self.assertIsInstance(m1.optional_string, six.text_type)
  self.assertIsInstance(m1.repeated_string[0], six.text_type)
@unittest.skipIf(six.PY3, 'memoryview is supported by py3')
def testMergeFromStringUsingMemoryViewIsPy2Error(self, message_module):
  """On Python 2, FromString must reject memoryview input."""
  memview = memoryview(b'')
  with self.assertRaises(TypeError):
    message_module.TestAllTypes.FromString(memview)
def testMergeFromEmpty(self, message_module):
  """Merging empty bytes must not mark a lazily created submessage present."""
  m1 = message_module.TestAllTypes()
  # Cpp extension will lazily create a sub message which is immutable.
  self.assertEqual(0, m1.optional_nested_message.bb)
  self.assertFalse(m1.HasField('optional_nested_message'))
  # Make sure the sub message is still immutable after merge from empty.
  m1.MergeFromString(b'')  # field state should not change
  self.assertFalse(m1.HasField('optional_nested_message'))
def ensureNestedMessageExists(self, msg, attribute):
  """Make sure that a nested message object exists.

  As soon as a nested message attribute is accessed, it will be present in the
  _fields dict, without being marked as actually being set.
  """
  # Touch the attribute to force lazy creation, then verify it is still
  # not considered "set".
  getattr(msg, attribute)
  self.assertFalse(msg.HasField(attribute))
def testOneofGetCaseNonexistingField(self, message_module):
  """WhichOneof rejects unknown oneof names and non-string arguments."""
  msg = message_module.TestAllTypes()
  with self.assertRaises(ValueError):
    msg.WhichOneof('no_such_oneof_field')
  with self.assertRaises(Exception):
    msg.WhichOneof(0)
def testOneofDefaultValues(self, message_module):
  """Assigning a default value to a oneof member still selects it."""
  m = message_module.TestAllTypes()
  self.assertIs(None, m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))
  # Oneof is set even when setting it to a default value.
  m.oneof_uint32 = 0
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_field'))
  self.assertTrue(m.HasField('oneof_uint32'))
  self.assertFalse(m.HasField('oneof_string'))
  # Setting another member (even to its default) switches the oneof.
  m.oneof_string = ""
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_string'))
  self.assertFalse(m.HasField('oneof_uint32'))
def testOneofSemantics(self, message_module):
  """Setting one oneof member clears the others; reads do not switch."""
  m = message_module.TestAllTypes()
  self.assertIs(None, m.WhichOneof('oneof_field'))
  m.oneof_uint32 = 11
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_uint32'))
  m.oneof_string = u'foo'
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))
  self.assertTrue(m.HasField('oneof_string'))
  # Read nested message accessor without accessing submessage.
  m.oneof_nested_message
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_string'))
  self.assertFalse(m.HasField('oneof_nested_message'))
  # Read accessor of nested message without accessing submessage.
  m.oneof_nested_message.bb
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_string'))
  self.assertFalse(m.HasField('oneof_nested_message'))
  # Writing through the submessage accessor DOES switch the oneof.
  m.oneof_nested_message.bb = 11
  self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_string'))
  self.assertTrue(m.HasField('oneof_nested_message'))
  m.oneof_bytes = b'bb'
  self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_nested_message'))
  self.assertTrue(m.HasField('oneof_bytes'))
def testOneofCompositeFieldReadAccess(self, message_module):
  """Reading an unset composite oneof member does not disturb the set one."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  # Touch the nested-message member without setting it.
  self.ensureNestedMessageExists(m, 'oneof_nested_message')
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  self.assertEqual(11, m.oneof_uint32)
def testOneofWhichOneof(self, message_module):
  """WhichOneof tracks set/clear transitions of the oneof."""
  m = message_module.TestAllTypes()
  self.assertIs(None, m.WhichOneof('oneof_field'))
  # HasField on the oneof name itself is only supported under proto2
  # (unittest_pb2), hence the guards.
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))
  m.oneof_uint32 = 11
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  if message_module is unittest_pb2:
    self.assertTrue(m.HasField('oneof_field'))
  m.oneof_bytes = b'bb'
  self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
  m.ClearField('oneof_bytes')
  self.assertIs(None, m.WhichOneof('oneof_field'))
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))
def testOneofClearField(self, message_module):
  """ClearField on the oneof name clears the currently set member."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  m.ClearField('oneof_field')
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))
  self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearSetField(self, message_module):
  """ClearField on the currently set member resets the whole oneof."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  m.ClearField('oneof_uint32')
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))
  self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearUnsetField(self, message_module):
  """ClearField on an UNSET member leaves the set member untouched."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  self.ensureNestedMessageExists(m, 'oneof_nested_message')
  m.ClearField('oneof_nested_message')
  self.assertEqual(11, m.oneof_uint32)
  if message_module is unittest_pb2:
    self.assertTrue(m.HasField('oneof_field'))
  self.assertTrue(m.HasField('oneof_uint32'))
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
def testOneofDeserialize(self, message_module):
  """Round-tripping through bytes preserves which oneof member is set."""
  src = message_module.TestAllTypes()
  src.oneof_uint32 = 11
  dst = message_module.TestAllTypes()
  dst.ParseFromString(src.SerializeToString())
  self.assertEqual('oneof_uint32', dst.WhichOneof('oneof_field'))
def testOneofCopyFrom(self, message_module):
  """CopyFrom preserves which oneof member is set."""
  src = message_module.TestAllTypes()
  src.oneof_uint32 = 11
  dst = message_module.TestAllTypes()
  dst.CopyFrom(src)
  self.assertEqual('oneof_uint32', dst.WhichOneof('oneof_field'))
def testOneofNestedMergeFrom(self, message_module):
  """MergeFrom overwrites a set oneof at one level but not deeper levels
  the source does not touch."""
  m = message_module.NestedTestAllTypes()
  m.payload.oneof_uint32 = 11
  m2 = message_module.NestedTestAllTypes()
  m2.payload.oneof_bytes = b'bb'
  m2.child.payload.oneof_bytes = b'bb'
  m2.MergeFrom(m)
  # payload's oneof is overwritten by the merge source...
  self.assertEqual('oneof_uint32', m2.payload.WhichOneof('oneof_field'))
  # ...but child.payload, which the source leaves unset, is preserved.
  self.assertEqual('oneof_bytes', m2.child.payload.WhichOneof('oneof_field'))
def testOneofMessageMergeFrom(self, message_module):
  """Merging a message-typed oneof member switches the target's oneof."""
  m = message_module.NestedTestAllTypes()
  m.payload.oneof_nested_message.bb = 11
  m.child.payload.oneof_nested_message.bb = 12
  m2 = message_module.NestedTestAllTypes()
  m2.payload.oneof_uint32 = 13
  m2.MergeFrom(m)
  self.assertEqual('oneof_nested_message',
                   m2.payload.WhichOneof('oneof_field'))
  self.assertEqual('oneof_nested_message',
                   m2.child.payload.WhichOneof('oneof_field'))
def testOneofNestedMessageInit(self, message_module):
  """Passing a submessage to the constructor selects that oneof member."""
  nested = message_module.TestAllTypes.NestedMessage()
  msg = message_module.TestAllTypes(oneof_nested_message=nested)
  self.assertEqual('oneof_nested_message', msg.WhichOneof('oneof_field'))
def testOneofClear(self, message_module):
  """Clear() resets the oneof, and the oneof remains usable afterwards."""
  msg = message_module.TestAllTypes()
  msg.oneof_uint32 = 11
  msg.Clear()
  self.assertIsNone(msg.WhichOneof('oneof_field'))
  msg.oneof_bytes = b'bb'
  self.assertEqual('oneof_bytes', msg.WhichOneof('oneof_field'))
def testAssignByteStringToUnicodeField(self, message_module):
  """Assigning a byte string to a string field should result
  in the value being converted to a Unicode string."""
  m = message_module.TestAllTypes()
  m.optional_string = str('')
  self.assertIsInstance(m.optional_string, six.text_type)
def testLongValuedSlice(self, message_module):
  """It should be possible to use long-valued indices in slices.

  This didn't used to work in the v2 C++ implementation.
  """
  # NOTE: `long` here is the Python 2 builtin (aliased to int under PY3
  # elsewhere in this file, presumably — confirm against the file header).
  m = message_module.TestAllTypes()
  # Repeated scalar
  m.repeated_int32.append(1)
  sl = m.repeated_int32[long(0):long(len(m.repeated_int32))]
  self.assertEqual(len(m.repeated_int32), len(sl))
  # Repeated composite
  m.repeated_nested_message.add().bb = 3
  sl = m.repeated_nested_message[long(0):long(len(m.repeated_nested_message))]
  self.assertEqual(len(m.repeated_nested_message), len(sl))
def testExtendShouldNotSwallowExceptions(self, message_module):
  """This didn't use to work in the v2 C++ implementation."""
  # The generator expressions deliberately reference the undefined name
  # 'a' so that iterating them raises NameError; extend() must propagate
  # it instead of swallowing it.
  m = message_module.TestAllTypes()
  with self.assertRaises(NameError) as _:
    m.repeated_int32.extend(a for i in range(10))  # pylint: disable=undefined-variable
  with self.assertRaises(NameError) as _:
    m.repeated_nested_enum.extend(
        a for i in range(10))  # pylint: disable=undefined-variable
# Values that are "falsy" in Python. extend() historically treats each of
# these as an empty iterable (no-op); see TODO b/18413862 in the tests below.
FALSY_VALUES = [None, False, 0, 0.0, b'', u'', bytearray(), [], {}, set()]
def testExtendInt32WithNothing(self, message_module):
  """Test no-ops extending repeated int32 fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)
  # TODO(ptucker): Deprecate this behavior. b/18413862
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_int32.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_int32)
  m.repeated_int32.extend([])
  self.assertSequenceEqual([], m.repeated_int32)
def testExtendFloatWithNothing(self, message_module):
  """Test no-ops extending repeated float fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)
  # TODO(ptucker): Deprecate this behavior. b/18413862
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_float.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_float)
  m.repeated_float.extend([])
  self.assertSequenceEqual([], m.repeated_float)
def testExtendStringWithNothing(self, message_module):
  """Test no-ops extending repeated string fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  # TODO(ptucker): Deprecate this behavior. b/18413862
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_string.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_string)
  m.repeated_string.extend([])
  self.assertSequenceEqual([], m.repeated_string)
def testExtendInt32WithPythonList(self, message_module):
  """Extending a repeated int32 field with plain lists accumulates values."""
  msg = message_module.TestAllTypes()
  self.assertSequenceEqual([], msg.repeated_int32)
  expected = []
  for chunk in ([0], [1, 2], [3, 4]):
    msg.repeated_int32.extend(chunk)
    expected.extend(chunk)
    self.assertSequenceEqual(expected, msg.repeated_int32)
def testExtendFloatWithPythonList(self, message_module):
  """Extending a repeated float field with plain lists accumulates values."""
  msg = message_module.TestAllTypes()
  self.assertSequenceEqual([], msg.repeated_float)
  expected = []
  for chunk in ([0.0], [1.0, 2.0], [3.0, 4.0]):
    msg.repeated_float.extend(chunk)
    expected.extend(chunk)
    self.assertSequenceEqual(expected, msg.repeated_float)
def testExtendStringWithPythonList(self, message_module):
  """Test extending repeated string fields with python lists."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  # Note the first element is the empty string, not a no-op.
  m.repeated_string.extend([''])
  self.assertSequenceEqual([''], m.repeated_string)
  m.repeated_string.extend(['11', '22'])
  self.assertSequenceEqual(['', '11', '22'], m.repeated_string)
  m.repeated_string.extend(['33', '44'])
  self.assertSequenceEqual(['', '11', '22', '33', '44'], m.repeated_string)
def testExtendStringWithString(self, message_module):
  """Test extending repeated string fields with characters from a string."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  # A plain string is itself an iterable of 1-char strings, so extend()
  # adds one element per character.
  m.repeated_string.extend('abc')
  self.assertSequenceEqual(['a', 'b', 'c'], m.repeated_string)
class TestIterable(object):
  """This iterable object mimics the behavior of numpy.array.

  Truth-testing fails (raises ValueError) for length > 1, and returns
  bool(item[0]) for length == 1.
  """

  def __init__(self, values=None):
    # NOTE: `values or []` treats any falsy argument (e.g. an empty list)
    # as "no values", matching the original behavior.
    self._list = values or []

  def __nonzero__(self):  # Python 2 truth-testing hook.
    size = len(self._list)
    if size == 0:
      return False
    if size == 1:
      return bool(self._list[0])
    raise ValueError('Truth value is ambiguous.')

  # Bug fix: Python 3 uses __bool__, not __nonzero__. Without this alias,
  # Python 3 truth-testing fell back to __len__, so the numpy-like
  # "ambiguous truth value" ValueError was never raised there.
  __bool__ = __nonzero__

  def __len__(self):
    return len(self._list)

  def __iter__(self):
    return self._list.__iter__()
def testExtendInt32WithIterable(self, message_module):
  """Test extending repeated int32 fields with iterable."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)
  # TestIterable has numpy-like (length-dependent) truthiness; extend()
  # must iterate it rather than truth-test it.
  m.repeated_int32.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([0]))
  self.assertSequenceEqual([0], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([1, 2]))
  self.assertSequenceEqual([0, 1, 2], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([3, 4]))
  self.assertSequenceEqual([0, 1, 2, 3, 4], m.repeated_int32)
def testExtendFloatWithIterable(self, message_module):
  """Test extending repeated float fields with iterable."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)
  # TestIterable has numpy-like (length-dependent) truthiness; extend()
  # must iterate it rather than truth-test it.
  m.repeated_float.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([0.0]))
  self.assertSequenceEqual([0.0], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([1.0, 2.0]))
  self.assertSequenceEqual([0.0, 1.0, 2.0], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([3.0, 4.0]))
  self.assertSequenceEqual([0.0, 1.0, 2.0, 3.0, 4.0], m.repeated_float)
def testExtendStringWithIterable(self, message_module):
  """Test extending repeated string fields with iterable."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  # TestIterable has numpy-like (length-dependent) truthiness; extend()
  # must iterate it rather than truth-test it.
  m.repeated_string.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['']))
  self.assertSequenceEqual([''], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['1', '2']))
  self.assertSequenceEqual(['', '1', '2'], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['3', '4']))
  self.assertSequenceEqual(['', '1', '2', '3', '4'], m.repeated_string)
def testPickleRepeatedScalarContainer(self, message_module):
  """Pickling a repeated scalar container raises (C++ v1 implementation)."""
  # TODO(tibell): The pure-Python implementation support pickling of
  # scalar containers in *some* cases. For now the cpp2 version
  # throws an exception to avoid a segfault. Investigate if we
  # want to support pickling of these fields.
  #
  # For more information see: https://b2.corp.google.com/u/0/issues/18677897
  if (api_implementation.Type() != 'cpp' or
      api_implementation.Version() == 2):
    return
  m = message_module.TestAllTypes()
  with self.assertRaises(pickle.PickleError) as _:
    pickle.dumps(m.repeated_int32, pickle.HIGHEST_PROTOCOL)
def testSortEmptyRepeatedCompositeContainer(self, message_module):
  """Exercise a scenario that has led to segfaults in the past.

  Sorting an empty repeated composite field must be a harmless no-op.
  """
  m = message_module.TestAllTypes()
  m.repeated_nested_message.sort()
def testHasFieldOnRepeatedField(self, message_module):
  """Using HasField on a repeated field should raise an exception.

  Presence is only defined for singular fields.
  """
  m = message_module.TestAllTypes()
  with self.assertRaises(ValueError) as _:
    m.HasField('repeated_int32')
def testRepeatedScalarFieldPop(self, message_module):
  """pop() on repeated scalars: IndexError when empty, supports indices."""
  m = message_module.TestAllTypes()
  with self.assertRaises(IndexError) as _:
    m.repeated_int32.pop()
  m.repeated_int32.extend(range(5))
  # Default pop() removes the last element, like list.pop().
  self.assertEqual(4, m.repeated_int32.pop())
  self.assertEqual(0, m.repeated_int32.pop(0))
  self.assertEqual(2, m.repeated_int32.pop(1))
  self.assertEqual([1, 3], m.repeated_int32)
def testRepeatedCompositeFieldPop(self, message_module):
  """pop() on repeated messages mirrors list.pop() semantics."""
  m = message_module.TestAllTypes()
  with self.assertRaises(IndexError) as _:
    m.repeated_nested_message.pop()
  with self.assertRaises(TypeError) as _:
    # Index must be an integer, not a string.
    m.repeated_nested_message.pop('0')
  for i in range(5):
    n = m.repeated_nested_message.add()
    n.bb = i
  self.assertEqual(4, m.repeated_nested_message.pop().bb)
  self.assertEqual(0, m.repeated_nested_message.pop(0).bb)
  self.assertEqual(2, m.repeated_nested_message.pop(1).bb)
  self.assertEqual([1, 3], [n.bb for n in m.repeated_nested_message])
def testRepeatedCompareWithSelf(self, message_module):
  """Repeated containers compare equal to themselves."""
  m = message_module.TestAllTypes()
  for i in range(5):
    m.repeated_int32.insert(i, i)
    n = m.repeated_nested_message.add()
    n.bb = i
  self.assertSequenceEqual(m.repeated_int32, m.repeated_int32)
  self.assertEqual(m.repeated_nested_message, m.repeated_nested_message)
def testReleasedNestedMessages(self, message_module):
  """A case that lead to a segfault when a message detached from its parent
  container has itself a child container.
  """
  m = message_module.NestedTestAllTypes()
  # Each rebinding drops the only Python reference to the previous parent,
  # which is what triggered the original crash.
  m = m.repeated_child.add()
  m = m.child
  m = m.repeated_child.add()
  self.assertEqual(m.payload.optional_int32, 0)
def testSetRepeatedComposite(self, message_module):
  """Direct assignment to a repeated field raises AttributeError."""
  m = message_module.TestAllTypes()
  with self.assertRaises(AttributeError):
    m.repeated_int32 = []
  # Also rejected once the field is non-empty.
  m.repeated_int32.append(1)
  with self.assertRaises(AttributeError):
    m.repeated_int32 = []
def testReturningType(self, message_module):
  """Scalar accessors return canonical Python types even when set from int."""
  m = message_module.TestAllTypes()
  self.assertEqual(float, type(m.optional_float))
  self.assertEqual(float, type(m.optional_double))
  self.assertEqual(bool, type(m.optional_bool))
  # Assign ints; the field types must coerce them.
  m.optional_float = 1
  m.optional_double = 1
  m.optional_bool = 1
  m.repeated_float.append(1)
  m.repeated_double.append(1)
  m.repeated_bool.append(1)
  m.ParseFromString(m.SerializeToString())
  # After a serialize/parse round trip the canonical types are preserved.
  self.assertEqual(float, type(m.optional_float))
  self.assertEqual(float, type(m.optional_double))
  self.assertEqual('1.0', str(m.optional_double))
  self.assertEqual(bool, type(m.optional_bool))
  self.assertEqual(float, type(m.repeated_float[0]))
  self.assertEqual(float, type(m.repeated_double[0]))
  self.assertEqual(bool, type(m.repeated_bool[0]))
  self.assertEqual(True, m.repeated_bool[0])
# Class to test proto2-only features (required, extensions, etc.)
@testing_refleaks.TestCase
class Proto2Test(unittest.TestCase):
  """Tests proto2-only semantics: explicit field presence, closed enums,
  extensions, required fields, and groups."""

  def testFieldPresence(self):
    """Proto2 tracks presence for all singular fields, even at defaults."""
    message = unittest_pb2.TestAllTypes()

    self.assertFalse(message.HasField("optional_int32"))
    self.assertFalse(message.HasField("optional_bool"))
    self.assertFalse(message.HasField("optional_nested_message"))

    # HasField() is invalid for unknown and for repeated fields.
    with self.assertRaises(ValueError):
      message.HasField("field_doesnt_exist")

    with self.assertRaises(ValueError):
      message.HasField("repeated_int32")
    with self.assertRaises(ValueError):
      message.HasField("repeated_nested_message")

    self.assertEqual(0, message.optional_int32)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

    # Fields are set even when setting the values to default values.
    message.optional_int32 = 0
    message.optional_bool = False
    message.optional_nested_message.bb = 0
    self.assertTrue(message.HasField("optional_int32"))
    self.assertTrue(message.HasField("optional_bool"))
    self.assertTrue(message.HasField("optional_nested_message"))

    # Set the fields to non-default values.
    message.optional_int32 = 5
    message.optional_bool = True
    message.optional_nested_message.bb = 15

    # Field names may also be given as unicode.
    self.assertTrue(message.HasField(u"optional_int32"))
    self.assertTrue(message.HasField("optional_bool"))
    self.assertTrue(message.HasField("optional_nested_message"))

    # Clearing the fields unsets them and resets their value to default.
    message.ClearField("optional_int32")
    message.ClearField(u"optional_bool")
    message.ClearField("optional_nested_message")
    self.assertFalse(message.HasField("optional_int32"))
    self.assertFalse(message.HasField("optional_bool"))
    self.assertFalse(message.HasField("optional_nested_message"))
    self.assertEqual(0, message.optional_int32)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

  def testAssignInvalidEnum(self):
    """Assigning an invalid enum number is not allowed in proto2."""
    m = unittest_pb2.TestAllTypes()

    # Proto2 can not assign unknown enum.
    with self.assertRaises(ValueError) as _:
      m.optional_nested_enum = 1234567
    self.assertRaises(ValueError, m.repeated_nested_enum.append, 1234567)
    # Assignment is a different code path than append for the C++ impl.
    m.repeated_nested_enum.append(2)
    m.repeated_nested_enum[0] = 2
    with self.assertRaises(ValueError):
      m.repeated_nested_enum[0] = 123456

    # Unknown enum value can be parsed but is ignored.
    m2 = unittest_proto3_arena_pb2.TestAllTypes()
    m2.optional_nested_enum = 1234567
    m2.repeated_nested_enum.append(7654321)
    serialized = m2.SerializeToString()

    m3 = unittest_pb2.TestAllTypes()
    m3.ParseFromString(serialized)
    self.assertFalse(m3.HasField('optional_nested_enum'))
    # 1 is the default value for optional_nested_enum.
    self.assertEqual(1, m3.optional_nested_enum)
    self.assertEqual(0, len(m3.repeated_nested_enum))
    # The unknown values survive in the unknown field set and round-trip
    # back to a proto3 message.
    m2.Clear()
    m2.ParseFromString(m3.SerializeToString())
    self.assertEqual(1234567, m2.optional_nested_enum)
    self.assertEqual(7654321, m2.repeated_nested_enum[0])

  def testUnknownEnumMap(self):
    """Map fields with closed enum values reject unknown enum numbers."""
    m = map_proto2_unittest_pb2.TestEnumMap()
    m.known_map_field[123] = 0
    with self.assertRaises(ValueError):
      m.unknown_map_field[1] = 123

  def testExtensionsErrors(self):
    """Messages without extension ranges expose no Extensions attribute."""
    msg = unittest_pb2.TestAllTypes()
    self.assertRaises(AttributeError, getattr, msg, 'Extensions')

  def testMergeFromExtensions(self):
    """MergeFrom must replace lazily-created immutable sub-messages."""
    msg1 = more_extensions_pb2.TopLevelMessage()
    msg2 = more_extensions_pb2.TopLevelMessage()
    # Cpp extension will lazily create a sub message which is immutable.
    self.assertEqual(0, msg1.submessage.Extensions[
        more_extensions_pb2.optional_int_extension])
    self.assertFalse(msg1.HasField('submessage'))
    msg2.submessage.Extensions[
        more_extensions_pb2.optional_int_extension] = 123
    # Make sure cmessage and extensions pointing to a mutable message
    # after merge instead of the lazily created message.
    msg1.MergeFrom(msg2)
    self.assertEqual(123, msg1.submessage.Extensions[
        more_extensions_pb2.optional_int_extension])

  def testGoldenExtensions(self):
    """Extensions round-trip byte-identically against the golden file."""
    golden_data = test_util.GoldenFileData('golden_message')
    golden_message = unittest_pb2.TestAllExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestAllExtensions()
    test_util.SetAllExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testGoldenPackedExtensions(self):
    """Packed extensions round-trip byte-identically against the golden file."""
    golden_data = test_util.GoldenFileData('golden_packed_fields_message')
    golden_message = unittest_pb2.TestPackedExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestPackedExtensions()
    test_util.SetAllPackedExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testPickleIncompleteProto(self):
    """Pickling works for messages with unset required fields."""
    golden_message = unittest_pb2.TestRequired(a=1)
    pickled_message = pickle.dumps(golden_message)

    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)
    self.assertEqual(unpickled_message.a, 1)
    # This is still an incomplete proto - so serializing should fail
    self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)

  # TODO(haberman): this isn't really a proto2-specific test except that this
  # message has a required field in it.  Should probably be factored out so
  # that we can test the other parts with proto3.
  def testParsingMerge(self):
    """Check the merge behavior when a required or optional field appears
    multiple times in the input."""
    messages = [
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes() ]
    messages[0].optional_int32 = 1
    messages[1].optional_int64 = 2
    messages[2].optional_int32 = 3
    messages[2].optional_string = 'hello'

    # The expected result of merging all three messages above.
    merged_message = unittest_pb2.TestAllTypes()
    merged_message.optional_int32 = 3
    merged_message.optional_int64 = 2
    merged_message.optional_string = 'hello'

    generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
    generator.field1.extend(messages)
    generator.field2.extend(messages)
    generator.field3.extend(messages)
    generator.ext1.extend(messages)
    generator.ext2.extend(messages)
    generator.group1.add().field1.MergeFrom(messages[0])
    generator.group1.add().field1.MergeFrom(messages[1])
    generator.group1.add().field1.MergeFrom(messages[2])
    generator.group2.add().field1.MergeFrom(messages[0])
    generator.group2.add().field1.MergeFrom(messages[1])
    generator.group2.add().field1.MergeFrom(messages[2])

    data = generator.SerializeToString()
    parsing_merge = unittest_pb2.TestParsingMerge()
    parsing_merge.ParseFromString(data)

    # Required and optional fields should be merged.
    self.assertEqual(parsing_merge.required_all_types, merged_message)
    self.assertEqual(parsing_merge.optional_all_types, merged_message)
    self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
                     merged_message)
    self.assertEqual(parsing_merge.Extensions[
                     unittest_pb2.TestParsingMerge.optional_ext],
                     merged_message)

    # Repeated fields should not be merged.
    self.assertEqual(len(parsing_merge.repeated_all_types), 3)
    self.assertEqual(len(parsing_merge.repeatedgroup), 3)
    self.assertEqual(len(parsing_merge.Extensions[
        unittest_pb2.TestParsingMerge.repeated_ext]), 3)

  def testPythonicInit(self):
    """Keyword-argument construction accepts scalars, dicts, enum names,
    and lists for the corresponding field kinds."""
    message = unittest_pb2.TestAllTypes(
        optional_int32=100,
        optional_fixed32=200,
        optional_float=300.5,
        optional_bytes=b'x',
        optionalgroup={'a': 400},
        optional_nested_message={'bb': 500},
        optional_foreign_message={},
        optional_nested_enum='BAZ',
        repeatedgroup=[{'a': 600},
                       {'a': 700}],
        repeated_nested_enum=['FOO', unittest_pb2.TestAllTypes.BAR],
        default_int32=800,
        oneof_string='y')
    self.assertIsInstance(message, unittest_pb2.TestAllTypes)
    self.assertEqual(100, message.optional_int32)
    self.assertEqual(200, message.optional_fixed32)
    self.assertEqual(300.5, message.optional_float)
    self.assertEqual(b'x', message.optional_bytes)
    self.assertEqual(400, message.optionalgroup.a)
    self.assertIsInstance(message.optional_nested_message,
                          unittest_pb2.TestAllTypes.NestedMessage)
    self.assertEqual(500, message.optional_nested_message.bb)
    # An empty dict still marks the sub-message as present.
    self.assertTrue(message.HasField('optional_foreign_message'))
    self.assertEqual(message.optional_foreign_message,
                     unittest_pb2.ForeignMessage())
    self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                     message.optional_nested_enum)
    self.assertEqual(2, len(message.repeatedgroup))
    self.assertEqual(600, message.repeatedgroup[0].a)
    self.assertEqual(700, message.repeatedgroup[1].a)
    self.assertEqual(2, len(message.repeated_nested_enum))
    self.assertEqual(unittest_pb2.TestAllTypes.FOO,
                     message.repeated_nested_enum[0])
    self.assertEqual(unittest_pb2.TestAllTypes.BAR,
                     message.repeated_nested_enum[1])
    self.assertEqual(800, message.default_int32)
    self.assertEqual('y', message.oneof_string)
    # Fields not mentioned in the constructor stay at their defaults.
    self.assertFalse(message.HasField('optional_int64'))
    self.assertEqual(0, len(message.repeated_float))
    self.assertEqual(42, message.default_int64)

    message = unittest_pb2.TestAllTypes(optional_nested_enum=u'BAZ')
    self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                     message.optional_nested_enum)

    # Invalid field names, value types, enum labels, and scalar-for-repeated
    # mismatches are all rejected.
    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(
          optional_nested_message={'INVALID_NESTED_FIELD': 17})

    with self.assertRaises(TypeError):
      unittest_pb2.TestAllTypes(
          optional_nested_message={'bb': 'INVALID_VALUE_TYPE'})

    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(optional_nested_enum='INVALID_LABEL')

    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(repeated_nested_enum='FOO')

  def testPythonicInitWithDict(self):
    # Both string/unicode field name keys should work.
    kwargs = {
        'optional_int32': 100,
        u'optional_fixed32': 200,
    }
    msg = unittest_pb2.TestAllTypes(**kwargs)
    self.assertEqual(100, msg.optional_int32)
    self.assertEqual(200, msg.optional_fixed32)

  def test_documentation(self):
    # Also used by the interactive help() function.
    doc = pydoc.html.document(unittest_pb2.TestAllTypes, 'message')
    self.assertIn('class TestAllTypes', doc)
    self.assertIn('SerializePartialToString', doc)
    self.assertIn('repeated_float', doc)
    base = unittest_pb2.TestAllTypes.__bases__[0]
    self.assertRaises(AttributeError, getattr, base, '_extensions_by_name')
# Class to test proto3-only features/behavior (updated field presence & enums)
@testing_refleaks.TestCase
class Proto3Test(unittest.TestCase):
# Utility method for comparing equality with a map.
def assertMapIterEquals(self, map_iter, dict_value):
  """Asserts that iterating map_iter yields exactly the items of dict_value."""
  remaining = dict(dict_value)  # Copy so the caller's dict is untouched.
  for key, value in map_iter:
    self.assertEqual(value, remaining[key])
    del remaining[key]
  # Every expected item must have been consumed exactly once.
  self.assertEqual({}, remaining)
def testFieldPresence(self):
  """Proto3 (non-optional) fields have no presence except sub-messages."""
  message = unittest_proto3_arena_pb2.TestAllTypes()

  # We can't test presence of non-repeated, non-submessage fields.
  with self.assertRaises(ValueError):
    message.HasField('optional_int32')
  with self.assertRaises(ValueError):
    message.HasField('optional_float')
  with self.assertRaises(ValueError):
    message.HasField('optional_string')
  with self.assertRaises(ValueError):
    message.HasField('optional_bool')

  # But we can still test presence of submessage fields.
  self.assertFalse(message.HasField('optional_nested_message'))

  # As with proto2, we can't test presence of fields that don't exist, or
  # repeated fields.
  with self.assertRaises(ValueError):
    message.HasField('field_doesnt_exist')

  with self.assertRaises(ValueError):
    message.HasField('repeated_int32')
  with self.assertRaises(ValueError):
    message.HasField('repeated_nested_message')

  # Fields should default to their type-specific default.
  self.assertEqual(0, message.optional_int32)
  self.assertEqual(0, message.optional_float)
  self.assertEqual('', message.optional_string)
  self.assertEqual(False, message.optional_bool)
  self.assertEqual(0, message.optional_nested_message.bb)

  # Setting a submessage should still return proper presence information.
  message.optional_nested_message.bb = 0
  self.assertTrue(message.HasField('optional_nested_message'))

  # Set the fields to non-default values.
  message.optional_int32 = 5
  message.optional_float = 1.1
  message.optional_string = 'abc'
  message.optional_bool = True
  message.optional_nested_message.bb = 15

  # Clearing the fields unsets them and resets their value to default.
  message.ClearField('optional_int32')
  message.ClearField('optional_float')
  message.ClearField('optional_string')
  message.ClearField('optional_bool')
  message.ClearField('optional_nested_message')
  self.assertEqual(0, message.optional_int32)
  self.assertEqual(0, message.optional_float)
  self.assertEqual('', message.optional_string)
  self.assertEqual(False, message.optional_bool)
  self.assertEqual(0, message.optional_nested_message.bb)
def testProto3ParserDropDefaultScalar(self):
  """Proto3 drops default-valued scalars that proto2 serialized explicitly."""
  message_proto2 = unittest_pb2.TestAllTypes()
  message_proto2.optional_int32 = 0
  message_proto2.optional_string = ''
  message_proto2.optional_bytes = b''
  # Proto2 keeps explicitly-set default values on the wire...
  self.assertEqual(len(message_proto2.ListFields()), 3)

  message_proto3 = unittest_proto3_arena_pb2.TestAllTypes()
  message_proto3.ParseFromString(message_proto2.SerializeToString())

  # ...but a proto3 parser treats them as unset.
  self.assertEqual(len(message_proto3.ListFields()), 0)
def testProto3Optional(self):
  """Proto3 `optional` fields have explicit presence, even at default values."""
  msg = test_proto3_optional_pb2.TestProto3Optional()
  self.assertFalse(msg.HasField('optional_int32'))
  self.assertFalse(msg.HasField('optional_float'))
  self.assertFalse(msg.HasField('optional_string'))
  self.assertFalse(msg.HasField('optional_nested_message'))
  self.assertFalse(msg.optional_nested_message.HasField('bb'))

  # Set fields.
  msg.optional_int32 = 1
  msg.optional_float = 1.0
  msg.optional_string = '123'
  msg.optional_nested_message.bb = 1
  self.assertTrue(msg.HasField('optional_int32'))
  self.assertTrue(msg.HasField('optional_float'))
  self.assertTrue(msg.HasField('optional_string'))
  self.assertTrue(msg.HasField('optional_nested_message'))
  self.assertTrue(msg.optional_nested_message.HasField('bb'))

  # Set to default value does not clear the fields
  msg.optional_int32 = 0
  msg.optional_float = 0.0
  msg.optional_string = ''
  msg.optional_nested_message.bb = 0
  self.assertTrue(msg.HasField('optional_int32'))
  self.assertTrue(msg.HasField('optional_float'))
  self.assertTrue(msg.HasField('optional_string'))
  self.assertTrue(msg.HasField('optional_nested_message'))
  self.assertTrue(msg.optional_nested_message.HasField('bb'))

  # Test serialize
  msg2 = test_proto3_optional_pb2.TestProto3Optional()
  msg2.ParseFromString(msg.SerializeToString())
  self.assertTrue(msg2.HasField('optional_int32'))
  self.assertTrue(msg2.HasField('optional_float'))
  self.assertTrue(msg2.HasField('optional_string'))
  self.assertTrue(msg2.HasField('optional_nested_message'))
  self.assertTrue(msg2.optional_nested_message.HasField('bb'))

  # Proto3 optional fields are implemented via a synthetic oneof.
  self.assertEqual(msg.WhichOneof('_optional_int32'), 'optional_int32')

  # Clear these fields.
  msg.ClearField('optional_int32')
  msg.ClearField('optional_float')
  msg.ClearField('optional_string')
  msg.ClearField('optional_nested_message')
  self.assertFalse(msg.HasField('optional_int32'))
  self.assertFalse(msg.HasField('optional_float'))
  self.assertFalse(msg.HasField('optional_string'))
  self.assertFalse(msg.HasField('optional_nested_message'))
  self.assertFalse(msg.optional_nested_message.HasField('bb'))
  self.assertEqual(msg.WhichOneof('_optional_int32'), None)
def testAssignUnknownEnum(self):
  """Assigning an unknown enum value is allowed and preserves the value."""
  m = unittest_proto3_arena_pb2.TestAllTypes()

  # Proto3 can assign unknown enums.
  m.optional_nested_enum = 1234567
  self.assertEqual(1234567, m.optional_nested_enum)
  m.repeated_nested_enum.append(22334455)
  self.assertEqual(22334455, m.repeated_nested_enum[0])
  # Assignment is a different code path than append for the C++ impl.
  m.repeated_nested_enum[0] = 7654321
  self.assertEqual(7654321, m.repeated_nested_enum[0])
  serialized = m.SerializeToString()

  # Unknown enum values survive a serialize/parse round trip.
  m2 = unittest_proto3_arena_pb2.TestAllTypes()
  m2.ParseFromString(serialized)
  self.assertEqual(1234567, m2.optional_nested_enum)
  self.assertEqual(7654321, m2.repeated_nested_enum[0])
# Map isn't really a proto3-only feature. But there is no proto2 equivalent
# of google/protobuf/map_unittest.proto right now, so it's not easy to
# test both with the same test like we do for the other proto2/proto3 tests.
# (google/protobuf/map_proto2_unittest.proto is very different in the set
# of messages and fields it contains).
def testScalarMapDefaults(self):
  """Reading a missing map key returns the default AND inserts the entry."""
  msg = map_unittest_pb2.TestMap()

  # Scalars start out unset.
  self.assertFalse(-123 in msg.map_int32_int32)
  self.assertFalse(-2**33 in msg.map_int64_int64)
  self.assertFalse(123 in msg.map_uint32_uint32)
  self.assertFalse(2**33 in msg.map_uint64_uint64)
  self.assertFalse(123 in msg.map_int32_double)
  self.assertFalse(False in msg.map_bool_bool)
  self.assertFalse('abc' in msg.map_string_string)
  self.assertFalse(111 in msg.map_int32_bytes)
  self.assertFalse(888 in msg.map_int32_enum)

  # Accessing an unset key returns the default.
  self.assertEqual(0, msg.map_int32_int32[-123])
  self.assertEqual(0, msg.map_int64_int64[-2**33])
  self.assertEqual(0, msg.map_uint32_uint32[123])
  self.assertEqual(0, msg.map_uint64_uint64[2**33])
  self.assertEqual(0.0, msg.map_int32_double[123])
  self.assertTrue(isinstance(msg.map_int32_double[123], float))
  self.assertEqual(False, msg.map_bool_bool[False])
  self.assertTrue(isinstance(msg.map_bool_bool[False], bool))
  self.assertEqual('', msg.map_string_string['abc'])
  self.assertEqual(b'', msg.map_int32_bytes[111])
  self.assertEqual(0, msg.map_int32_enum[888])

  # It also sets the value in the map (defaultdict-like behavior).
  self.assertTrue(-123 in msg.map_int32_int32)
  self.assertTrue(-2**33 in msg.map_int64_int64)
  self.assertTrue(123 in msg.map_uint32_uint32)
  self.assertTrue(2**33 in msg.map_uint64_uint64)
  self.assertTrue(123 in msg.map_int32_double)
  self.assertTrue(False in msg.map_bool_bool)
  self.assertTrue('abc' in msg.map_string_string)
  self.assertTrue(111 in msg.map_int32_bytes)
  self.assertTrue(888 in msg.map_int32_enum)

  self.assertIsInstance(msg.map_string_string['abc'], six.text_type)

  # Accessing an unset key still throws TypeError if the type of the key
  # is incorrect.
  with self.assertRaises(TypeError):
    msg.map_string_string[123]

  with self.assertRaises(TypeError):
    123 in msg.map_string_string
def testMapGet(self):
  # Need to test that get() properly returns the default, even though the dict
  # has defaultdict-like semantics.
  msg = map_unittest_pb2.TestMap()

  self.assertIsNone(msg.map_int32_int32.get(5))
  self.assertEqual(10, msg.map_int32_int32.get(5, 10))
  self.assertEqual(10, msg.map_int32_int32.get(key=5, default=10))
  # Unlike [], get() must NOT insert the missing key.
  self.assertIsNone(msg.map_int32_int32.get(5))

  msg.map_int32_int32[5] = 15
  self.assertEqual(15, msg.map_int32_int32.get(5))
  self.assertEqual(15, msg.map_int32_int32.get(5))
  with self.assertRaises(TypeError):
    msg.map_int32_int32.get('')

  self.assertIsNone(msg.map_int32_foreign_message.get(5))
  self.assertEqual(10, msg.map_int32_foreign_message.get(5, 10))
  self.assertEqual(10, msg.map_int32_foreign_message.get(key=5, default=10))

  submsg = msg.map_int32_foreign_message[5]
  # get() returns the same sub-message object that [] created.
  self.assertIs(submsg, msg.map_int32_foreign_message.get(5))
  with self.assertRaises(TypeError):
    msg.map_int32_foreign_message.get('')
def testScalarMap(self):
  """Scalar maps: set/get, type checking, and serialize round trip."""
  msg = map_unittest_pb2.TestMap()

  self.assertEqual(0, len(msg.map_int32_int32))
  self.assertFalse(5 in msg.map_int32_int32)

  msg.map_int32_int32[-123] = -456
  msg.map_int64_int64[-2**33] = -2**34
  msg.map_uint32_uint32[123] = 456
  msg.map_uint64_uint64[2**33] = 2**34
  msg.map_int32_float[2] = 1.2
  msg.map_int32_double[1] = 3.3
  msg.map_string_string['abc'] = '123'
  msg.map_bool_bool[True] = True
  msg.map_int32_enum[888] = 2
  # Unknown numeric enum is supported in proto3.
  msg.map_int32_enum[123] = 456

  self.assertEqual([], msg.FindInitializationErrors())

  self.assertEqual(1, len(msg.map_string_string))

  # Bad key.
  with self.assertRaises(TypeError):
    msg.map_string_string[123] = '123'

  # Verify that trying to assign a bad key doesn't actually add a member to
  # the map.
  self.assertEqual(1, len(msg.map_string_string))

  # Bad value.
  with self.assertRaises(TypeError):
    msg.map_string_string['123'] = 123

  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMap()
  msg2.ParseFromString(serialized)

  # Bad key.
  with self.assertRaises(TypeError):
    msg2.map_string_string[123] = '123'

  # Bad value.
  with self.assertRaises(TypeError):
    msg2.map_string_string['123'] = 123

  # All values survive the round trip.
  self.assertEqual(-456, msg2.map_int32_int32[-123])
  self.assertEqual(-2**34, msg2.map_int64_int64[-2**33])
  self.assertEqual(456, msg2.map_uint32_uint32[123])
  self.assertEqual(2**34, msg2.map_uint64_uint64[2**33])
  self.assertAlmostEqual(1.2, msg.map_int32_float[2])
  self.assertEqual(3.3, msg.map_int32_double[1])
  self.assertEqual('123', msg2.map_string_string['abc'])
  self.assertEqual(True, msg2.map_bool_bool[True])
  self.assertEqual(2, msg2.map_int32_enum[888])
  self.assertEqual(456, msg2.map_int32_enum[123])
  self.assertEqual('{-123: -456}',
                   str(msg2.map_int32_int32))
def testMapEntryAlwaysSerialized(self):
  """Map entries are serialized even when key and value are defaults."""
  msg = map_unittest_pb2.TestMap()
  msg.map_int32_int32[0] = 0
  msg.map_string_string[''] = ''
  self.assertEqual(msg.ByteSize(), 12)
  # Both entries appear on the wire with explicit default key/value fields.
  self.assertEqual(b'\n\x04\x08\x00\x10\x00r\x04\n\x00\x12\x00',
                   msg.SerializeToString())
def testStringUnicodeConversionInMap(self):
  """UTF-8 bytes assigned to string map keys/values come back as text."""
  msg = map_unittest_pb2.TestMap()

  unicode_obj = u'\u1234'
  bytes_obj = unicode_obj.encode('utf8')

  msg.map_string_string[bytes_obj] = bytes_obj

  (key, value) = list(msg.map_string_string.items())[0]

  self.assertEqual(key, unicode_obj)
  self.assertEqual(value, unicode_obj)

  self.assertIsInstance(key, six.text_type)
  self.assertIsInstance(value, six.text_type)
def testMessageMap(self):
  """Message-valued maps: lazy entry creation and assignment restrictions."""
  msg = map_unittest_pb2.TestMap()

  self.assertEqual(0, len(msg.map_int32_foreign_message))
  self.assertFalse(5 in msg.map_int32_foreign_message)

  # Plain subscripting creates the entry.
  msg.map_int32_foreign_message[123]
  # get_or_create() is an alias for getitem.
  msg.map_int32_foreign_message.get_or_create(-456)

  self.assertEqual(2, len(msg.map_int32_foreign_message))
  self.assertIn(123, msg.map_int32_foreign_message)
  self.assertIn(-456, msg.map_int32_foreign_message)
  self.assertEqual(2, len(msg.map_int32_foreign_message))

  # Bad key.
  with self.assertRaises(TypeError):
    msg.map_int32_foreign_message['123']

  # Can't assign directly to submessage.
  with self.assertRaises(ValueError):
    msg.map_int32_foreign_message[999] = msg.map_int32_foreign_message[123]

  # Verify that trying to assign a bad key doesn't actually add a member to
  # the map.
  self.assertEqual(2, len(msg.map_int32_foreign_message))

  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMap()
  msg2.ParseFromString(serialized)

  self.assertEqual(2, len(msg2.map_int32_foreign_message))
  self.assertIn(123, msg2.map_int32_foreign_message)
  self.assertIn(-456, msg2.map_int32_foreign_message)
  self.assertEqual(2, len(msg2.map_int32_foreign_message))
  msg2.map_int32_foreign_message[123].c = 1
  # TODO(jieluo): Fix text format for message map.
  # Map iteration order is unspecified, so accept either key order.
  self.assertIn(str(msg2.map_int32_foreign_message),
                ('{-456: , 123: c: 1\n}', '{123: c: 1\n, -456: }'))
def testNestedMessageMapItemDelete(self):
  """Deleting map items with nested-message values, including re-adding
  a previously deleted key."""
  msg = map_unittest_pb2.TestMap()
  msg.map_int32_all_types[1].optional_nested_message.bb = 1
  del msg.map_int32_all_types[1]
  msg.map_int32_all_types[2].optional_nested_message.bb = 2
  self.assertEqual(1, len(msg.map_int32_all_types))
  msg.map_int32_all_types[1].optional_nested_message.bb = 1
  self.assertEqual(2, len(msg.map_int32_all_types))
  serialized = msg.SerializeToString()
  msg2 = map_unittest_pb2.TestMap()
  msg2.ParseFromString(serialized)
  keys = [1, 2]
  # The loop triggers PyErr_Occurred() in c extension.
  for key in keys:
    del msg2.map_int32_all_types[key]
def testMapByteSize(self):
  """ByteSize() grows when a map value needs more varint bytes."""
  msg = map_unittest_pb2.TestMap()
  msg.map_int32_int32[1] = 1
  size = msg.ByteSize()
  # 128 needs a two-byte varint where 1 needed one byte.
  msg.map_int32_int32[1] = 128
  self.assertEqual(msg.ByteSize(), size + 1)

  msg.map_int32_foreign_message[19].c = 1
  size = msg.ByteSize()
  msg.map_int32_foreign_message[19].c = 128
  self.assertEqual(msg.ByteSize(), size + 1)
def testMergeFrom(self):
  """Message-level MergeFrom merges map fields without duplicating keys."""
  msg = map_unittest_pb2.TestMap()
  msg.map_int32_int32[12] = 34
  msg.map_int32_int32[56] = 78
  msg.map_int64_int64[22] = 33
  msg.map_int32_foreign_message[111].c = 5
  msg.map_int32_foreign_message[222].c = 10

  msg2 = map_unittest_pb2.TestMap()
  msg2.map_int32_int32[12] = 55
  msg2.map_int64_int64[88] = 99
  msg2.map_int32_foreign_message[222].c = 15
  msg2.map_int32_foreign_message[222].d = 20
  old_map_value = msg2.map_int32_foreign_message[222]

  msg2.MergeFrom(msg)
  # Compare with expected message instead of call
  # msg2.map_int32_foreign_message[222] to make sure MergeFrom does not
  # sync with repeated field and there is no duplicated keys.
  expected_msg = map_unittest_pb2.TestMap()
  expected_msg.CopyFrom(msg)
  expected_msg.map_int64_int64[88] = 99
  self.assertEqual(msg2, expected_msg)

  # For existing keys, the source message's value wins (message values
  # are replaced wholesale, so msg2's `d` is gone).
  self.assertEqual(34, msg2.map_int32_int32[12])
  self.assertEqual(78, msg2.map_int32_int32[56])
  self.assertEqual(33, msg2.map_int64_int64[22])
  self.assertEqual(99, msg2.map_int64_int64[88])
  self.assertEqual(5, msg2.map_int32_foreign_message[111].c)
  self.assertEqual(10, msg2.map_int32_foreign_message[222].c)
  self.assertFalse(msg2.map_int32_foreign_message[222].HasField('d'))
  if api_implementation.Type() != 'cpp':
    # During the call to MergeFrom(), the C++ implementation will have
    # deallocated the underlying message, but this is very difficult to detect
    # properly. The line below is likely to cause a segmentation fault.
    # With the Python implementation, old_map_value is just 'detached' from
    # the main message. Using it will not crash of course, but since it still
    # have a reference to the parent message I'm sure we can find interesting
    # ways to cause inconsistencies.
    self.assertEqual(15, old_map_value.c)

  # Verify that there is only one entry per key, even though the MergeFrom
  # may have internally created multiple entries for a single key in the
  # list representation.
  as_dict = {}
  for key in msg2.map_int32_foreign_message:
    self.assertFalse(key in as_dict)
    as_dict[key] = msg2.map_int32_foreign_message[key].c

  self.assertEqual({111: 5, 222: 10}, as_dict)

  # Special case: test that delete of item really removes the item, even if
  # there might have physically been duplicate keys due to the previous merge.
  # This is only a special case for the C++ implementation which stores the
  # map as an array.
  del msg2.map_int32_int32[12]
  self.assertFalse(12 in msg2.map_int32_int32)
  del msg2.map_int32_foreign_message[222]
  self.assertFalse(222 in msg2.map_int32_foreign_message)

  with self.assertRaises(TypeError):
    del msg2.map_int32_foreign_message['']
  def testMapMergeFrom(self):
    """Per-field MergeFrom on map containers merges key-by-key, and
    MergeFromString overwrites message-typed map values for existing keys."""
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_int32[12] = 34
    msg.map_int32_int32[56] = 78
    msg.map_int64_int64[22] = 33
    msg.map_int32_foreign_message[111].c = 5
    msg.map_int32_foreign_message[222].c = 10
    msg2 = map_unittest_pb2.TestMap()
    msg2.map_int32_int32[12] = 55
    msg2.map_int64_int64[88] = 99
    msg2.map_int32_foreign_message[222].c = 15
    msg2.map_int32_foreign_message[222].d = 20
    msg2.map_int32_int32.MergeFrom(msg.map_int32_int32)
    self.assertEqual(34, msg2.map_int32_int32[12])
    self.assertEqual(78, msg2.map_int32_int32[56])
    msg2.map_int64_int64.MergeFrom(msg.map_int64_int64)
    self.assertEqual(33, msg2.map_int64_int64[22])
    self.assertEqual(99, msg2.map_int64_int64[88])
    msg2.map_int32_foreign_message.MergeFrom(msg.map_int32_foreign_message)
    # Compare with expected message instead of call
    # msg.map_int32_foreign_message[222] to make sure MergeFrom does not
    # sync with repeated field and no duplicated keys.
    expected_msg = map_unittest_pb2.TestMap()
    expected_msg.CopyFrom(msg)
    expected_msg.map_int64_int64[88] = 99
    self.assertEqual(msg2, expected_msg)
    # Test when cpp extension cache a map.
    m1 = map_unittest_pb2.TestMap()
    m2 = map_unittest_pb2.TestMap()
    self.assertEqual(m1.map_int32_foreign_message,
                     m1.map_int32_foreign_message)
    m2.map_int32_foreign_message[123].c = 10
    m1.MergeFrom(m2)
    self.assertEqual(10, m2.map_int32_foreign_message[123].c)
    # Test merge maps within different message types.
    m1 = map_unittest_pb2.TestMap()
    m2 = map_unittest_pb2.TestMessageMap()
    m2.map_int32_message[123].optional_int32 = 10
    m1.map_int32_all_types.MergeFrom(m2.map_int32_message)
    self.assertEqual(10, m1.map_int32_all_types[123].optional_int32)
    # Test overwrite message value map
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_foreign_message[222].c = 123
    msg2 = map_unittest_pb2.TestMap()
    msg2.map_int32_foreign_message[222].d = 20
    msg.MergeFromString(msg2.SerializeToString())
    self.assertEqual(msg.map_int32_foreign_message[222].d, 20)
    self.assertNotEqual(msg.map_int32_foreign_message[222].c, 123)
  def testMergeFromBadType(self):
    """MergeFrom rejects arguments that are not the same message class."""
    msg = map_unittest_pb2.TestMap()
    with self.assertRaisesRegexp(
        TypeError,
        r'Parameter to MergeFrom\(\) must be instance of same class: expected '
        r'.*TestMap got int\.'):
      msg.MergeFrom(1)
  def testCopyFromBadType(self):
    """CopyFrom rejects arguments that are not the same message class."""
    msg = map_unittest_pb2.TestMap()
    with self.assertRaisesRegexp(
        TypeError,
        r'Parameter to [A-Za-z]*From\(\) must be instance of same class: '
        r'expected .*TestMap got int\.'):
      msg.CopyFrom(1)
  def testIntegerMapWithLongs(self):
    """Integer map keys/values accept Python 2 long, including 64-bit values
    that do not fit in 32 bits, and round-trip through serialization."""
    # NOTE(review): long() is a Python 2 builtin; presumably an alias is
    # provided elsewhere in this file for Python 3 -- TODO confirm.
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_int32[long(-123)] = long(-456)
    msg.map_int64_int64[long(-2**33)] = long(-2**34)
    msg.map_uint32_uint32[long(123)] = long(456)
    msg.map_uint64_uint64[long(2**33)] = long(2**34)
    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMap()
    msg2.ParseFromString(serialized)
    self.assertEqual(-456, msg2.map_int32_int32[-123])
    self.assertEqual(-2**34, msg2.map_int64_int64[-2**33])
    self.assertEqual(456, msg2.map_uint32_uint32[123])
    self.assertEqual(2**34, msg2.map_uint64_uint64[2**33])
  def testMapAssignmentCausesPresence(self):
    """Assigning into a scalar map marks the containing submessage present and
    keeps its cached serialized size in sync across mutations."""
    msg = map_unittest_pb2.TestMapSubmessage()
    msg.test_map.map_int32_int32[123] = 456
    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMapSubmessage()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)
    # Now test that various mutations of the map properly invalidate the
    # cached size of the submessage.
    msg.test_map.map_int32_int32[888] = 999
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)
    msg.test_map.map_int32_int32.clear()
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)
  def testMapAssignmentCausesPresenceForSubmessages(self):
    """Same as testMapAssignmentCausesPresence, but for message-valued maps,
    including mutation via MergeFrom on a map entry."""
    msg = map_unittest_pb2.TestMapSubmessage()
    msg.test_map.map_int32_foreign_message[123].c = 5
    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMapSubmessage()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)
    # Now test that various mutations of the map properly invalidate the
    # cached size of the submessage.
    msg.test_map.map_int32_foreign_message[888].c = 7
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)
    msg.test_map.map_int32_foreign_message[888].MergeFrom(
        msg.test_map.map_int32_foreign_message[123])
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)
    msg.test_map.map_int32_foreign_message.clear()
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)
  def testModifyMapWhileIterating(self):
    """Mutating a map invalidates live iterators: consuming them afterwards
    raises RuntimeError instead of yielding stale data."""
    msg = map_unittest_pb2.TestMap()
    string_string_iter = iter(msg.map_string_string)
    int32_foreign_iter = iter(msg.map_int32_foreign_message)
    msg.map_string_string['abc'] = '123'
    msg.map_int32_foreign_message[5].c = 5
    with self.assertRaises(RuntimeError):
      for key in string_string_iter:
        pass
    with self.assertRaises(RuntimeError):
      for key in int32_foreign_iter:
        pass
  def testSubmessageMap(self):
    """Message-valued map access auto-creates and caches the submessage;
    direct assignment of a message into the map is rejected."""
    msg = map_unittest_pb2.TestMap()
    submsg = msg.map_int32_foreign_message[111]
    self.assertIs(submsg, msg.map_int32_foreign_message[111])
    self.assertIsInstance(submsg, unittest_pb2.ForeignMessage)
    submsg.c = 5
    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMap()
    msg2.ParseFromString(serialized)
    self.assertEqual(5, msg2.map_int32_foreign_message[111].c)
    # Doesn't allow direct submessage assignment.
    with self.assertRaises(ValueError):
      msg.map_int32_foreign_message[88] = unittest_pb2.ForeignMessage()
  def testMapIteration(self):
    """Iterating an empty map yields nothing; items() reflects contents."""
    msg = map_unittest_pb2.TestMap()
    for k, v in msg.map_int32_int32.items():
      # Should not be reached.
      self.assertTrue(False)
    msg.map_int32_int32[2] = 4
    msg.map_int32_int32[3] = 6
    msg.map_int32_int32[4] = 8
    self.assertEqual(3, len(msg.map_int32_int32))
    matching_dict = {2: 4, 3: 6, 4: 8}
    self.assertMapIterEquals(msg.map_int32_int32.items(), matching_dict)
  def testPython2Map(self):
    """Exercises the Python-2-only dict API of scalar maps: iteritems/iterkeys/
    itervalues, get, pop, popitem, clear, setdefault and update."""
    if sys.version_info < (3,):
      msg = map_unittest_pb2.TestMap()
      msg.map_int32_int32[2] = 4
      msg.map_int32_int32[3] = 6
      msg.map_int32_int32[4] = 8
      msg.map_int32_int32[5] = 10
      map_int32 = msg.map_int32_int32
      self.assertEqual(4, len(map_int32))
      msg2 = map_unittest_pb2.TestMap()
      msg2.ParseFromString(msg.SerializeToString())
      def CheckItems(seq, iterator):
        self.assertEqual(next(iterator), seq[0])
        self.assertEqual(list(iterator), seq[1:])
      CheckItems(map_int32.items(), map_int32.iteritems())
      CheckItems(map_int32.keys(), map_int32.iterkeys())
      CheckItems(map_int32.values(), map_int32.itervalues())
      self.assertEqual(6, map_int32.get(3))
      self.assertEqual(None, map_int32.get(999))
      self.assertEqual(6, map_int32.pop(3))
      # NOTE(review): pop() of a just-removed key returns 0 here rather than
      # raising KeyError -- presumably the map's __getitem__ materializes the
      # default value first; confirm against the container implementation.
      self.assertEqual(0, map_int32.pop(3))
      self.assertEqual(3, len(map_int32))
      key, value = map_int32.popitem()
      self.assertEqual(2 * key, value)
      self.assertEqual(2, len(map_int32))
      map_int32.clear()
      self.assertEqual(0, len(map_int32))
      with self.assertRaises(KeyError):
        map_int32.popitem()
      self.assertEqual(0, map_int32.setdefault(2))
      self.assertEqual(1, len(map_int32))
      map_int32.update(msg2.map_int32_int32)
      self.assertEqual(4, len(map_int32))
      with self.assertRaises(TypeError):
        map_int32.update(msg2.map_int32_int32,
                         msg2.map_int32_int32)
      with self.assertRaises(TypeError):
        map_int32.update(0)
      with self.assertRaises(TypeError):
        map_int32.update(value=12)
  def testMapItems(self):
    """Two consecutive items() calls on the same map must agree."""
    # Map items used to have strange behaviors when use c extension. Because
    # [] may reorder the map and invalidate any exsting iterators.
    # TODO(jieluo): Check if [] reordering the map is a bug or intended
    # behavior.
    msg = map_unittest_pb2.TestMap()
    msg.map_string_string['local_init_op'] = ''
    msg.map_string_string['trainable_variables'] = ''
    msg.map_string_string['variables'] = ''
    msg.map_string_string['init_op'] = ''
    msg.map_string_string['summaries'] = ''
    items1 = msg.map_string_string.items()
    items2 = msg.map_string_string.items()
    self.assertEqual(items1, items2)
  def testMapDeterministicSerialization(self):
    """SerializeToString(deterministic=True) emits map entries in sorted key
    order, matching the golden bytes regardless of dict hash seed."""
    golden_data = (b'r\x0c\n\x07init_op\x12\x01d'
                   b'r\n\n\x05item1\x12\x01e'
                   b'r\n\n\x05item2\x12\x01f'
                   b'r\n\n\x05item3\x12\x01g'
                   b'r\x0b\n\x05item4\x12\x02QQ'
                   b'r\x12\n\rlocal_init_op\x12\x01a'
                   b'r\x0e\n\tsummaries\x12\x01e'
                   b'r\x18\n\x13trainable_variables\x12\x01b'
                   b'r\x0e\n\tvariables\x12\x01c')
    msg = map_unittest_pb2.TestMap()
    msg.map_string_string['local_init_op'] = 'a'
    msg.map_string_string['trainable_variables'] = 'b'
    msg.map_string_string['variables'] = 'c'
    msg.map_string_string['init_op'] = 'd'
    msg.map_string_string['summaries'] = 'e'
    msg.map_string_string['item1'] = 'e'
    msg.map_string_string['item2'] = 'f'
    msg.map_string_string['item3'] = 'g'
    msg.map_string_string['item4'] = 'QQ'
    # If deterministic serialization is not working correctly, this will be
    # "flaky" depending on the exact python dict hash seed.
    #
    # Fortunately, there are enough items in this map that it is extremely
    # unlikely to ever hit the "right" in-order combination, so the test
    # itself should fail reliably.
    self.assertEqual(golden_data, msg.SerializeToString(deterministic=True))
  def testMapIterationClearMessage(self):
    """A live items() view keeps working after the owning message is deleted."""
    # Iterator needs to work even if message and map are deleted.
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_int32[2] = 4
    msg.map_int32_int32[3] = 6
    msg.map_int32_int32[4] = 8
    it = msg.map_int32_int32.items()
    del msg
    matching_dict = {2: 4, 3: 6, 4: 8}
    self.assertMapIterEquals(it, matching_dict)
  def testMapConstruction(self):
    """Map fields can be initialized from dicts in the message constructor."""
    msg = map_unittest_pb2.TestMap(map_int32_int32={1: 2, 3: 4})
    self.assertEqual(2, msg.map_int32_int32[1])
    self.assertEqual(4, msg.map_int32_int32[3])
    msg = map_unittest_pb2.TestMap(
        map_int32_foreign_message={3: unittest_pb2.ForeignMessage(c=5)})
    self.assertEqual(5, msg.map_int32_foreign_message[3].c)
  def testMapScalarFieldConstruction(self):
    """A scalar map container can seed another message's constructor."""
    msg1 = map_unittest_pb2.TestMap()
    msg1.map_int32_int32[1] = 42
    msg2 = map_unittest_pb2.TestMap(map_int32_int32=msg1.map_int32_int32)
    self.assertEqual(42, msg2.map_int32_int32[1])
  def testMapMessageFieldConstruction(self):
    """A message-valued map container can seed another message's constructor."""
    msg1 = map_unittest_pb2.TestMap()
    msg1.map_string_foreign_message['test'].c = 42
    msg2 = map_unittest_pb2.TestMap(
        map_string_foreign_message=msg1.map_string_foreign_message)
    self.assertEqual(42, msg2.map_string_foreign_message['test'].c)
  def testMapFieldRaisesCorrectError(self):
    """Constructing a map field from a non-iterable raises TypeError."""
    # Should raise a TypeError when given a non-iterable.
    with self.assertRaises(TypeError):
      map_unittest_pb2.TestMap(map_string_foreign_message=1)
  def testMapValidAfterFieldCleared(self):
    """A detached scalar map container stays readable after ClearField."""
    # Map needs to work even if field is cleared.
    # For the C++ implementation this tests the correctness of
    # MapContainer::Release()
    msg = map_unittest_pb2.TestMap()
    int32_map = msg.map_int32_int32
    int32_map[2] = 4
    int32_map[3] = 6
    int32_map[4] = 8
    msg.ClearField('map_int32_int32')
    self.assertEqual(b'', msg.SerializeToString())
    matching_dict = {2: 4, 3: 6, 4: 8}
    self.assertMapIterEquals(int32_map.items(), matching_dict)
  def testMessageMapValidAfterFieldCleared(self):
    """A detached message-valued map container stays readable after ClearField."""
    # Map needs to work even if field is cleared.
    # For the C++ implementation this tests the correctness of
    # MapContainer::Release()
    msg = map_unittest_pb2.TestMap()
    int32_foreign_message = msg.map_int32_foreign_message
    int32_foreign_message[2].c = 5
    msg.ClearField('map_int32_foreign_message')
    self.assertEqual(b'', msg.SerializeToString())
    self.assertTrue(2 in int32_foreign_message.keys())
  def testMessageMapItemValidAfterTopMessageCleared(self):
    """A map entry submessage keeps its value after the parent is cleared."""
    # Message map item needs to work even if it is cleared.
    # For the C++ implementation this tests the correctness of
    # MapContainer::Release()
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_all_types[2].optional_string = 'bar'
    if api_implementation.Type() == 'cpp':
      # Need to keep the map reference because of b/27942626.
      # TODO(jieluo): Remove it.
      unused_map = msg.map_int32_all_types  # pylint: disable=unused-variable
    msg_value = msg.map_int32_all_types[2]
    msg.Clear()
    # Reset to trigger sync between repeated field and map in c++.
    msg.map_int32_all_types[3].optional_string = 'foo'
    self.assertEqual(msg_value.optional_string, 'bar')
  def testMapIterInvalidatedByClearField(self):
    """ClearField invalidates live map iterators; consuming them raises
    RuntimeError rather than crashing the interpreter."""
    # Map iterator is invalidated when field is cleared.
    # But this case does need to not crash the interpreter.
    # For the C++ implementation this tests the correctness of
    # ScalarMapContainer::Release()
    msg = map_unittest_pb2.TestMap()
    it = iter(msg.map_int32_int32)
    msg.ClearField('map_int32_int32')
    with self.assertRaises(RuntimeError):
      for _ in it:
        pass
    it = iter(msg.map_int32_foreign_message)
    msg.ClearField('map_int32_foreign_message')
    with self.assertRaises(RuntimeError):
      for _ in it:
        pass
  def testMapDelete(self):
    """del on a map removes present keys and raises KeyError for absent ones."""
    msg = map_unittest_pb2.TestMap()
    self.assertEqual(0, len(msg.map_int32_int32))
    msg.map_int32_int32[4] = 6
    self.assertEqual(1, len(msg.map_int32_int32))
    with self.assertRaises(KeyError):
      del msg.map_int32_int32[88]
    del msg.map_int32_int32[4]
    self.assertEqual(0, len(msg.map_int32_int32))
    with self.assertRaises(KeyError):
      del msg.map_int32_all_types[32]
  def testMapsAreMapping(self):
    """Map containers implement the Mapping / MutableMapping ABCs."""
    msg = map_unittest_pb2.TestMap()
    self.assertIsInstance(msg.map_int32_int32, collections_abc.Mapping)
    self.assertIsInstance(msg.map_int32_int32, collections_abc.MutableMapping)
    self.assertIsInstance(msg.map_int32_foreign_message, collections_abc.Mapping)
    self.assertIsInstance(msg.map_int32_foreign_message,
                          collections_abc.MutableMapping)
  def testMapsCompare(self):
    """Map containers compare equal to themselves and unequal to non-maps."""
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_int32[-123] = -456
    self.assertEqual(msg.map_int32_int32, msg.map_int32_int32)
    self.assertEqual(msg.map_int32_foreign_message,
                     msg.map_int32_foreign_message)
    self.assertNotEqual(msg.map_int32_int32, 0)
  def testMapFindInitializationErrorsSmokeTest(self):
    """FindInitializationErrors returns no errors for populated map fields."""
    msg = map_unittest_pb2.TestMap()
    msg.map_string_string['abc'] = '123'
    msg.map_int32_int32[35] = 64
    msg.map_string_foreign_message['foo'].c = 5
    self.assertEqual(0, len(msg.FindInitializationErrors()))
  @unittest.skipIf(sys.maxunicode == UCS2_MAXUNICODE, 'Skip for ucs2')
  def testStrictUtf8Check(self):
    """Parsing rejects lone-surrogate UTF-8 bytes while valid code points
    (including astral-plane characters) round-trip; setters still accept a
    lone surrogate string value directly."""
    # Test u'\ud801' is rejected at parser in both python2 and python3.
    serialized = (b'r\x03\xed\xa0\x81')
    msg = unittest_proto3_arena_pb2.TestAllTypes()
    with self.assertRaises(Exception) as context:
      msg.MergeFromString(serialized)
    if api_implementation.Type() == 'python':
      self.assertIn('optional_string', str(context.exception))
    else:
      self.assertIn('Error parsing message', str(context.exception))
    # Test optional_string=u'😍' is accepted.
    serialized = unittest_proto3_arena_pb2.TestAllTypes(
        optional_string=u'😍').SerializeToString()
    msg2 = unittest_proto3_arena_pb2.TestAllTypes()
    msg2.MergeFromString(serialized)
    self.assertEqual(msg2.optional_string, u'😍')
    msg = unittest_proto3_arena_pb2.TestAllTypes(
        optional_string=u'\ud001')
    self.assertEqual(msg.optional_string, u'\ud001')
  @unittest.skipIf(six.PY2, 'Surrogates are acceptable in python2')
  def testSurrogatesInPython3(self):
    """Under Python 3, surrogate code points are rejected at the setter,
    whether given as a surrogate pair, raw UTF-8 bytes, or lone surrogates."""
    # Surrogates like U+D83D is an invalid unicode character, it is
    # supported by Python2 only because in some builds, unicode strings
    # use 2-bytes code units. Since Python 3.3, we don't have this problem.
    #
    # Surrogates are utf16 code units, in a unicode string they are invalid
    # characters even when they appear in pairs like u'\ud801\udc01'. Protobuf
    # Python3 reject such cases at setters and parsers. Python2 accpect it
    # to keep same features with the language itself. 'Unpaired pairs'
    # like u'\ud801' are rejected at parsers when strict utf8 check is enabled
    # in proto3 to keep same behavior with c extension.
    # Surrogates are rejected at setters in Python3.
    with self.assertRaises(ValueError):
      unittest_proto3_arena_pb2.TestAllTypes(
          optional_string=u'\ud801\udc01')
    with self.assertRaises(ValueError):
      unittest_proto3_arena_pb2.TestAllTypes(
          optional_string=b'\xed\xa0\x81')
    with self.assertRaises(ValueError):
      unittest_proto3_arena_pb2.TestAllTypes(
          optional_string=u'\ud801')
    with self.assertRaises(ValueError):
      unittest_proto3_arena_pb2.TestAllTypes(
          optional_string=u'\ud801\ud801')
  @unittest.skipIf(six.PY3 or sys.maxunicode == UCS2_MAXUNICODE,
                   'Surrogates are rejected at setters in Python3')
  def testSurrogatesInPython2(self):
    """Under Python 2 (UCS4 builds), surrogate pairs are accepted at setters
    and normalize to the corresponding astral code point after a round-trip."""
    # Test optional_string=u'\ud801\udc01'.
    # surrogate pair is acceptable in python2.
    msg = unittest_proto3_arena_pb2.TestAllTypes(
        optional_string=u'\ud801\udc01')
    # TODO(jieluo): Change pure python to have same behavior with c extension.
    # Some build in python2 consider u'\ud801\udc01' and u'\U00010401' are
    # equal, some are not equal.
    if api_implementation.Type() == 'python':
      self.assertEqual(msg.optional_string, u'\ud801\udc01')
    else:
      self.assertEqual(msg.optional_string, u'\U00010401')
    serialized = msg.SerializeToString()
    msg2 = unittest_proto3_arena_pb2.TestAllTypes()
    msg2.MergeFromString(serialized)
    self.assertEqual(msg2.optional_string, u'\U00010401')
    # Python2 does not reject surrogates at setters.
    msg = unittest_proto3_arena_pb2.TestAllTypes(
        optional_string=b'\xed\xa0\x81')
    unittest_proto3_arena_pb2.TestAllTypes(
        optional_string=u'\ud801')
    unittest_proto3_arena_pb2.TestAllTypes(
        optional_string=u'\ud801\ud801')
@testing_refleaks.TestCase
class ValidTypeNamesTest(unittest.TestCase):
  """Checks that dynamically-created repeated-field container types carry
  importable qualified names (a prerequisite for pickling them)."""

  def assertImportFromName(self, msg, base_name):
    """Asserts type(msg)'s name ends with Repeated<base_name>Container or
    Repeated<base_name>FieldContainer and that its module is importable."""
    # Parse <type 'module.class_name'> to extract 'some.name' as a string.
    tp_name = str(type(msg)).split("'")[1]
    valid_names = ('Repeated%sContainer' % base_name,
                   'Repeated%sFieldContainer' % base_name)
    # Fixed failure message: it used to read "does end with", stating the
    # opposite of the actual failure condition.
    self.assertTrue(any(tp_name.endswith(v) for v in valid_names),
                    '%r does not end with any of %r' % (tp_name, valid_names))
    parts = tp_name.split('.')
    class_name = parts[-1]
    module_name = '.'.join(parts[:-1])
    __import__(module_name, fromlist=[class_name])

  def testTypeNamesCanBeImported(self):
    """Smoke test: if import doesn't work, pickling won't work either."""
    pb = unittest_pb2.TestAllTypes()
    self.assertImportFromName(pb.repeated_int32, 'Scalar')
    self.assertImportFromName(pb.repeated_nested_message, 'Composite')
@testing_refleaks.TestCase
class PackedFieldTest(unittest.TestCase):
  """Verifies the wire format of packed vs. unpacked repeated fields against
  golden byte strings."""
  def setMessage(self, message):
    """Appends one element (value 1 / 1.0 / True) to every repeated field."""
    message.repeated_int32.append(1)
    message.repeated_int64.append(1)
    message.repeated_uint32.append(1)
    message.repeated_uint64.append(1)
    message.repeated_sint32.append(1)
    message.repeated_sint64.append(1)
    message.repeated_fixed32.append(1)
    message.repeated_fixed64.append(1)
    message.repeated_sfixed32.append(1)
    message.repeated_sfixed64.append(1)
    message.repeated_float.append(1.0)
    message.repeated_double.append(1.0)
    message.repeated_bool.append(True)
    message.repeated_nested_enum.append(1)
  def testPackedFields(self):
    """Packed repeated fields use length-delimited encoding (one tag per
    field); note the sint fields carry 0x02 on the wire for value 1."""
    message = packed_field_test_pb2.TestPackedTypes()
    self.setMessage(message)
    golden_data = (b'\x0A\x01\x01'
                   b'\x12\x01\x01'
                   b'\x1A\x01\x01'
                   b'\x22\x01\x01'
                   b'\x2A\x01\x02'
                   b'\x32\x01\x02'
                   b'\x3A\x04\x01\x00\x00\x00'
                   b'\x42\x08\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x4A\x04\x01\x00\x00\x00'
                   b'\x52\x08\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x5A\x04\x00\x00\x80\x3f'
                   b'\x62\x08\x00\x00\x00\x00\x00\x00\xf0\x3f'
                   b'\x6A\x01\x01'
                   b'\x72\x01\x01')
    self.assertEqual(golden_data, message.SerializeToString())
  def testUnpackedFields(self):
    """Unpacked repeated fields emit one tagged value per element."""
    message = packed_field_test_pb2.TestUnpackedTypes()
    self.setMessage(message)
    golden_data = (b'\x08\x01'
                   b'\x10\x01'
                   b'\x18\x01'
                   b'\x20\x01'
                   b'\x28\x02'
                   b'\x30\x02'
                   b'\x3D\x01\x00\x00\x00'
                   b'\x41\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x4D\x01\x00\x00\x00'
                   b'\x51\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x5D\x00\x00\x80\x3f'
                   b'\x61\x00\x00\x00\x00\x00\x00\xf0\x3f'
                   b'\x68\x01'
                   b'\x70\x01')
    self.assertEqual(golden_data, message.SerializeToString())
@unittest.skipIf(api_implementation.Type() != 'cpp' or
                 sys.version_info < (2, 7),
                 'explicit tests of the C++ implementation for PY27 and above')
@testing_refleaks.TestCase
class OversizeProtosTest(unittest.TestCase):
  """Checks the C++ extension's SetAllowOversizeProtos toggle: parsing a
  >64MiB message must fail when disallowed and succeed when allowed."""

  @classmethod
  def setUpClass(cls):
    # At the moment, reference cycles between DescriptorPool and Message classes
    # are not detected and these objects are never freed.
    # To avoid errors with ReferenceLeakChecker, we create the class only once.
    file_desc = """
      name: "f/f.msg2"
      package: "f"
      message_type {
        name: "msg1"
        field {
          name: "payload"
          number: 1
          label: LABEL_OPTIONAL
          type: TYPE_STRING
        }
      }
      message_type {
        name: "msg2"
        field {
          name: "field"
          number: 1
          label: LABEL_OPTIONAL
          type: TYPE_MESSAGE
          type_name: "msg1"
        }
      }
    """
    pool = descriptor_pool.DescriptorPool()
    desc = descriptor_pb2.FileDescriptorProto()
    text_format.Parse(file_desc, desc)
    pool.Add(desc)
    cls.proto_cls = message_factory.MessageFactory(pool).GetPrototype(
        pool.FindMessageTypeByName('f.msg2'))

  def setUp(self):
    # One string payload just over the 64 MiB parsing limit.
    self.p = self.proto_cls()
    self.p.field.payload = 'c' * (1024 * 1024 * 64 + 1)
    self.p_serialized = self.p.SerializeToString()

  def testAssertOversizeProto(self):
    from google.protobuf.pyext._message import SetAllowOversizeProtos
    SetAllowOversizeProtos(False)
    q = self.proto_cls()
    # Use assertRaises so the test FAILS if parsing unexpectedly succeeds;
    # the previous try/except silently passed in that case.
    with self.assertRaises(message.DecodeError) as context:
      q.ParseFromString(self.p_serialized)
    self.assertEqual(str(context.exception), 'Error parsing message')

  def testSucceedOversizeProto(self):
    from google.protobuf.pyext._message import SetAllowOversizeProtos
    SetAllowOversizeProtos(True)
    q = self.proto_cls()
    q.ParseFromString(self.p_serialized)
    self.assertEqual(self.p.field.payload, q.field.payload)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| nwjs/chromium.src | third_party/protobuf/python/google/protobuf/internal/message_test.py | Python | bsd-3-clause | 109,614 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandParser
from django.core.management.color import no_style
def add_builtin_arguments(parser):
    """Attach Django's standard global options to ``parser``.

    Adds ``--noinput`` plus the options every Django management command
    accepts (``-v/--verbosity``, ``--settings``, ``--pythonpath``,
    ``--traceback``, ``--no-color``) so subcommand parsers behave like
    top-level ``manage.py`` commands.

    :param parser: an ``argparse.ArgumentParser`` (or Django CommandParser)
                   to mutate in place.
    """
    parser.add_argument(
        '--noinput',
        action='store_false',
        dest='interactive',
        default=True,
        help='Tells Django CMS to NOT prompt the user for input of any kind.'
    )
    # These are taken "as-is" from Django's management base
    # management command.
    # Fixed: default was the *string* '1', which only worked because argparse
    # re-parses string defaults through `type`; an int default is explicit.
    parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default=1,
                        type=int, choices=[0, 1, 2, 3],
                        help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
    parser.add_argument('--settings',
                        help=(
                            'The Python path to a settings module, e.g. '
                            '"myproject.settings.main". If this isn\'t provided, the '
                            'DJANGO_SETTINGS_MODULE environment variable will be used.'
                        ),
                        )
    parser.add_argument('--pythonpath',
                        help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
    parser.add_argument('--traceback', action='store_true',
                        help='Raise on CommandError exceptions')
    parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
                        help="Don't colorize the command output.")
class SubcommandsCommand(BaseCommand):
    """Django management command that dispatches to named subcommands.

    Subclasses populate ``subcommands`` with an OrderedDict mapping a
    subcommand name to another ``SubcommandsCommand`` subclass; ``handle``
    then routes execution to the matching instance.
    """
    # name -> command class; order defines help listing order
    subcommands = OrderedDict()
    # name -> instantiated command; filled lazily in add_arguments
    instances = {}
    # help text shown in the subparser listing
    help_string = ''
    # name under which this command is registered as a subcommand
    command_name = ''
    # argparse "dest" used to recover the chosen subcommand from options
    subcommand_dest = 'subcmd'
    def create_parser(self, prog_name, subcommand):
        # Builds a CommandParser like BaseCommand.create_parser does, but
        # with the subcommand appended to the program name.
        # NOTE(review): CommandParser(self, ...) is the pre-Django-2.0
        # signature -- confirm against the targeted Django versions.
        parser = CommandParser(
            self,
            prog="%s %s" % (os.path.basename(prog_name), subcommand),
            description=self.help or None
        )
        self.add_arguments(parser)
        return parser
    def add_arguments(self, parser):
        # Registers one subparser per entry in ``subcommands`` and caches the
        # command instances so handle() can dispatch to them.
        self.instances = {}
        if self.subcommands:
            subparsers = parser.add_subparsers(dest=self.subcommand_dest)
            for command, cls in self.subcommands.items():
                # Share the raw output streams with the child command.
                instance = cls(self.stdout._out, self.stderr._out)
                instance.style = self.style
                parser_sub = subparsers.add_parser(
                    cmd=self, name=instance.command_name, help=instance.help_string,
                    description=instance.help_string
                )
                add_builtin_arguments(parser=parser_sub)
                instance.add_arguments(parser_sub)
                self.instances[command] = instance
    def handle(self, *args, **options):
        # Dispatch to the selected subcommand, propagating color settings and
        # any overridden output streams; print help when no match is found.
        if options[self.subcommand_dest] in self.instances:
            command = self.instances[options[self.subcommand_dest]]
            if options.get('no_color'):
                command.style = no_style()
                command.stderr.style_func = None
            if options.get('stdout'):
                command.stdout._out = options.get('stdout')
            if options.get('stderr'):
                command.stderr._out = options.get('stderr')
            command.handle(*args, **options)
        else:
            self.print_help('manage.py', 'cms')
| Aendra/django_vgv | management/commands/subcommands/base.py | Python | bsd-3-clause | 3,253 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
def trycmd(config):
    """Run a 'buildbot try' client session with the given config dict.

    Always returns 0 (success exit code); errors surface from the client.
    """
    from buildbot.clients import tryclient
    session = tryclient.Try(config)
    session.run()
    return 0
| seankelly/buildbot | master/buildbot/scripts/trycmd.py | Python | gpl-2.0 | 903 |
## @file
# process VTF generation
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from GenFdsGlobalVariable import GenFdsGlobalVariable
import os
from CommonDataClass.FdfClass import VtfClassObject
T_CHAR_LF = '\n'
## generate VTF
#
#
class Vtf (VtfClassObject):
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        VtfClassObject.__init__(self)

    ## GenVtf() method
    #
    #   Generate VTF via the external GenVtf tool
    #
    #   @param  self            The object pointer
    #   @param  FdAddressDict   dictionary contains FV name and its base address
    #   @retval Dict            FV and corresponding VTF file name
    #
    def GenVtf(self, FdAddressDict) :
        self.GenBsfInf()
        BaseAddArg = self.GetBaseAddressArg(FdAddressDict)
        OutputArg, VtfRawDict = self.GenOutputArg()
        Cmd = (
            'GenVtf',
            ) + OutputArg + (
            '-f', self.BsfInfName,
            ) + BaseAddArg
        # Fixed failure message: the tool invoked is GenVtf, not "GenFv -Vtf".
        GenFdsGlobalVariable.CallExternalTool(Cmd, "GenVtf Failed!")
        GenFdsGlobalVariable.SharpCounter = 0
        return VtfRawDict

    ## GenBsfInf() method
    #
    #   Generate the .inf control file consumed by GenVtf
    #
    #   @param  self        The object pointer
    #
    def GenBsfInf (self):
        FvList = self.GetFvList()
        self.BsfInfName = os.path.join(GenFdsGlobalVariable.FvDir, self.UiName + '.inf')
        # 'with' guarantees the file is closed even if a write fails.
        with open(self.BsfInfName, 'w+') as BsfInf:
            if self.ResetBin is not None:
                BsfInf.writelines ("[OPTIONS]" + T_CHAR_LF)
                BsfInf.writelines ("IA32_RST_BIN" + \
                                   " = " + \
                                   GenFdsGlobalVariable.MacroExtend(GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.ResetBin)) + \
                                   T_CHAR_LF )
                BsfInf.writelines (T_CHAR_LF )
            BsfInf.writelines ("[COMPONENTS]" + T_CHAR_LF)
            for ComponentObj in self.ComponentStatementList :
                BsfInf.writelines ("COMP_NAME" + \
                                   " = " + \
                                   ComponentObj.CompName + \
                                   T_CHAR_LF )
                # COMP_LOC: N = none, explicit file position, or F/S for the
                # first/second FV referenced by this VTF.
                if ComponentObj.CompLoc.upper() == 'NONE':
                    BsfInf.writelines ("COMP_LOC" + \
                                       " = " + \
                                       'N' + \
                                       T_CHAR_LF )
                elif ComponentObj.FilePos is not None:
                    BsfInf.writelines ("COMP_LOC" + \
                                       " = " + \
                                       ComponentObj.FilePos + \
                                       T_CHAR_LF )
                else:
                    Index = FvList.index(ComponentObj.CompLoc.upper())
                    if Index == 0:
                        BsfInf.writelines ("COMP_LOC" + \
                                           " = " + \
                                           'F' + \
                                           T_CHAR_LF )
                    elif Index == 1:
                        BsfInf.writelines ("COMP_LOC" + \
                                           " = " + \
                                           'S' + \
                                           T_CHAR_LF )
                BsfInf.writelines ("COMP_TYPE" + \
                                   " = " + \
                                   ComponentObj.CompType + \
                                   T_CHAR_LF )
                BsfInf.writelines ("COMP_VER" + \
                                   " = " + \
                                   ComponentObj.CompVer + \
                                   T_CHAR_LF )
                BsfInf.writelines ("COMP_CS" + \
                                   " = " + \
                                   ComponentObj.CompCs + \
                                   T_CHAR_LF )
                # '-' means "no binary/symbol file"; anything else gets macro
                # expansion relative to the workspace.
                BinPath = ComponentObj.CompBin
                if BinPath != '-':
                    BinPath = GenFdsGlobalVariable.MacroExtend(GenFdsGlobalVariable.ReplaceWorkspaceMacro(BinPath))
                BsfInf.writelines ("COMP_BIN" + \
                                   " = " + \
                                   BinPath + \
                                   T_CHAR_LF )
                SymPath = ComponentObj.CompSym
                if SymPath != '-':
                    SymPath = GenFdsGlobalVariable.MacroExtend(GenFdsGlobalVariable.ReplaceWorkspaceMacro(SymPath))
                BsfInf.writelines ("COMP_SYM" + \
                                   " = " + \
                                   SymPath + \
                                   T_CHAR_LF )
                BsfInf.writelines ("COMP_SIZE" + \
                                   " = " + \
                                   ComponentObj.CompSize + \
                                   T_CHAR_LF )
                BsfInf.writelines (T_CHAR_LF )

    ## GetFvList() method
    #
    #   Get the list of FV names referenced by VTF components, preserving
    #   first-reference order and skipping the 'NONE' placeholder.
    #
    #   @param  self        The object pointer
    #
    def GetFvList(self):
        FvList = []
        for component in self.ComponentStatementList :
            CompLoc = component.CompLoc.upper()
            if CompLoc != 'NONE' and CompLoc not in FvList:
                FvList.append(CompLoc)
        return FvList

    ## GetBaseAddressArg() method
    #
    #   Build the (-r address -s size) argument pairs for GenVtf, one pair
    #   per referenced FV.
    #
    #   @param  self        The object pointer
    #   @param  FdAddressDict   FV name -> (base address, size)
    #
    def GetBaseAddressArg(self, FdAddressDict):
        CmdStr = tuple()
        for FvName in self.GetFvList():
            (BaseAddress, Size) = FdAddressDict.get(FvName)
            CmdStr += (
                '-r', '0x%x' % BaseAddress,
                '-s', '0x%x' % Size,
                )
        return CmdStr

    ## GenOutputArg() method
    #
    #   Build the (-o file) output arguments for GenVtf and the mapping of
    #   FV name to generated raw VTF file.
    #
    #   @param  self        The object pointer
    #   @retval tuple       (argument tuple, {FV name: output file path})
    #
    def GenOutputArg(self):
        FvVtfDict = {}
        Arg = tuple()
        # Output files are numbered Vtf1.raw, Vtf2.raw, ... per FV.
        for Index, FvObj in enumerate(self.GetFvList(), start=1):
            OutputFileName = os.path.join(GenFdsGlobalVariable.FvDir, 'Vtf%d.raw' % Index)
            Arg += ('-o', OutputFileName)
            FvVtfDict[FvObj.upper()] = OutputFileName
        return Arg, FvVtfDict
| carmark/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/Vtf.py | Python | gpl-2.0 | 7,215 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import pkg_resources
from .__version__ import __version__
__all__ = ['Guess', 'Language',
'guess_file_info', 'guess_video_info',
'guess_movie_info', 'guess_episode_info']
# Do python3 detection before importing any other module, to be sure that
# it will then always be available
# with code from http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/
import sys
# Python 2/3 compatibility shims: text-type aliases, u()/s() converters, a
# UnicodeMixin providing __str__, and a hex helper, selected at import time.
if sys.version_info[0] >= 3: # pragma: no cover
    PY2, PY3 = False, True
    unicode_text_type = str
    native_text_type = str
    base_text_type = str
    def u(x):
        return str(x)
    def s(x):
        return x
    class UnicodeMixin(object):
        __str__ = lambda x: x.__unicode__()
    import binascii
    def to_hex(x):
        return binascii.hexlify(x).decode('utf-8')
else: # pragma: no cover
    PY2, PY3 = True, False
    __all__ = [str(s) for s in __all__] # fix imports for python2
    unicode_text_type = unicode
    native_text_type = str
    base_text_type = basestring
    def u(x):
        if isinstance(x, str):
            return x.decode('utf-8')
        if isinstance(x, list):
            return [u(s) for s in x]
        return unicode(x)
    def s(x):
        # Recursively encode unicode to native str inside common containers.
        if isinstance(x, unicode):
            return x.encode('utf-8')
        if isinstance(x, list):
            return [s(y) for y in x]
        if isinstance(x, tuple):
            return tuple(s(y) for y in x)
        if isinstance(x, dict):
            return dict((s(key), s(value)) for key, value in x.items())
        return x
    class UnicodeMixin(object):
        __str__ = lambda x: unicode(x).encode('utf-8')
    def to_hex(x):
        return x.encode('hex')
    range = xrange
from guessit.guess import Guess, merge_all
from guessit.language import Language
from guessit.matcher import IterativeMatcher
from guessit.textutils import clean_string, is_camel, from_camel
import os.path
import logging
import json
log = logging.getLogger(__name__)
class NullHandler(logging.Handler):
    """Logging handler that discards every record.

    NOTE(review): logging.NullHandler exists in the stdlib since Python 2.7;
    this local copy presumably keeps compatibility with older interpreters.
    """
    def emit(self, record):
        pass
# let's be a nicely behaving library
h = NullHandler()
log.addHandler(h)
def _guess_filename(filename, options=None, **kwargs):
    """Match ``filename`` and enrich the result with camelCase detection."""
    # Build the match tree first, then probe camel-cased strings for extra
    # properties before returning the merged guesses.
    match_tree = _build_filename_mtree(filename, options=options, **kwargs)
    _add_camel_properties(match_tree, options=options)
    return match_tree.matched()
def _build_filename_mtree(filename, options=None, **kwargs):
    """Build an :class:`IterativeMatcher` for ``filename``.

    If the first matching pass requests a second pass (via
    ``second_pass_options``), re-run the matcher with those options merged
    on top of the caller-supplied ones.
    """
    mtree = IterativeMatcher(filename, options=options, **kwargs)
    second_pass_options = mtree.second_pass_options
    if second_pass_options:
        log.info("Running 2nd pass")
        # BUG FIX: dict(options) raised TypeError when options was None
        # (its default). Start from an empty dict in that case.
        merged_options = dict(options) if options else {}
        merged_options.update(second_pass_options)
        mtree = IterativeMatcher(filename, options=merged_options, **kwargs)
    return mtree
def _add_camel_properties(mtree, options=None, **kwargs):
    """Detect extra properties hidden in camelCased strings of the tree."""
    matched = mtree.matched()
    # The main property to inspect depends on the detected file type.
    if matched.get('type') == 'episode':
        main_prop = 'series'
    else:
        main_prop = 'title'
    _guess_camel_string(mtree, matched.get(main_prop), options=options,
                        skip_title=False, **kwargs)
    # Also probe every leaf the matcher could not identify.
    for leaf in mtree.match_tree.unidentified_leaves():
        _guess_camel_string(mtree, leaf.value, options=options,
                            skip_title=True, **kwargs)
def _guess_camel_string(mtree, string, options=None, skip_title=False, **kwargs):
    """Re-match the un-cameled form of ``string`` and merge any new guesses.

    Returns True when at least one property was merged into ``mtree``.
    """
    if not string or not is_camel(string):
        return False
    log.info('"%s" is camel cased. Try to detect more properties.' % (string,))
    camel_tree = _build_filename_mtree(from_camel(string), options=options,
                                       name_only=True, skip_title=skip_title,
                                       **kwargs)
    camel_matched = camel_tree.matched()
    if len(camel_matched) == 0:
        return False
    # Title has changed.
    mtree.matched().update(camel_matched)
    return True
def guess_file_info(filename, info=None, options=None, **kwargs):
    """info can contain the names of the various plugins, such as 'filename' to
    detect filename info, or 'hash_md5' to get the md5 hash of the file.
    >>> testfile = os.path.join(os.path.dirname(__file__), 'test/dummy.srt')
    >>> g = guess_file_info(testfile, info = ['hash_md5', 'hash_sha1'])
    >>> g['hash_md5'], g['hash_sha1']
    ('64de6b5893cac24456c46a935ef9c359', 'a703fc0fa4518080505809bf562c6fc6f7b3c98c')
    """
    info = info or 'filename'
    options = options or {}
    result = []
    # (infotype, hashlib object) pairs; all computed later in one file pass.
    hashers = []
    # Force unicode as soon as possible
    filename = u(filename)
    # A single info type may be given as a bare string.
    if isinstance(info, base_text_type):
        info = [info]
    for infotype in info:
        if infotype == 'filename':
            result.append(_guess_filename(filename, options, **kwargs))
        elif infotype == 'hash_mpc':
            # Media Player Classic style hash; imported lazily as it reads
            # the file on disk.
            from guessit.hash_mpc import hash_file
            try:
                result.append(Guess({infotype: hash_file(filename)},
                                    confidence=1.0))
            except Exception as e:
                log.warning('Could not compute MPC-style hash because: %s' % e)
        elif infotype == 'hash_ed2k':
            from guessit.hash_ed2k import hash_file
            try:
                result.append(Guess({infotype: hash_file(filename)},
                                    confidence=1.0))
            except Exception as e:
                log.warning('Could not compute ed2k hash because: %s' % e)
        elif infotype.startswith('hash_'):
            # Any other 'hash_*' info type is delegated to hashlib, e.g.
            # 'hash_md5' -> hashlib.md5().
            import hashlib
            hashname = infotype[5:]
            try:
                hasher = getattr(hashlib, hashname)()
                hashers.append((infotype, hasher))
            except AttributeError:
                log.warning('Could not compute %s hash because it is not available from python\'s hashlib module' % hashname)
        else:
            log.warning('Invalid infotype: %s' % infotype)
    # do all the hashes now, but on a single pass
    if hashers:
        try:
            blocksize = 8192
            hasherobjs = dict(hashers).values()
            with open(filename, 'rb') as f:
                chunk = f.read(blocksize)
                while chunk:
                    # Feed the same chunk to every requested hasher.
                    for hasher in hasherobjs:
                        hasher.update(chunk)
                    chunk = f.read(blocksize)
            for infotype, hasher in hashers:
                result.append(Guess({infotype: hasher.hexdigest()},
                                    confidence=1.0))
        except Exception as e:
            log.warning('Could not compute hash because: %s' % e)
    # Merge the per-plugin guesses into a single Guess object.
    result = merge_all(result)
    return result
def guess_video_info(filename, info=None, options=None, **kwargs):
    """Same as guess_file_info, forcing the 'video' file type."""
    return guess_file_info(filename, info=info, options=options, type='video', **kwargs)
def guess_movie_info(filename, info=None, options=None, **kwargs):
    """Same as guess_file_info, forcing the 'movie' file type."""
    return guess_file_info(filename, info=info, options=options, type='movie', **kwargs)
def guess_episode_info(filename, info=None, options=None, **kwargs):
    """Same as guess_file_info, forcing the 'episode' file type."""
    return guess_file_info(filename, info=info, options=options, type='episode', **kwargs)
| Hellowlol/PyTunes | libs/guessit/__init__.py | Python | gpl-3.0 | 7,831 |
import tempfile
import shutil
import sys
from unittest import mock
import pytest
from tools.wpt import run
from tools import localpaths # noqa: F401
from wptrunner.browsers import product_list
@pytest.fixture(scope="module")
def venv():
    """Provide a stub Virtualenv whose setup steps are all no-ops.

    Only a temporary directory is created; it is removed at teardown so
    the tests never build or touch a real virtual environment.
    """
    from tools.wpt import virtualenv
    class Virtualenv(virtualenv.Virtualenv):
        def __init__(self):
            # Deliberately skip the parent __init__: no real env is built.
            self.path = tempfile.mkdtemp()
            self.skip_virtualenv_setup = False
        def create(self):
            return
        def activate(self):
            return
        def start(self):
            return
        def install(self, *requirements):
            return
        def install_requirements(self, requirements_path):
            return
    venv = Virtualenv()
    yield venv
    # Teardown: remove the temporary directory made in __init__.
    shutil.rmtree(venv.path)
@pytest.fixture(scope="module")
def logger():
    # Invoked only for its side effect of configuring wpt's logging; the
    # fixture itself yields nothing.
    run.setup_logging({})
@pytest.mark.parametrize("platform", ["Windows", "Linux", "Darwin"])
def test_check_environ_fail(platform):
    """check_environ must point the user at `wpt make-hosts-file` on every OS."""
    # An empty hosts file guarantees the required entries are "missing".
    fake_open = mock.mock_open(read_data=b"")
    fake_uname = (platform, "", "", "", "", "")
    with mock.patch.object(run, "open", fake_open), \
         mock.patch.object(run.platform, "uname", return_value=fake_uname), \
         pytest.raises(run.WptrunError) as excinfo:
        run.check_environ("foo")
    assert "wpt make-hosts-file" in str(excinfo.value)
@pytest.mark.parametrize("product", product_list)
def test_setup_wptrunner(venv, logger, product):
    """setup_wptrunner should accept default CLI arguments for every product."""
    if product == "firefox_android":
        pytest.skip("Android emulator doesn't work on docker")
    parser = run.create_parser()
    kwargs = vars(parser.parse_args(["--channel=nightly", product]))
    kwargs["prompt"] = False
    # Hack to get a real existing path
    kwargs["binary"] = sys.argv[0]
    kwargs["webdriver_binary"] = sys.argv[0]
    if kwargs["product"] == "sauce":
        # Sauce Labs requires a concrete browser/version to target.
        kwargs["sauce_browser"] = "firefox"
        kwargs["sauce_version"] = "63"
    run.setup_wptrunner(venv, **kwargs)
| CYBAI/servo | tests/wpt/web-platform-tests/tools/wpt/tests/test_run.py | Python | mpl-2.0 | 1,969 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.
from pyspark.ml.param import *
class HasMaxIter(Params):
    """
    Mixin for param maxIter: max number of iterations (>= 0).
    """
    # Class-level Param descriptor shared by every estimator mixing this in.
    # NOTE: this file is generated by _shared_params_code_gen.py -- change the
    # generator, not this class.
    maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", typeConverter=TypeConverters.toInt)
    def __init__(self):
        super(HasMaxIter, self).__init__()
    def getMaxIter(self):
        """
        Gets the value of maxIter or its default value.
        """
        return self.getOrDefault(self.maxIter)
class HasRegParam(Params):
"""
Mixin for param regParam: regularization parameter (>= 0).
"""
regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0).", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasRegParam, self).__init__()
def getRegParam(self):
"""
Gets the value of regParam or its default value.
"""
return self.getOrDefault(self.regParam)
class HasFeaturesCol(Params):
"""
Mixin for param featuresCol: features column name.
"""
featuresCol = Param(Params._dummy(), "featuresCol", "features column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasFeaturesCol, self).__init__()
self._setDefault(featuresCol='features')
def getFeaturesCol(self):
"""
Gets the value of featuresCol or its default value.
"""
return self.getOrDefault(self.featuresCol)
class HasLabelCol(Params):
"""
Mixin for param labelCol: label column name.
"""
labelCol = Param(Params._dummy(), "labelCol", "label column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasLabelCol, self).__init__()
self._setDefault(labelCol='label')
def getLabelCol(self):
"""
Gets the value of labelCol or its default value.
"""
return self.getOrDefault(self.labelCol)
class HasPredictionCol(Params):
"""
Mixin for param predictionCol: prediction column name.
"""
predictionCol = Param(Params._dummy(), "predictionCol", "prediction column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasPredictionCol, self).__init__()
self._setDefault(predictionCol='prediction')
def getPredictionCol(self):
"""
Gets the value of predictionCol or its default value.
"""
return self.getOrDefault(self.predictionCol)
class HasProbabilityCol(Params):
"""
Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
"""
probabilityCol = Param(Params._dummy(), "probabilityCol", "Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasProbabilityCol, self).__init__()
self._setDefault(probabilityCol='probability')
def getProbabilityCol(self):
"""
Gets the value of probabilityCol or its default value.
"""
return self.getOrDefault(self.probabilityCol)
class HasRawPredictionCol(Params):
"""
Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
"""
rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasRawPredictionCol, self).__init__()
self._setDefault(rawPredictionCol='rawPrediction')
def getRawPredictionCol(self):
"""
Gets the value of rawPredictionCol or its default value.
"""
return self.getOrDefault(self.rawPredictionCol)
class HasInputCol(Params):
"""
Mixin for param inputCol: input column name.
"""
inputCol = Param(Params._dummy(), "inputCol", "input column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasInputCol, self).__init__()
def getInputCol(self):
"""
Gets the value of inputCol or its default value.
"""
return self.getOrDefault(self.inputCol)
class HasInputCols(Params):
"""
Mixin for param inputCols: input column names.
"""
inputCols = Param(Params._dummy(), "inputCols", "input column names.", typeConverter=TypeConverters.toListString)
def __init__(self):
super(HasInputCols, self).__init__()
def getInputCols(self):
"""
Gets the value of inputCols or its default value.
"""
return self.getOrDefault(self.inputCols)
class HasOutputCol(Params):
"""
Mixin for param outputCol: output column name.
"""
outputCol = Param(Params._dummy(), "outputCol", "output column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasOutputCol, self).__init__()
self._setDefault(outputCol=self.uid + '__output')
def getOutputCol(self):
"""
Gets the value of outputCol or its default value.
"""
return self.getOrDefault(self.outputCol)
class HasOutputCols(Params):
"""
Mixin for param outputCols: output column names.
"""
outputCols = Param(Params._dummy(), "outputCols", "output column names.", typeConverter=TypeConverters.toListString)
def __init__(self):
super(HasOutputCols, self).__init__()
def getOutputCols(self):
"""
Gets the value of outputCols or its default value.
"""
return self.getOrDefault(self.outputCols)
class HasNumFeatures(Params):
"""
Mixin for param numFeatures: Number of features. Should be greater than 0.
"""
numFeatures = Param(Params._dummy(), "numFeatures", "Number of features. Should be greater than 0.", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasNumFeatures, self).__init__()
self._setDefault(numFeatures=262144)
def getNumFeatures(self):
"""
Gets the value of numFeatures or its default value.
"""
return self.getOrDefault(self.numFeatures)
class HasCheckpointInterval(Params):
"""
Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.
"""
checkpointInterval = Param(Params._dummy(), "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasCheckpointInterval, self).__init__()
def getCheckpointInterval(self):
"""
Gets the value of checkpointInterval or its default value.
"""
return self.getOrDefault(self.checkpointInterval)
class HasSeed(Params):
    """
    Mixin for param seed: random seed.
    """
    seed = Param(Params._dummy(), "seed", "random seed.", typeConverter=TypeConverters.toInt)
    def __init__(self):
        super(HasSeed, self).__init__()
        # NOTE(review): hash() of a str is randomized per interpreter process
        # unless PYTHONHASHSEED is fixed, so this default seed can differ
        # between runs -- confirm non-reproducible defaults are intended.
        # (Generated file: any change belongs in _shared_params_code_gen.py.)
        self._setDefault(seed=hash(type(self).__name__))
    def getSeed(self):
        """
        Gets the value of seed or its default value.
        """
        return self.getOrDefault(self.seed)
class HasTol(Params):
"""
Mixin for param tol: the convergence tolerance for iterative algorithms (>= 0).
"""
tol = Param(Params._dummy(), "tol", "the convergence tolerance for iterative algorithms (>= 0).", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasTol, self).__init__()
def getTol(self):
"""
Gets the value of tol or its default value.
"""
return self.getOrDefault(self.tol)
class HasRelativeError(Params):
"""
Mixin for param relativeError: the relative target precision for the approximate quantile algorithm. Must be in the range [0, 1]
"""
relativeError = Param(Params._dummy(), "relativeError", "the relative target precision for the approximate quantile algorithm. Must be in the range [0, 1]", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasRelativeError, self).__init__()
self._setDefault(relativeError=0.001)
def getRelativeError(self):
"""
Gets the value of relativeError or its default value.
"""
return self.getOrDefault(self.relativeError)
class HasStepSize(Params):
"""
Mixin for param stepSize: Step size to be used for each iteration of optimization (>= 0).
"""
stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization (>= 0).", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasStepSize, self).__init__()
def getStepSize(self):
"""
Gets the value of stepSize or its default value.
"""
return self.getOrDefault(self.stepSize)
class HasHandleInvalid(Params):
"""
Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
"""
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasHandleInvalid, self).__init__()
def getHandleInvalid(self):
"""
Gets the value of handleInvalid or its default value.
"""
return self.getOrDefault(self.handleInvalid)
class HasElasticNetParam(Params):
"""
Mixin for param elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
"""
elasticNetParam = Param(Params._dummy(), "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasElasticNetParam, self).__init__()
self._setDefault(elasticNetParam=0.0)
def getElasticNetParam(self):
"""
Gets the value of elasticNetParam or its default value.
"""
return self.getOrDefault(self.elasticNetParam)
class HasFitIntercept(Params):
"""
Mixin for param fitIntercept: whether to fit an intercept term.
"""
fitIntercept = Param(Params._dummy(), "fitIntercept", "whether to fit an intercept term.", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasFitIntercept, self).__init__()
self._setDefault(fitIntercept=True)
def getFitIntercept(self):
"""
Gets the value of fitIntercept or its default value.
"""
return self.getOrDefault(self.fitIntercept)
class HasStandardization(Params):
"""
Mixin for param standardization: whether to standardize the training features before fitting the model.
"""
standardization = Param(Params._dummy(), "standardization", "whether to standardize the training features before fitting the model.", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasStandardization, self).__init__()
self._setDefault(standardization=True)
def getStandardization(self):
"""
Gets the value of standardization or its default value.
"""
return self.getOrDefault(self.standardization)
class HasThresholds(Params):
"""
Mixin for param thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.
"""
thresholds = Param(Params._dummy(), "thresholds", "Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.", typeConverter=TypeConverters.toListFloat)
def __init__(self):
super(HasThresholds, self).__init__()
def getThresholds(self):
"""
Gets the value of thresholds or its default value.
"""
return self.getOrDefault(self.thresholds)
class HasThreshold(Params):
"""
Mixin for param threshold: threshold in binary classification prediction, in range [0, 1]
"""
threshold = Param(Params._dummy(), "threshold", "threshold in binary classification prediction, in range [0, 1]", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasThreshold, self).__init__()
self._setDefault(threshold=0.5)
def getThreshold(self):
"""
Gets the value of threshold or its default value.
"""
return self.getOrDefault(self.threshold)
class HasWeightCol(Params):
"""
Mixin for param weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0.
"""
weightCol = Param(Params._dummy(), "weightCol", "weight column name. If this is not set or empty, we treat all instance weights as 1.0.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasWeightCol, self).__init__()
def getWeightCol(self):
"""
Gets the value of weightCol or its default value.
"""
return self.getOrDefault(self.weightCol)
class HasSolver(Params):
"""
Mixin for param solver: the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.
"""
solver = Param(Params._dummy(), "solver", "the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasSolver, self).__init__()
self._setDefault(solver='auto')
def getSolver(self):
"""
Gets the value of solver or its default value.
"""
return self.getOrDefault(self.solver)
class HasVarianceCol(Params):
"""
Mixin for param varianceCol: column name for the biased sample variance of prediction.
"""
varianceCol = Param(Params._dummy(), "varianceCol", "column name for the biased sample variance of prediction.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasVarianceCol, self).__init__()
def getVarianceCol(self):
"""
Gets the value of varianceCol or its default value.
"""
return self.getOrDefault(self.varianceCol)
class HasAggregationDepth(Params):
"""
Mixin for param aggregationDepth: suggested depth for treeAggregate (>= 2).
"""
aggregationDepth = Param(Params._dummy(), "aggregationDepth", "suggested depth for treeAggregate (>= 2).", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasAggregationDepth, self).__init__()
self._setDefault(aggregationDepth=2)
def getAggregationDepth(self):
"""
Gets the value of aggregationDepth or its default value.
"""
return self.getOrDefault(self.aggregationDepth)
class HasParallelism(Params):
"""
Mixin for param parallelism: the number of threads to use when running parallel algorithms (>= 1).
"""
parallelism = Param(Params._dummy(), "parallelism", "the number of threads to use when running parallel algorithms (>= 1).", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasParallelism, self).__init__()
self._setDefault(parallelism=1)
def getParallelism(self):
"""
Gets the value of parallelism or its default value.
"""
return self.getOrDefault(self.parallelism)
class HasCollectSubModels(Params):
"""
Mixin for param collectSubModels: Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.
"""
collectSubModels = Param(Params._dummy(), "collectSubModels", "Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasCollectSubModels, self).__init__()
self._setDefault(collectSubModels=False)
def getCollectSubModels(self):
"""
Gets the value of collectSubModels or its default value.
"""
return self.getOrDefault(self.collectSubModels)
class HasLoss(Params):
"""
Mixin for param loss: the loss function to be optimized.
"""
loss = Param(Params._dummy(), "loss", "the loss function to be optimized.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasLoss, self).__init__()
def getLoss(self):
"""
Gets the value of loss or its default value.
"""
return self.getOrDefault(self.loss)
class HasDistanceMeasure(Params):
"""
Mixin for param distanceMeasure: the distance measure. Supported options: 'euclidean' and 'cosine'.
"""
distanceMeasure = Param(Params._dummy(), "distanceMeasure", "the distance measure. Supported options: 'euclidean' and 'cosine'.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasDistanceMeasure, self).__init__()
self._setDefault(distanceMeasure='euclidean')
def getDistanceMeasure(self):
"""
Gets the value of distanceMeasure or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
class HasValidationIndicatorCol(Params):
"""
Mixin for param validationIndicatorCol: name of the column that indicates whether each row is for training or for validation. False indicates training; true indicates validation.
"""
validationIndicatorCol = Param(Params._dummy(), "validationIndicatorCol", "name of the column that indicates whether each row is for training or for validation. False indicates training; true indicates validation.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasValidationIndicatorCol, self).__init__()
def getValidationIndicatorCol(self):
"""
Gets the value of validationIndicatorCol or its default value.
"""
return self.getOrDefault(self.validationIndicatorCol)
class HasBlockSize(Params):
"""
Mixin for param blockSize: block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.
"""
blockSize = Param(Params._dummy(), "blockSize", "block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasBlockSize, self).__init__()
def getBlockSize(self):
"""
Gets the value of blockSize or its default value.
"""
return self.getOrDefault(self.blockSize)
| ConeyLiu/spark | python/pyspark/ml/param/shared.py | Python | apache-2.0 | 21,249 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from horizon import version
from openstack_dashboard.dashboards.admin.info import constants
from openstack_dashboard.dashboards.admin.info import tabs as project_tabs
class IndexView(tabs.TabbedTableView):
    """Admin "System Information" panel, rendered as a group of tabs."""
    tab_group_class = project_tabs.SystemInfoTabs
    template_name = constants.INFO_TEMPLATE_NAME
    def get_context_data(self, **kwargs):
        """Add the dashboard version string to the template context."""
        context = super(IndexView, self).get_context_data(**kwargs)
        try:
            context["version"] = version.version_info.version_string()
        except Exception:
            # Version lookup is non-critical: surface the error to the user
            # via horizon's handler and render the page without it.
            exceptions.handle(self.request,
                              _('Unable to retrieve version information.'))
        return context
| JioCloud/horizon | openstack_dashboard/dashboards/admin/info/views.py | Python | apache-2.0 | 1,536 |
'''test using string.find() in if statement as a boolean. it returns an int'''
class X:
    '''check string.find() usage'''
    def foo(self):
        # Intentional fixture code: str.find() returns an index (-1 when the
        # substring is absent), so treating its result as a boolean is exactly
        # the defect pychecker is expected to flag. Do not "fix" these lines.
        s = 'abc'
        if s.find('ab'):
            print 'this is a bug'
        if not s.find('ab'):
            print 'this is also a bug'
        if s.find('ab') >= 0:
            print 'this is not a bug'
        if s.find('ab') < 0:
            print 'this also is not a bug'
| lavjain/incubator-hawq | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test96.py | Python | apache-2.0 | 387 |
#!/usr/bin/env python3
# Copyright 2018 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various code adapted from:
# https://cs.chromium.org/chromium/src/build/linux/sysroot_scripts/install-sysroot.py
import os
import shutil
import subprocess
import sys
import urllib.request
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Sysroot revision from:
# https://cs.chromium.org/chromium/src/build/linux/sysroot_scripts/sysroots.json
SERVER = 'https://commondatastorage.googleapis.com'
PATH = 'chrome-linux-sysroot/toolchain'
REVISION = '43a87bbebccad99325fdcf34166295b121ee15c7'
FILENAME = 'debian_sid_amd64_sysroot.tar.xz'
def main():
    """Download and unpack the Debian sysroot if the cached copy is stale.

    A stamp file records the URL of the installed sysroot; when it matches
    the currently requested revision the function returns without work.
    """
    url = '%s/%s/%s/%s' % (SERVER, PATH, REVISION, FILENAME)
    sysroot = os.path.join(SCRIPT_DIR, os.pardir, 'third_party', 'linux',
                           'sysroot')
    stamp = os.path.join(sysroot, '.stamp')
    if os.path.exists(stamp):
        with open(stamp) as s:
            if s.read() == url:
                # Already installed at this revision.
                return

    print('Installing Debian root image from %s' % url)

    # Start from a clean directory so a partial previous install can't leak.
    if os.path.isdir(sysroot):
        shutil.rmtree(sysroot)
    os.mkdir(sysroot)
    tarball = os.path.join(sysroot, FILENAME)

    print('Downloading %s' % url)
    for _ in range(3):
        # BUG FIX: previously any urlopen() failure escaped immediately, so
        # the "3 attempts" loop never actually retried. urllib.error.URLError
        # is a subclass of OSError, so this catches network errors too.
        try:
            with urllib.request.urlopen(url) as response:
                with open(tarball, 'wb') as f:
                    f.write(response.read())
        except OSError:
            continue
        break
    else:
        raise Exception('Failed to download %s' % url)

    subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
    os.remove(tarball)

    # Record the installed URL so the next run can skip the download.
    with open(stamp, 'w') as s:
        s.write(url)


if __name__ == '__main__':
    main()
    sys.exit(0)
| nwjs/chromium.src | third_party/crashpad/crashpad/build/install_linux_sysroot.py | Python | bsd-3-clause | 2,174 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.testutils import TestCase
class ReactivateAccountTest(TestCase):
    """Tests for the account-reactivation page."""

    @fixture
    def path(self):
        return reverse('sentry-reactivate-account')

    def _login_inactive_user(self):
        # Both tests need a deactivated account that is logged in.
        inactive_user = self.create_user('foo@example.com', is_active=False)
        self.login_as(inactive_user)

    def test_renders(self):
        """GET renders the reactivation template."""
        self._login_inactive_user()
        response = self.client.get(self.path)
        assert response.status_code == 200
        self.assertTemplateUsed(response, 'sentry/reactivate-account.html')

    def test_does_reactivate(self):
        """POSTing a confirmation redirects away from the page."""
        self._login_inactive_user()
        response = self.client.post(self.path, data={'op': 'confirm'})
        assert response.status_code == 302
| zenefits/sentry | tests/sentry/web/frontend/test_reactivate_account.py | Python | bsd-3-clause | 796 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: widen ipaddress to a nullable, IPv6-capable field.

        GenericIPAddressField(max_length=39) fits the full textual IPv6 form.
        """
        # Changing field 'PayPalPDT.ipaddress'
        db.alter_column('paypal_pdt', 'ipaddress', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39, null=True))
def backwards(self, orm):
# Changing field 'PayPalPDT.ipaddress'
db.alter_column('paypal_pdt', 'ipaddress', self.gf('django.db.models.fields.IPAddressField')(default='', max_length=15))
models = {
u'pdt.paypalpdt': {
'Meta': {'object_name': 'PayPalPDT', 'db_table': "'paypal_pdt'"},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_status': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amt': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auction_buyer_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}),
'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'auth_status': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}),
'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'case_id': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'charset': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'cm': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'custom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'ipaddress': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mp_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notify_version': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'num_cart_items': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'option_name1': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'option_name2': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'outstanding_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'parent_txn_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'payer_business_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_email': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_id': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
'payer_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'payment_cycle': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '17', 'blank': 'True'}),
'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'pending_reason': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'period1': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period3': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'product_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'profile_status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'protection_eligibility': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reason_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'reattempt': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '127', 'blank': 'True'}),
'receiver_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'recur_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'recurring': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'recurring_payment_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'remaining_settle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'residence_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'retry_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rp_invoice_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'settle_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sig': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'st': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subscr_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_effective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'test_ipn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'transaction_entity': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'transaction_subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'tx': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'txn_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '19', 'blank': 'True'}),
'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'verify_sign': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['pdt'] | aarticianpc/greenpointtrees | src/paypal/standard/pdt/south_migrations/0004_auto__chg_field_paypalpdt_ipaddress.py | Python | mit | 15,085 |
#!/usr/bin/env python2
# -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2012 Lincoln de Sousa <lincoln@minaslivre.org>
Copyright (C) 2007 Gabriel Falcão <gabrielteratos@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dbus
import gtk
import logging
import os
import subprocess
import sys
from optparse import OptionParser
from guake.common import ShowableError
from guake.common import _
from guake.common import test_gconf
from guake.dbusiface import DBUS_NAME
from guake.dbusiface import DBUS_PATH
from guake.dbusiface import DbusManager
from guake.globals import KEY
from guake.guake_app import Guake
log = logging.getLogger(__name__)
def main():
    """Parses the command line parameters and decide if dbus methods
    should be called or not. If there is already a guake instance
    running it will be used and a True value will be returned,
    otherwise, false will be returned.
    """

    # COLORTERM is an environment variable set by some terminal emulators such as gnome-terminal.
    # To avoid confusing applications running inside Guake, clean up COLORTERM at startup.
    if "COLORTERM" in os.environ:
        del os.environ['COLORTERM']

    # Force to xterm-256 colors for compatibility with some old command line programs
    os.environ["TERM"] = "xterm-256color"

    parser = OptionParser()
    parser.add_option('-f', '--fullscreen', dest='fullscreen',
                      action='store_true', default=False,
                      help=_('Put Guake in fullscreen mode'))

    parser.add_option('-t', '--toggle-visibility', dest='show_hide',
                      action='store_true', default=False,
                      help=_('Toggles the visibility of the terminal window'))

    parser.add_option('--show', dest="show",
                      action='store_true', default=False,
                      help=_('Shows Guake main window'))

    parser.add_option('--hide', dest='hide',
                      action='store_true', default=False,
                      help=_('Hides Guake main window'))

    parser.add_option('-p', '--preferences', dest='show_preferences',
                      action='store_true', default=False,
                      help=_('Shows Guake preference window'))

    parser.add_option('-a', '--about', dest='show_about',
                      action='store_true', default=False,
                      help=_('Shows Guake\'s about info'))

    parser.add_option('-n', '--new-tab', dest='new_tab',
                      action='store', default='',
                      help=_('Add a new tab (with current directory set to NEW_TAB)'))

    parser.add_option('-s', '--select-tab', dest='select_tab',
                      action='store', default='',
                      help=_('Select a tab (SELECT_TAB is the index of the tab)'))

    parser.add_option('-g', '--selected-tab', dest='selected_tab',
                      action='store_true', default=False,
                      help=_('Return the selected tab index.'))

    parser.add_option('-e', '--execute-command', dest='command',
                      action='store', default='',
                      help=_('Execute an arbitrary command in the selected tab.'))

    parser.add_option('-i', '--tab-index', dest='tab_index',
                      action='store', default='0',
                      help=_('Specify the tab to rename. Default is 0.'))

    parser.add_option('--bgcolor', dest='bgcolor',
                      action='store', default='',
                      help=_('Set the hexadecimal (#rrggbb) background color of '
                             'the selected tab.'))

    parser.add_option('--fgcolor', dest='fgcolor',
                      action='store', default='',
                      help=_('Set the hexadecimal (#rrggbb) foreground color of the '
                             'selected tab.'))

    parser.add_option('--rename-tab', dest='rename_tab',
                      metavar='TITLE',
                      action='store', default='',
                      help=_('Rename the specified tab. Reset to default if TITLE is '
                             'a single dash "-".'))

    parser.add_option('-r', '--rename-current-tab', dest='rename_current_tab',
                      metavar='TITLE',
                      action='store', default='',
                      help=_('Rename the current tab. Reset to default if TITLE is a '
                             'single dash "-".'))

    parser.add_option('-q', '--quit', dest='quit',
                      action='store_true', default=False,
                      help=_('Says to Guake go away =('))

    # NOTE(review): options.quit is parsed but never acted upon in this
    # function -- verify whether quitting is handled elsewhere.
    parser.add_option('-u', '--no-startup-script', dest='execute_startup_script',
                      action='store_false', default=True,
                      help=_('Do not execute the start up script'))

    options = parser.parse_args()[0]

    instance = None

    # Trying to get an already running instance of guake. If it is not
    # possible, lets create a new instance. This function will return
    # a boolean value depending on this decision.
    try:
        bus = dbus.SessionBus()
        remote_object = bus.get_object(DBUS_NAME, DBUS_PATH)
        already_running = True
    except dbus.DBusException:
        instance = Guake()
        remote_object = DbusManager(instance)
        already_running = False

    only_show_hide = True

    if options.fullscreen:
        remote_object.fullscreen()

    if options.show:
        remote_object.show_from_remote()

    if options.hide:
        remote_object.hide_from_remote()

    if options.show_preferences:
        remote_object.show_prefs()
        only_show_hide = False

    if options.new_tab:
        remote_object.add_tab(options.new_tab)
        only_show_hide = False

    if options.select_tab:
        selected = int(options.select_tab)
        i = remote_object.select_tab(selected)
        if i is None:
            sys.stdout.write('invalid index: %d\n' % selected)
        only_show_hide = False

    if options.selected_tab:
        selected = remote_object.get_selected_tab()
        sys.stdout.write('%d\n' % selected)
        only_show_hide = False

    if options.command:
        remote_object.execute_command(options.command)
        only_show_hide = False

    if options.tab_index and options.rename_tab:
        remote_object.rename_tab(int(options.tab_index), options.rename_tab)
        only_show_hide = False

    if options.bgcolor:
        remote_object.set_bgcolor(options.bgcolor)
        only_show_hide = False

    if options.fgcolor:
        remote_object.set_fgcolor(options.fgcolor)
        only_show_hide = False

    if options.rename_current_tab:
        remote_object.rename_current_tab(options.rename_current_tab)
        only_show_hide = False

    if options.show_about:
        remote_object.show_about()
        only_show_hide = False

    if already_running and only_show_hide:
        # here we know that guake was called without any parameter and
        # it is already running, so, lets toggle its visibility.
        remote_object.show_hide()

    if options.execute_startup_script:
        if not already_running:
            startup_script = instance.client.get_string(KEY("/general/startup_script"))
            if startup_script:
                log.info("Calling startup script: %s", startup_script)
                process = subprocess.Popen([startup_script], shell=True, stdin=None, stdout=None,
                                           stderr=None, close_fds=True)
                # BUGFIX: log the real process id; previously the Popen
                # object itself was logged, printing its repr instead of
                # the pid the message promises.
                log.info("Startup script started with pid: %s", process.pid)
                # Please ensure this is the last line !!!!
    else:
        log.info("--no-startup-script argument defined, so don't execute the startup script")

    return already_running
def exec_main():
    """Entry point used by the launcher script.

    Verifies that the gconf schemas are installed, then either delegates
    to an already running Guake instance (``main()`` returns True) or
    starts the gtk main loop for the freshly created instance.
    """
    if not test_gconf():
        raise ShowableError(_('Guake can not init!'),
                            _('Gconf Error.\n'
                              'Have you installed <b>guake.schemas</b> properly?'))

    already_running = main()
    if not already_running:
        gtk.main()
# Allow running this module directly as a script.
if __name__ == '__main__':
    exec_main()
| TempleBishop/guake | src/guake/main.py | Python | gpl-2.0 | 8,822 |
"""
E-commerce Tab Instructor Dashboard Coupons Operations views
"""
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from util.json_request import JsonResponse
from django.http import HttpResponse, HttpResponseNotFound
from shoppingcart.models import Coupon, CourseRegistrationCode
import logging
log = logging.getLogger(__name__)
@require_POST
@login_required
def remove_coupon(request, course_id):  # pylint: disable=W0613
    """
    Deactivate the coupon identified by the POSTed ``id``.

    The coupon row is kept for auditing; only ``is_active`` is flipped to
    ``False``.  Responds with JSON: HTTP 400 when the id is missing,
    unknown, or already inactive; HTTP 200 on success.
    """
    coupon_id = request.POST.get('id', None)
    if not coupon_id:
        return JsonResponse({
            'message': _('coupon id is None')
        }, status=400)  # status code 400: Bad Request
    try:
        coupon = Coupon.objects.get(id=coupon_id)
    except ObjectDoesNotExist:
        return JsonResponse({
            'message': _('coupon with the coupon id ({coupon_id}) DoesNotExist').format(coupon_id=coupon_id)
        }, status=400)  # status code 400: Bad Request

    # Deactivating twice is treated as a client error.
    if not coupon.is_active:
        return JsonResponse({
            'message': _('coupon with the coupon id ({coupon_id}) is already inactive').format(coupon_id=coupon_id)
        }, status=400)  # status code 400: Bad Request

    # Soft delete: flip the flag instead of removing the row.
    coupon.is_active = False
    coupon.save()
    return JsonResponse({
        'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
    })  # status code 200: OK by default
@require_POST
@login_required
def add_coupon(request, course_id):  # pylint: disable=W0613
    """
    Create a new coupon from the POSTed form fields.

    Rejects codes that are already used by an active coupon or by a
    registration code, and discounts that are not integers in [0, 100].
    """
    code = request.POST.get('code')

    # The code must not already belong to an active coupon.
    if Coupon.objects.filter(is_active=True, code=code):
        return HttpResponseNotFound(_("coupon with the coupon code ({code}) already exist").format(code=code))

    # Nor may it collide with an existing course registration code.
    if CourseRegistrationCode.objects.filter(code=code):
        return HttpResponseNotFound(_(
            "The code ({code}) that you have tried to define is already in use as a registration code").format(code=code)
        )

    description = request.POST.get('description')
    course_id = request.POST.get('course_id')

    try:
        discount = int(request.POST.get('discount'))
    except ValueError:
        return HttpResponseNotFound(_("Please Enter the Integer Value for Coupon Discount"))
    if discount > 100:
        return HttpResponseNotFound(_("Please Enter the Coupon Discount Value Less than or Equal to 100"))

    new_coupon = Coupon(
        code=code, description=description, course_id=course_id,
        percentage_discount=discount, created_by_id=request.user.id
    )
    new_coupon.save()
    return HttpResponse(_("coupon with the coupon code ({code}) added successfully").format(code=code))
@require_POST
@login_required
def update_coupon(request, course_id):  # pylint: disable=W0613
    """
    Update an existing coupon from the POSTed form fields.

    Looks the coupon up by ``coupon_id``, validates that the (possibly
    changed) code is not already used by another active coupon or by a
    registration code, validates the discount, then saves the changes.
    Responds HTTP 404 with a message on any validation failure.
    """
    coupon_id = request.POST.get('coupon_id', None)
    if not coupon_id:
        return HttpResponseNotFound(_("coupon id not found"))
    try:
        coupon = Coupon.objects.get(pk=coupon_id)
    except ObjectDoesNotExist:
        return HttpResponseNotFound(_("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id))

    code = request.POST.get('code')
    # Exclude this coupon itself (~Q) so keeping the code unchanged is not
    # reported as a duplicate; only *other* active coupons conflict.
    filtered_coupons = Coupon.objects.filter(~Q(id=coupon_id), code=code, is_active=True)
    if filtered_coupons:
        # BUGFIX: the duplicate here is the coupon *code*, not the coupon
        # id; the old message wrongly claimed the id already existed.  The
        # message below is the same one add_coupon emits for this case.
        return HttpResponseNotFound(_("coupon with the coupon code ({code}) already exist").format(code=code))

    # check if the coupon code is in the CourseRegistrationCode Table
    course_registration_code = CourseRegistrationCode.objects.filter(code=code)
    if course_registration_code:
        return HttpResponseNotFound(_(
            "The code ({code}) that you have tried to define is already in use as a registration code").format(code=code)
        )

    description = request.POST.get('description')
    course_id = request.POST.get('course_id')
    try:
        discount = int(request.POST.get('discount'))
    except ValueError:
        return HttpResponseNotFound(_("Please Enter the Integer Value for Coupon Discount"))
    if discount > 100:
        return HttpResponseNotFound(_("Please Enter the Coupon Discount Value Less than or Equal to 100"))

    coupon.code = code
    coupon.description = description
    coupon.course_id = course_id
    coupon.percentage_discount = discount
    coupon.save()
    return HttpResponse(_("coupon with the coupon id ({coupon_id}) updated Successfully").format(coupon_id=coupon_id))
@require_POST
@login_required
def get_coupon_info(request, course_id):  # pylint: disable=W0613
    """
    Return the details of the coupon identified by the POSTed ``id``
    as JSON, for pre-filling the edit pop-up form.

    Responds HTTP 400 when the id is missing, unknown, or the coupon is
    inactive; HTTP 200 with the coupon fields otherwise.
    """
    coupon_id = request.POST.get('id', None)
    if not coupon_id:
        return JsonResponse({
            'message': _("coupon id not found")
        }, status=400)  # status code 400: Bad Request
    try:
        coupon = Coupon.objects.get(id=coupon_id)
    except ObjectDoesNotExist:
        return JsonResponse({
            'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)
        }, status=400)  # status code 400: Bad Request

    # Inactive (soft-deleted) coupons are not editable.
    if not coupon.is_active:
        return JsonResponse({
            'message': _("coupon with the coupon id ({coupon_id}) is already inactive").format(coupon_id=coupon_id)
        }, status=400)  # status code 400: Bad Request

    # NOTE(review): the success message says "updated successfully" even
    # though this is a read-only fetch -- looks like a copy/paste from
    # remove_coupon; verify whether the frontend relies on this text.
    return JsonResponse({
        'coupon_code': coupon.code,
        'coupon_description': coupon.description,
        'coupon_course_id': coupon.course_id.to_deprecated_string(),
        'coupon_discount': coupon.percentage_discount,
        'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
    })  # status code 200: OK by default
| xiandiancloud/edxplaltfom-xusong | lms/djangoapps/instructor/views/coupons.py | Python | agpl-3.0 | 6,252 |
import os
import glob

# Export every module in this directory (including this __init__) so that
# ``from package import *`` pulls in all sibling modules.
#
# BUGFIX: the path was previously built as ``dirname(__file__) + "/*.py"``;
# when the package is imported from the current directory dirname() is the
# empty string, which produced the pattern "/*.py" and globbed the
# filesystem root.  os.path.join handles the empty-dirname case correctly.
__all__ = [
    os.path.splitext(os.path.basename(module_path))[0]
    for module_path in glob.glob(os.path.join(os.path.dirname(__file__), "*.py"))
]
# test basic complex number functionality
# NOTE: this is a MicroPython test vector -- its printed output is diffed
# against CPython's, so the exact expressions must not be changed.

# constructor
print(complex(1))
print(complex(1.2))
print(complex(1.2j))
print(complex("1"))
print(complex("1.2"))
print(complex("1.2j"))
print(complex(1, 2))
print(complex(1j, 2j))

# unary ops
print(bool(1j))
print(+(1j))
print(-(1 + 2j))

# binary ops
print(1j + False)
print(1j + True)
print(1j + 2)
print(1j + 2j)
print(1j - 2)
print(1j - 2j)
print(1j * 2)
print(1j * 2j)
print(1j / 2)
print((1j / 2j).real)
print(1j / (1 + 2j))
# %.5g formatting keeps the comparison robust against float rounding noise
ans = 0j ** 0; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 0j ** 1; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 0j ** 0j; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 1j ** 2.5; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 1j ** 2.5j; print("%.5g %.5g" % (ans.real, ans.imag))

# comparison
print(1j == 1)
print(1j == 1j)

# comparison of nan is special
nan = float('nan') * 1j
print(nan == 1j)
print(nan == nan)

# builtin abs
print(abs(1j))
print("%.5g" % abs(1j + 2))

# builtin hash
print(hash(1 + 0j))
print(type(hash(1j)))

# float on lhs should delegate to complex
print(1.2 + 3j)

# negative base and fractional power should create a complex
ans = (-1) ** 2.3; print("%.5g %.5g" % (ans.real, ans.imag))
ans = (-1.2) ** -3.4; print("%.5g %.5g" % (ans.real, ans.imag))

# check printing of inf/nan
print(float('nan') * 1j)
print(float('-nan') * 1j)
print(float('inf') * (1 + 1j))
print(float('-inf') * (1 + 1j))

# can't assign to attributes
try:
    (1j).imag = 0
except AttributeError:
    print('AttributeError')

# can't convert rhs to complex
try:
    1j + []
except TypeError:
    print("TypeError")

# unsupported unary op
try:
    ~(1j)
except TypeError:
    print("TypeError")

# unsupported binary op
try:
    1j // 2
except TypeError:
    print("TypeError")

# unsupported binary op
try:
    1j < 2j
except TypeError:
    print("TypeError")

# small int on LHS, complex on RHS, unsupported op
try:
    print(1 | 1j)
except TypeError:
    print('TypeError')

# zero division
try:
    1j / 0
except ZeroDivisionError:
    print("ZeroDivisionError")

# zero division via power
try:
    0j ** -1
except ZeroDivisionError:
    print("ZeroDivisionError")
try:
    0j ** 1j
except ZeroDivisionError:
    print("ZeroDivisionError")
| AriZuu/micropython | tests/float/complex1.py | Python | mit | 2,231 |
import os
import sys
import stat
from ._compat import open_stream, text_type, filename_to_ui, get_streerror
from .exceptions import BadParameter
from .utils import safecall, LazyFile
class ParamType(object):
    """Base class for converting parameter values through types.

    A conforming type:

    * has a descriptive name
    * passes ``None`` through unchanged
    * converts from a string
    * is idempotent -- converting an already converted value returns
      it unchanged
    * copes with ``param`` and ``ctx`` being `None`, which happens for
      values coming from prompt input
    """

    #: the descriptive name of this type
    name = None

    #: Splitter applied when a list of this type is read from a string
    #: environment variable.  `None` means "split on any whitespace",
    #: in which case leading and trailing whitespace is ignored;
    #: otherwise leading/trailing splitters yield empty items.  Path
    #: and file types override this with ``os.path.pathsep``.
    envvar_list_splitter = None

    def __call__(self, value, param=None, ctx=None):
        # `None` (the missing value) is never converted.
        if value is None:
            return None
        return self.convert(value, param, ctx)

    def get_metavar(self, param):
        """Returns the metavar default for this param if it provides one."""

    def get_missing_message(self, param):
        """Optionally might return extra information about a missing
        parameter.

        .. versionadded:: 2.0
        """

    def convert(self, value, param, ctx):
        """Convert ``value``; never invoked for the missing value `None`."""
        return value

    def split_envvar_value(self, rv):
        """Split an environment variable value into chunks according to
        :attr:`envvar_list_splitter`.
        """
        text = rv or ''
        return text.split(self.envvar_list_splitter)

    def fail(self, message, param=None, ctx=None):
        """Raise a :exc:`BadParameter` describing the invalid value."""
        raise BadParameter(message, ctx=ctx, param=param)
class FuncParamType(ParamType):
    """Adapts a plain conversion callable (e.g. ``int``) into a ParamType.

    The type's name is taken from the function's ``__name__``.
    """

    def __init__(self, func):
        self.name = func.__name__
        self.func = func

    def convert(self, value, param, ctx):
        try:
            return self.func(value)
        except ValueError:
            # The function rejected the value; build a text representation
            # of it for the error message.  The ``str(...).decode`` fallback
            # only exists on Python 2 -- presumably unreachable on Python 3.
            try:
                value = text_type(value)
            except UnicodeError:
                value = str(value).decode('utf-8', 'replace')
            self.fail(value, param, ctx)
class StringParamType(ParamType):
    """Fallback parameter type: passes text through, decoding raw bytes."""

    name = 'text'

    def convert(self, value, param, ctx):
        if isinstance(value, bytes):
            # Bytes (e.g. from sys.argv on Python 2) are decoded with
            # progressively more forgiving strategies.
            try:
                # Prefer the encoding advertised by stdin, when any.
                enc = getattr(sys.stdin, 'encoding', None)
                if enc is not None:
                    value = value.decode(enc)
            except UnicodeError:
                try:
                    # Next best guess: the filesystem encoding.
                    value = value.decode(sys.getfilesystemencoding())
                except UnicodeError:
                    # Last resort: lossy UTF-8 with replacement chars.
                    value = value.decode('utf-8', 'replace')
            # NOTE: if stdin advertises no encoding and no decode ran,
            # the original bytes object is returned unchanged.
            return value
        return value

    def __repr__(self):
        return 'STRING'
class Choice(ParamType):
    """A type that restricts values to a fixed set of supported strings.

    See :ref:`choice-opts` for an example.
    """

    name = 'choice'

    def __init__(self, choices):
        self.choices = choices

    def get_metavar(self, param):
        return '[{0}]'.format('|'.join(self.choices))

    def get_missing_message(self, param):
        return 'Choose from {0}.'.format(', '.join(self.choices))

    def convert(self, value, param, ctx):
        # Accept an exact match first.
        if value in self.choices:
            return value

        # Otherwise retry after applying the context's token
        # normalization to both the value and each candidate.
        if ctx is not None and ctx.token_normalize_func is not None:
            value = ctx.token_normalize_func(value)
            for choice in self.choices:
                if ctx.token_normalize_func(choice) == value:
                    return choice

        self.fail('invalid choice: %s. (choose from %s)' %
                  (value, ', '.join(self.choices)), param, ctx)

    def __repr__(self):
        return 'Choice(%r)' % list(self.choices)
class IntParamType(ParamType):
    """Parameter type for base-10 integers."""
    name = 'integer'

    def convert(self, value, param, ctx):
        """Parse *value* as an int, failing with a usage error otherwise."""
        try:
            parsed = int(value)
        except ValueError:
            self.fail('%s is not a valid integer' % value, param, ctx)
        else:
            return parsed

    def __repr__(self):
        return 'INT'
class IntRange(IntParamType):
    """A parameter that works similar to :data:`click.INT` but restricts
    the value to fit into a range.  The default behavior is to fail if the
    value falls outside the range, but it can also be silently clamped
    between the two edges.

    See :ref:`ranges` for an example.
    """
    name = 'integer range'

    def __init__(self, min=None, max=None, clamp=False):
        # ``None`` for either bound means that side is unbounded.
        self.min = min
        self.max = max
        # When True, out-of-range values are clamped to the nearest bound
        # instead of raising a usage error.
        self.clamp = clamp

    def convert(self, value, param, ctx):
        """Parse via the base class, then clamp or range-check the result."""
        rv = IntParamType.convert(self, value, param, ctx)
        if self.clamp:
            if self.min is not None and rv < self.min:
                return self.min
            if self.max is not None and rv > self.max:
                return self.max
        # Non-clamping path: produce a message tailored to which bound(s)
        # actually exist.
        if self.min is not None and rv < self.min or \
           self.max is not None and rv > self.max:
            if self.min is None:
                self.fail('%s is bigger than the maximum valid value '
                          '%s.' % (rv, self.max), param, ctx)
            elif self.max is None:
                self.fail('%s is smaller than the minimum valid value '
                          '%s.' % (rv, self.min), param, ctx)
            else:
                self.fail('%s is not in the valid range of %s to %s.'
                          % (rv, self.min, self.max), param, ctx)
        return rv

    def __repr__(self):
        return 'IntRange(%r, %r)' % (self.min, self.max)
class BoolParamType(ParamType):
    """Parameter type for boolean flags and their string spellings."""
    name = 'boolean'

    # Accepted literal spellings, compared after lowercasing.
    _TRUTHY = ('true', '1', 'yes', 'y')
    _FALSY = ('false', '0', 'no', 'n')

    def convert(self, value, param, ctx):
        """Coerce *value* to a bool; actual bools pass straight through."""
        if isinstance(value, bool):
            return bool(value)
        normalized = value.lower()
        if normalized in self._TRUTHY:
            return True
        if normalized in self._FALSY:
            return False
        self.fail('%s is not a valid boolean' % value, param, ctx)

    def __repr__(self):
        return 'BOOL'
class FloatParamType(ParamType):
    """Parameter type for floating point values."""
    name = 'float'

    def convert(self, value, param, ctx):
        """Parse *value* as a float, failing with a usage error otherwise."""
        try:
            parsed = float(value)
        except ValueError:
            self.fail('%s is not a valid floating point value' %
                      value, param, ctx)
        else:
            return parsed

    def __repr__(self):
        return 'FLOAT'
class UUIDParameterType(ParamType):
    """Parameter type that parses values into :class:`uuid.UUID` objects."""
    name = 'uuid'

    def convert(self, value, param, ctx):
        # Imported lazily so the module has no hard dependency at import
        # time (matches the original implementation).
        import uuid
        try:
            parsed = uuid.UUID(value)
        except ValueError:
            self.fail('%s is not a valid UUID value' % value, param, ctx)
        else:
            return parsed

    def __repr__(self):
        return 'UUID'
class File(ParamType):
    """Declares a parameter to be a file for reading or writing.  The file
    is automatically closed once the context tears down (after the command
    finished working).

    Files can be opened for reading or writing.  The special value ``-``
    indicates stdin or stdout depending on the mode.

    By default, the file is opened for reading text data, but it can also be
    opened in binary mode or for writing.  The encoding parameter can be used
    to force a specific encoding.

    The `lazy` flag controls if the file should be opened immediately or
    upon first IO.  The default is to be non lazy for standard input and
    output streams as well as files opened for reading, lazy otherwise.

    Starting with Click 2.0, files can also be opened atomically in which
    case all writes go into a separate file in the same folder and upon
    completion the file will be moved over to the original location.  This
    is useful if a file regularly read by other users is modified.

    See :ref:`file-args` for more information.
    """
    name = 'filename'
    envvar_list_splitter = os.path.pathsep

    def __init__(self, mode='r', encoding=None, errors='strict', lazy=None,
                 atomic=False):
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        # ``None`` means "decide per value" — see resolve_lazy_flag().
        self.lazy = lazy
        self.atomic = atomic

    def resolve_lazy_flag(self, value):
        """Return True if *value* should be opened lazily (on first IO)."""
        if self.lazy is not None:
            return self.lazy
        # '-' (stdin/stdout) is never lazy; write modes default to lazy so
        # target files are not touched before they are actually written.
        if value == '-':
            return False
        elif 'w' in self.mode:
            return True
        return False

    def convert(self, value, param, ctx):
        """Open *value* and return a file-like object.

        Objects that already look like files (have ``read`` or ``write``)
        are passed through untouched.
        """
        try:
            if hasattr(value, 'read') or hasattr(value, 'write'):
                return value
            lazy = self.resolve_lazy_flag(value)
            if lazy:
                f = LazyFile(value, self.mode, self.encoding, self.errors,
                             atomic=self.atomic)
                if ctx is not None:
                    ctx.call_on_close(f.close_intelligently)
                return f
            f, should_close = open_stream(value, self.mode,
                                          self.encoding, self.errors,
                                          atomic=self.atomic)
            # If a context is provided, we automatically close the file
            # at the end of the context execution (or flush out).  If a
            # context does not exist, it's the caller's responsibility to
            # properly close the file.  This for instance happens when the
            # type is used with prompts.
            if ctx is not None:
                if should_close:
                    ctx.call_on_close(safecall(f.close))
                else:
                    ctx.call_on_close(safecall(f.flush))
            return f
        except (IOError, OSError) as e:
            # NOTE(review): ``get_streerror`` is the helper's spelling as
            # defined elsewhere in this package — do not "fix" it here.
            self.fail('Could not open file: %s: %s' % (
                filename_to_ui(value),
                get_streerror(e),
            ), param, ctx)
class Path(ParamType):
    """The path type is similar to the :class:`File` type but it performs
    different checks.  First of all, instead of returning a open file
    handle it returns just the filename.  Secondly, it can perform various
    basic checks about what the file or directory should be.

    :param exists: if set to true, the file or directory needs to exist for
                   this value to be valid.  If this is not required and a
                   file does indeed not exist, then all further checks are
                   silently skipped.
    :param file_okay: controls if a file is a possible value.
    :param dir_okay: controls if a directory is a possible value.
    :param writable: if true, a writable check is performed.
    :param readable: if true, a readable check is performed.
    :param resolve_path: if this is true, then the path is fully resolved
                         before the value is passed onwards.  This means
                         that it's absolute and symlinks are resolved.
    """
    envvar_list_splitter = os.path.pathsep

    def __init__(self, exists=False, file_okay=True, dir_okay=True,
                 writable=False, readable=True, resolve_path=False):
        self.exists = exists
        self.file_okay = file_okay
        self.dir_okay = dir_okay
        self.writable = writable
        self.readable = readable
        self.resolve_path = resolve_path

        if self.file_okay and not self.dir_okay:
            self.name = 'file'
            self.path_type = 'File'
        # BUG FIX: this branch must be ``elif``.  With a plain ``if``, the
        # ``else`` below ran whenever this second condition was false —
        # including the file-only case above — clobbering name/path_type
        # back to 'path'/'Path' for Path(dir_okay=False) parameters.
        elif self.dir_okay and not self.file_okay:
            self.name = 'directory'
            self.path_type = 'Directory'
        else:
            self.name = 'path'
            self.path_type = 'Path'

    def convert(self, value, param, ctx):
        """Validate *value* as a path and return it (resolved if requested).

        Checks are applied in order: existence, file/dir kind, writability,
        readability.  A missing path short-circuits all later checks unless
        ``exists`` was requested.
        """
        rv = value
        if self.resolve_path:
            rv = os.path.realpath(rv)
        try:
            st = os.stat(rv)
        except OSError:
            if not self.exists:
                return rv
            self.fail('%s "%s" does not exist.' % (
                self.path_type,
                filename_to_ui(value)
            ), param, ctx)
        if not self.file_okay and stat.S_ISREG(st.st_mode):
            self.fail('%s "%s" is a file.' % (
                self.path_type,
                filename_to_ui(value)
            ), param, ctx)
        if not self.dir_okay and stat.S_ISDIR(st.st_mode):
            self.fail('%s "%s" is a directory.' % (
                self.path_type,
                filename_to_ui(value)
            ), param, ctx)
        if self.writable and not os.access(value, os.W_OK):
            self.fail('%s "%s" is not writable.' % (
                self.path_type,
                filename_to_ui(value)
            ), param, ctx)
        if self.readable and not os.access(value, os.R_OK):
            self.fail('%s "%s" is not readable.' % (
                self.path_type,
                filename_to_ui(value)
            ), param, ctx)
        return rv
def convert_type(ty, default=None):
    """Converts a callable or python ty into the most appropriate param
    ty.
    """
    if isinstance(ty, ParamType):
        return ty

    # Without an explicit type, infer one from the default value.
    guessed_type = ty is None and default is not None
    if guessed_type:
        ty = type(default)

    if ty is None or ty is str or ty is text_type:
        return STRING
    if ty is int:
        return INT
    if ty is bool:
        # A guessed bool is not trusted: for flags the default value only
        # indicates which of the flags is the active one, so it degrades
        # to STRING like any other guess.  See get_default().
        return STRING if guessed_type else BOOL
    if ty is float:
        return FLOAT
    if guessed_type:
        return STRING

    if __debug__:
        # Catch a common mistake: passing a ParamType *class* instead of
        # an instance.  issubclass raises TypeError for non-classes, which
        # is fine — those fall through to FuncParamType.
        try:
            if issubclass(ty, ParamType):
                raise AssertionError('Attempted to use an uninstantiated '
                                     'parameter type (%s).' % ty)
        except TypeError:
            pass
    return FuncParamType(ty)
#: A unicode string parameter type which is the implicit default.  This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()

#: An integer parameter.  This can also be selected by using ``int`` as
#: type.
INT = IntParamType()

#: A floating point value parameter.  This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()

#: A boolean parameter.  This is the default for boolean flags.  This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()

#: A UUID parameter.  Values are parsed with :func:`uuid.UUID`.
UUID = UUIDParameterType()
| patrickwind/My_Blog | venv/lib/python2.7/site-packages/click/types.py | Python | gpl-2.0 | 15,176 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields
class StockMove(models.Model):
    """Extend ``stock.move`` with a direct link to a project."""
    _inherit = 'stock.move'

    # NOTE(review): presumably populated by the MTO/procurement chain this
    # module implements — confirm against the module's other files.
    main_project_id = fields.Many2one('project.project',
                                      string="Main Project")
| Eficent/odoomrp-wip | mrp_project_link_mto/models/stock_move.py | Python | agpl-3.0 | 1,072 |
# This file is part of OpenHatch.
# Copyright (C) 2010 John Stumpo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
import mysite.search.models
class Step(models.Model):
    """A named mission step; the name is the unique identifier."""
    name = models.CharField(max_length=255, unique=True)
class StepCompletion(mysite.search.models.OpenHatchModel):
    """Records a person's completion state for one mission step."""
    person = models.ForeignKey('profile.Person')
    step = models.ForeignKey('Step')
    # Current mission status (True - user have completed it, False - reseted)
    is_currently_completed = models.BooleanField(default=True)

    class Meta:
        # At most one completion record per (person, step) pair.
        unique_together = ('person', 'step')
class IrcMissionSession(models.Model):
    """An IRC mission session keyed by nick, optionally tied to a person."""
    # Nullable: a session may exist before being associated with a person.
    person = models.ForeignKey('profile.Person', null=True)
    nick = models.CharField(max_length=255, unique=True)
    # NOTE(review): nothing here shows hashing; confirm whether plaintext
    # storage is intended for this training mission.
    password = models.CharField(max_length=255)
| nirmeshk/oh-mainline | mysite/missions/models.py | Python | agpl-3.0 | 1,422 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class User_data(extensions.ExtensionDescriptor):
    """Add user_data to the Create Server v1.1 API."""

    # Extension metadata consumed by the extensions framework.
    name = "UserData"
    alias = "os-user-data"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "userdata/api/v1.1")
    updated = "2012-08-07T00:00:00+00:00"
| petrutlucian94/nova_dev | nova/api/openstack/compute/contrib/user_data.py | Python | apache-2.0 | 949 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class PadOpTest(tf.test.TestCase):
  """Tests tf.pad against a NumPy reference for CONSTANT, REFLECT and
  SYMMETRIC modes, plus shape-inference and validation errors."""

  def _npPad(self, inp, paddings, mode):
    # NumPy reference implementation; np.pad uses lowercase mode names.
    return np.pad(inp, paddings, mode=mode.lower())

  def testNpPad(self):
    """Sanity-check the NumPy reference implementation itself."""
    self.assertAllEqual(
        np.array([[0, 0, 0, 0, 0, 0],
                  [0, 3, 3, 0, 0, 0],
                  [0, 4, 4, 0, 0, 0],
                  [0, 5, 5, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0]]),
        self._npPad(
            np.array([[3, 3], [4, 4], [5, 5]]),
            [[1, 2], [1, 3]],
            mode="constant"))

    self.assertAllEqual(
        np.array([[4, 3, 4, 9, 4, 3],
                  [1, 0, 1, 2, 1, 0],
                  [4, 3, 4, 9, 4, 3],
                  [1, 0, 1, 2, 1, 0]]),
        self._npPad(
            np.array([[0, 1, 2], [3, 4, 9]]),
            [[1, 1], [1, 2]],
            mode="reflect"))

    self.assertAllEqual(
        np.array([[0, 0, 1, 2, 2, 1],
                  [0, 0, 1, 2, 2, 1],
                  [3, 3, 4, 9, 9, 4],
                  [3, 3, 4, 9, 9, 4]]),
        self._npPad(
            np.array([[0, 1, 2], [3, 4, 9]]),
            [[1, 1], [1, 2]],
            mode="symmetric"))

  def _testPad(self, np_inputs, paddings, mode, use_gpu=False):
    """Compare tf.pad output and inferred shape against the NumPy reference."""
    np_val = self._npPad(np_inputs, paddings, mode=mode)
    with self.test_session(use_gpu=use_gpu):
      tf_val = tf.pad(np_inputs, paddings, mode=mode)
      out = tf_val.eval()
    self.assertAllEqual(np_val, out)
    self.assertShapeEqual(np_val, tf_val)

  def _testGradient(self, x, a, mode):
    """Check analytic vs numeric gradients of tf.pad."""
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      xs = list(x.shape)
      ina = tf.convert_to_tensor(a)
      y = tf.pad(inx, ina, mode=mode)
      # Expected y's shape to be:
      ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  xs,
                                                  y,
                                                  ys,
                                                  x_init_value=x)
    self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _testAll(self, np_inputs, paddings):
    """Run forward tests for every mode on CPU and GPU; gradients for float32."""
    for mode in ("CONSTANT", "REFLECT", "SYMMETRIC"):
      self._testPad(np_inputs, paddings, mode=mode, use_gpu=False)
      self._testPad(np_inputs, paddings, mode=mode, use_gpu=True)
      if np_inputs.dtype == np.float32:
        self._testGradient(np_inputs, paddings, mode=mode)

  def testInputDims(self):
    """Paddings rank must match input rank."""
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
            tf.reshape([1, 2], shape=[1, 2]))

  def testPaddingsDim(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2], shape=[2]))

  def testPaddingsDim2(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2], shape=[2, 1]))

  def testPaddingsDim3(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2], shape=[1, 2]))

  def testPaddingsDim4(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))

  def testPaddingsNonNegative(self):
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "must be non-negative"):
        tf.pad(
            tf.constant([1], shape=[1]),
            tf.constant([-1, 0], shape=[1, 2]))

  def testPaddingsNonNegative2(self):
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "must be non-negative"):
        tf.pad(
            tf.constant([1], shape=[1]),
            tf.constant([-1, 0], shape=[1, 2]))

  def testPaddingsMaximum(self):
    """REFLECT/SYMMETRIC limit how much padding a dimension allows."""
    with self.test_session():
      with self.assertRaises(Exception):
        tf.pad(
            tf.constant([1], shape=[2]),
            tf.constant([2, 0], shape=[1, 2]),
            mode="REFLECT").eval()
      with self.assertRaises(Exception):
        tf.pad(
            tf.constant([1], shape=[2]),
            tf.constant([0, 3], shape=[1, 2]),
            mode="SYMMETRIC").eval()

  def testIntTypes(self):
    # TODO(touts): Figure out why the padding tests do not work on GPU
    # for int types and rank > 2.
    for t in [np.int32, np.int64]:
      self._testAll((np.random.rand(4, 4, 3) * 100).astype(t),
                    [[1, 0], [2, 3], [0, 2]])

  def testFloatTypes(self):
    for t in [np.float32, np.float64, np.complex64]:
      self._testAll(np.random.rand(2, 5).astype(t),
                    [[1, 0], [2, 0]])

  def testShapeFunctionEdgeCases(self):
    """Shape inference with unknown inputs and/or unknown paddings."""
    # Unknown paddings shape.
    inp = tf.constant(0.0, shape=[4, 4, 4, 4])
    padded = tf.pad(inp, tf.placeholder(tf.int32))
    self.assertEqual([None, None, None, None], padded.get_shape().as_list())

    # Unknown input shape.
    inp = tf.placeholder(tf.float32)
    padded = tf.pad(inp, [[2, 2], [2, 2]])
    self.assertEqual([None, None], padded.get_shape().as_list())

    # Unknown input and paddings shape.
    inp = tf.placeholder(tf.float32)
    padded = tf.pad(inp, tf.placeholder(tf.int32))
    self.assertAllEqual(None, padded.get_shape().ndims)

  def testScalars(self):
    """Padding a scalar with empty paddings is a no-op."""
    paddings = np.zeros((0, 2), dtype=np.int32)
    inp = np.asarray(7)
    for use_gpu in False, True:
      with self.test_session(use_gpu=use_gpu):
        tf_val = tf.pad(inp, paddings)
        out = tf_val.eval()
      self.assertAllEqual(inp, out)
      self.assertShapeEqual(inp, tf_val)
if __name__ == "__main__":
tf.test.main()
| ivano666/tensorflow | tensorflow/python/kernel_tests/pad_op_test.py | Python | apache-2.0 | 6,742 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
logger = logging.getLogger('sparktk')
from sparktk import TkContext
# import constructors for the API's sake (not actually dependencies of the Dicom class)
from sparktk.dicom.constructors.import_dcm import import_dcm
__all__ = ["Dicom",
"import_dcm",
"load"]
class Dicom(object):
    """
    sparktk Dicom

    Represents a collection of DICOM data objects. Reference: [https://en.wikipedia.org/wiki/DICOM](https://en.wikipedia.org/wiki/DICOM)

    The metadata property is a sparktk frame which defines the metadata of the collection of DICOM objects.
    Its schema has a column named "id" which holds a unique integer ID for the record and another column which
    holds a string of XML comprised of the metadata. Users can run XQuery or invoke canned column extraction/filter
    operations on this frame.

    The pixeldata property is a sparktk frame which defines the pixeldata of the collection of DICOM objects.
    Its schema has a column named "id" which holds a unique integer ID for the record and another column which
    holds a matrix(internally it is a numpy.ndarray) comprised of the pixeldata. Users can run numpy supported transformations on it.

    dcm4che-3.x dependencies are used to support various operations on dicom images. It is available as java library
    Reference: [https://github.com/dcm4che/dcm4che](https://github.com/dcm4che/dcm4che)

    Note: Currently sparktk Dicom supports only uncompressed dicom images

    Load a set of uncompressed sample .dcm files from path (integration-tests/datasets/dicom_uncompressed)
    and create a dicom object. The below examples helps you to understand how to access dicom object properties.

    Examples
    --------
        #Path can be local/hdfs to dcm file(s)
        >>> dicom_path = "../datasets/dicom_uncompressed"

        #use import_dcm available inside dicom module to create a dicom object from given dicom_path
        >>> dicom = tc.dicom.import_dcm(dicom_path)

        #Type of dicom object created
        >>> type(dicom)
        <class 'sparktk.dicom.dicom.Dicom'>

        >>> dicom.metadata.count()
        3

        >>> dicom.pixeldata.count()
        3

        <skip>
        >>> dicom.metadata.inspect(truncate=30)
        [#]  id  metadata
        =======================================
        [0]   0  <?xml version="1.0" encodin...
        [1]   1  <?xml version="1.0" encodin...
        [2]   2  <?xml version="1.0" encodin...
        </skip>

        #Part of xml string looks as below
        <?xml version="1.0" encoding="UTF-8"?>
            <NativeDicomModel xml:space="preserve">
                <DicomAttribute keyword="FileMetaInformationVersion" tag="00020001" vr="OB"><InlineBinary>AAE=</InlineBinary></DicomAttribute>
                <DicomAttribute keyword="MediaStorageSOPClassUID" tag="00020002" vr="UI"><Value number="1">1.2.840.10008.5.1.4.1.1.4</Value></DicomAttribute>
                <DicomAttribute keyword="MediaStorageSOPInstanceUID" tag="00020003" vr="UI"><Value number="1">1.3.6.1.4.1.14519.5.2.1.7308.2101.234736319276602547946349519685</Value></DicomAttribute>
                ...

        #pixeldata property is sparktk frame
        >>> pixeldata = dicom.pixeldata.take(1)

        #Display
        <skip>
        >>> pixeldata
        [[0L, array([[   0.,    0.,    0., ...,    0.,    0.,    0.],
        [   0.,  125.,  103., ...,  120.,  213.,  319.],
        [   0.,  117.,   94., ...,  135.,  223.,  325.],
        ...,
        [   0.,   62.,   21., ...,  896.,  886.,  854.],
        [   0.,   63.,   23., ...,  941.,  872.,  897.],
        [   0.,   60.,   30., ...,  951.,  822.,  906.]])]]
        </skip>

        #Access ndarray
        >>> image_ndarray= pixeldata[0][1]

        >>> type(image_ndarray)
        <type 'numpy.ndarray'>

        #Dimesions of the image matrix stored
        >>> image_ndarray.shape
        (320, 320)

        <skip>
        #Use python matplot lib package to verify image visually
        >>> import pylab
        >>> pylab.imshow(image_ndarray, cmap=pylab.cm.bone)
        >>> pylab.show()

        #Save method persists the dicom object to disk
        >>> dicom.save("sandbox/dicom_data")

        #loads the saved dicom object
        >>> load_dicom = tc.load("sandbox/dicom_data")

        #Re-check whether we loaded back the dicom object or not
        >>> type(load_dicom)
        <class 'sparktk.dicom.dicom.Dicom'>

        #Again access pixeldata and perform same operations as above
        >>> load_pixeldata = load_dicom.pixeldata.take(1)

        #Order may differ when you load back dicom object
        >>> load_pixeldata
        [[0L, array([[   0.,    0.,    0., ...,    0.,    0.,    0.],
        [   0.,  125.,  103., ...,  120.,  213.,  319.],
        [   0.,  117.,   94., ...,  135.,  223.,  325.],
        ...,
        [   0.,   62.,   21., ...,  896.,  886.,  854.],
        [   0.,   63.,   23., ...,  941.,  872.,  897.],
        [   0.,   60.,   30., ...,  951.,  822.,  906.]])]]

        >>> load_image_ndarray= load_pixeldata[0][1]

        >>> type(load_image_ndarray)
        <type 'numpy.ndarray'>

        >>> load_image_ndarray.shape
        (320, 320)

        #Inspect metadata property to see dicom metadata xml content
        >>> load_dicom.metadata.inspect(truncate=30)
        [#]  id  metadata
        =======================================
        [0]   0  <?xml version="1.0" encodin...
        [1]   1  <?xml version="1.0" encodin...
        [2]   2  <?xml version="1.0" encodin...
        </skip>

        #Using to built-in xml libraries to run xquery on metadata
        >>> import xml.etree.ElementTree as ET

        #Performing add_columns operation.
        #Add xml tag as column in dicom metadata frame
        #Here we add SOPInstanceUID as column to metadaframe

        #sample function to apply on row - add_columns
        >>> def extractor(tag_name):
        ...    def _extractor(row):
        ...        root = ET.fromstring(row["metadata"])
        ...        for attribute in root.findall('DicomAttribute'):
        ...            keyword = attribute.get('keyword')
        ...            value = None
        ...            if attribute.find('Value') is not None:
        ...                value = attribute.find('Value').text
        ...            if keyword == tag_name:
        ...                return value
        ...    return _extractor

        >>> tag_name = "SOPInstanceUID"

        >>> dicom.metadata.add_columns(extractor(tag_name), (tag_name, str))

        >>> dicom.metadata.count()
        3

        <skip>
        >>> dicom.metadata.inspect(truncate=30)
        [#]  id  metadata                        SOPInstanceUID
        =======================================================================
        [0]   0  <?xml version="1.0" encodin...  1.3.6.1.4.1.14519.5.2.1.730...
        [1]   1  <?xml version="1.0" encodin...  1.3.6.1.4.1.14519.5.2.1.730...
        [2]   2  <?xml version="1.0" encodin...  1.3.6.1.4.1.14519.5.2.1.730...
        </skip>

    """

    def __init__(self, tc, scala_dicom):
        """Wrap a JVM-side scala Dicom, materializing its two frames."""
        self._tc = tc
        from sparktk.frame.frame import Frame
        self._metadata = Frame(self._tc, scala_dicom.metadata())
        self._pixeldata = Frame(self._tc, scala_dicom.pixeldata())

    def __repr__(self):
        #TODO Python friendly repr
        #Write a string summary
        return self._get_new_scala().toString()

    @property
    def metadata(self):
        # Frame with columns: "id" (int) and the metadata XML string.
        return self._metadata

    @property
    def pixeldata(self):
        # Frame with columns: "id" (int) and the pixeldata matrix.
        return self._pixeldata

    @staticmethod
    def _from_scala(tc, scala_dicom):
        """creates a python dicom for the given scala dicom"""
        return Dicom(tc, scala_dicom)

    #Creating new scala dicom to handle mutability issue.
    # When import_dcm is invoked, it returns scala dicom object(scala metadata frame and pixeldata frame).
    # When user performs add_columns or any operation which turns scala frame to python frame, the link is lost
    # To avoid such issues, we create new dicom object using (metadata and pixeldata frames) when accessing scala method
    def _get_new_scala(self):
        """Build a fresh JVM Dicom from the current metadata/pixeldata frames."""
        return self._tc.sc._jvm.org.trustedanalytics.sparktk.dicom.Dicom(self._metadata._scala, self._pixeldata._scala)

    #method to call passed function with new scala dicom
    def _call_scala(self, func):
        """Run *func* against a fresh scala dicom, then re-sync both frames."""
        from sparktk.frame.frame import Frame
        scala_dicom = self._get_new_scala()
        results = func(scala_dicom)
        self._metadata = Frame(self._tc, scala_dicom.metadata())
        self._pixeldata = Frame(self._tc, scala_dicom.pixeldata())
        return results

    # Dicom Operations
    from sparktk.dicom.ops.drop_rows import drop_rows
    from sparktk.dicom.ops.drop_rows_by_keywords import drop_rows_by_keywords
    from sparktk.dicom.ops.drop_rows_by_tags import drop_rows_by_tags
    from sparktk.dicom.ops.extract_keywords import extract_keywords
    from sparktk.dicom.ops.extract_tags import extract_tags
    from sparktk.dicom.ops.export_to_dcm import export_to_dcm
    from sparktk.dicom.ops.filter import filter
    from sparktk.dicom.ops.filter_by_keywords import filter_by_keywords
    from sparktk.dicom.ops.filter_by_tags import filter_by_tags
    from sparktk.dicom.ops.save import save
def load(path, tc=TkContext.implicit):
    """load Dicom from given path

    :param path: location previously written by :meth:`Dicom.save`
    :param tc: TkContext (implicitly resolved when omitted)
    :return: the loaded :class:`Dicom` object
    """
    TkContext.validate(tc)
    return tc.load(path, Dicom)
| dmsuehir/spark-tk | python/sparktk/dicom/dicom.py | Python | apache-2.0 | 10,173 |
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 debug):
        ''' Constructor for OCVersion '''
        super(OCVersion, self).__init__(None, config)
        self.debug = debug

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        # Queries the local RPM database for the atomic-openshift package.
        # yum is imported lazily so the module can load on non-yum hosts.
        import yum

        yum_base = yum.YumBase()
        if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
            return True
        else:
            return False

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''
        # Maps each tool name ('oc', 'openshift', 'kubernetes') to the last
        # whitespace-separated token of its line in the `oc version` output.
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''
        # Derives "<tech>_numeric" (e.g. '3.3.0.33') and "<tech>_short"
        # (e.g. '3.3') entries from each "vX.Y..." version string.
        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    def get(self):
        '''get and return version information '''
        results = {}
        results["installed"] = OCVersion.openshift_installed()

        if not results["installed"]:
            return results

        version_results = self.openshift_cmd(['version'], output=True, output_type='raw')

        if version_results['returncode'] == 0:
            filtered_vers = OCVersion.filter_versions(version_results['results'])
            custom_vers = OCVersion.add_custom_versions(filtered_vers)

            results['returncode'] = version_results['returncode']
            results.update(filtered_vers)
            results.update(custom_vers)

            return results

        raise OpenShiftCLIError('Problem detecting openshift version.')
| appuio/ansible-role-openshift-zabbix-monitoring | vendor/openshift-tools/ansible/roles/lib_openshift_3.2/build/src/oc_version.py | Python | apache-2.0 | 2,732 |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import logging
import warnings
from admin_scripts.tests import AdminScriptTestCase
from django.core import mail
from django.core.files.temp import NamedTemporaryFile
from django.test import RequestFactory, TestCase, override_settings
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.encoding import force_text
from django.utils.log import (
AdminEmailHandler, CallbackFilter, RequireDebugFalse, RequireDebugTrue,
)
from django.utils.six import StringIO
from .logconfig import MyEmailBackend
# logging config prior to using filter with mail_admins
# (i.e. no require_debug_false filter attached to the handler, so the
# handler fires on ERROR in django.request regardless of DEBUG)
OLD_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
class LoggingFiltersTest(TestCase):
    """Checks the DEBUG-dependent logging filter classes."""

    def test_require_debug_false_filter(self):
        """
        Test the RequireDebugFalse filter class.
        """
        filter_ = RequireDebugFalse()

        # Passes only when DEBUG is off; the record argument is ignored.
        with self.settings(DEBUG=True):
            self.assertEqual(filter_.filter("record is not used"), False)

        with self.settings(DEBUG=False):
            self.assertEqual(filter_.filter("record is not used"), True)

    def test_require_debug_true_filter(self):
        """
        Test the RequireDebugTrue filter class.
        """
        filter_ = RequireDebugTrue()

        # Mirror image of RequireDebugFalse: passes only when DEBUG is on.
        with self.settings(DEBUG=True):
            self.assertEqual(filter_.filter("record is not used"), True)

        with self.settings(DEBUG=False):
            self.assertEqual(filter_.filter("record is not used"), False)
class DefaultLoggingTest(TestCase):
    """The base 'django' logger should only emit output when DEBUG=True."""

    def setUp(self):
        self.logger = logging.getLogger('django')
        # Remember the handler's stream so tearDown can restore it.
        self.old_stream = self.logger.handlers[0].stream

    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream

    def test_django_logger(self):
        """
        The 'django' base logger only output anything when DEBUG=True.
        """
        output = StringIO()
        self.logger.handlers[0].stream = output
        # With DEBUG off nothing reaches the stream...
        self.logger.error("Hey, this is an error.")
        self.assertEqual(output.getvalue(), '')

        # ...but with DEBUG on the message is written.
        with self.settings(DEBUG=True):
            self.logger.error("Hey, this is an error.")
            self.assertEqual(output.getvalue(), 'Hey, this is an error.\n')
class WarningLoggerTests(TestCase):
    """
    Tests that warnings output for RemovedInDjangoXXWarning (XX being the next
    Django version) is enabled and captured to the logging system
    """
    def setUp(self):
        # If tests are invoke with "-Wall" (or any -W flag actually) then
        # warning logging gets disabled (see configure_logging in django/utils/log.py).
        # However, these tests expect warnings to be logged, so manually force warnings
        # to the logs. Use getattr() here because the logging capture state is
        # undocumented and (I assume) brittle.
        self._old_capture_state = bool(getattr(logging, '_warnings_showwarning', False))
        logging.captureWarnings(True)

        # this convoluted setup is to avoid printing this deprecation to
        # stderr during test running - as the test runner forces deprecations
        # to be displayed at the global py.warnings level
        self.logger = logging.getLogger('py.warnings')
        self.outputs = []
        self.old_streams = []
        for handler in self.logger.handlers:
            self.old_streams.append(handler.stream)
            self.outputs.append(StringIO())
            handler.stream = self.outputs[-1]

    def tearDown(self):
        # Restore each handler's original stream.
        for i, handler in enumerate(self.logger.handlers):
            self.logger.handlers[i].stream = self.old_streams[i]

        # Reset warnings state.
        logging.captureWarnings(self._old_capture_state)

    @override_settings(DEBUG=True)
    def test_warnings_capture(self):
        """With DEBUG on, deprecation warnings land in the py.warnings log."""
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
            output = force_text(self.outputs[0].getvalue())
        self.assertIn('Foo Deprecated', output)

    def test_warnings_capture_debug_false(self):
        """With DEBUG off, the same warning is not logged."""
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
            output = force_text(self.outputs[0].getvalue())
        self.assertNotIn('Foo Deprecated', output)

    @override_settings(DEBUG=True)
    def test_error_filter_still_raises(self):
        """An 'error' warning filter still escalates the warning to an exception."""
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'error',
                category=RemovedInNextVersionWarning
            )
            with self.assertRaises(RemovedInNextVersionWarning):
                warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
class CallbackFilterTest(TestCase):
    """Tests for the CallbackFilter logging filter."""

    def test_sense(self):
        """filter() mirrors whatever the wrapped callback returns."""
        always_reject = CallbackFilter(lambda record: False)
        always_accept = CallbackFilter(lambda record: True)
        self.assertEqual(always_reject.filter("record"), False)
        self.assertEqual(always_accept.filter("record"), True)

    def test_passes_on_record(self):
        """The callback is handed the log record itself."""
        seen = []

        def _callback(record):
            seen.append(record)
            return True

        CallbackFilter(_callback).filter("a record")
        self.assertEqual(seen, ["a record"])
class AdminEmailHandlerTest(TestCase):
    """
    Tests for the AdminEmailHandler attached to the 'django.request' logger:
    subject composition, newline escaping, length truncation, custom e-mail
    backends and overridable send_mail().
    """
    logger = logging.getLogger('django.request')

    def get_admin_email_handler(self, logger):
        """Return the AdminEmailHandler instance attached to ``logger``."""
        # Inspired from views/views.py: send_log()
        # ensuring the AdminEmailHandler does not get filtered out
        # even with DEBUG=True.
        admin_email_handler = [
            h for h in logger.handlers
            if h.__class__.__name__ == "AdminEmailHandler"
        ][0]
        return admin_email_handler

    def test_fail_silently(self):
        """The handler's mail connection is created with fail_silently=True."""
        admin_email_handler = self.get_admin_email_handler(self.logger)
        self.assertTrue(admin_email_handler.connection().fail_silently)

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
    )
    def test_accepts_args(self):
        """
        Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
        setting are used to compose the email subject.
        Refs #16736.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters so they can't suppress the record.
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            self.logger.error(message, token1, token2)
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
        INTERNAL_IPS=('127.0.0.1',),
    )
    def test_accepts_args_and_request(self):
        """
        Ensure that the subject is also handled if being
        passed a request object.

        With the test client's IP listed in INTERNAL_IPS, the subject is
        expected to read "(internal IP)".
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            rf = RequestFactory()
            request = rf.get('/')
            self.logger.error(message, token1, token2,
                extra={
                    'status_code': 403,
                    'request': request,
                }
            )
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_subject_accepts_newlines(self):
        """
        Ensure that newlines in email reports' subjects are escaped to avoid
        AdminErrorHandler to fail.
        Refs #17281.
        """
        message = 'Message \r\n with newlines'
        # \r and \n must come out escaped as literal backslash sequences.
        expected_subject = 'ERROR: Message \\r\\n with newlines'
        self.assertEqual(len(mail.outbox), 0)
        self.logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertNotIn('\n', mail.outbox[0].subject)
        self.assertNotIn('\r', mail.outbox[0].subject)
        self.assertEqual(mail.outbox[0].subject, expected_subject)

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_truncate_subject(self):
        """
        RFC 2822's hard limit is 998 characters per line.
        So, minus "Subject: ", the actual subject must be no longer than 989
        characters.
        Refs #17281.
        """
        message = 'a' * 1000
        # 'ERROR: ' (7 chars) + 982 of the 1000 'a's = 989 characters total.
        expected_subject = 'ERROR: aa' + 'a' * 980
        self.assertEqual(len(mail.outbox), 0)
        self.logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, expected_subject)

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        DEBUG=False,
    )
    def test_uses_custom_email_backend(self):
        """
        The handler's email_backend attribute determines the connection
        passed to mail_admins().
        Refs #19325
        """
        message = 'All work and no play makes Jack a dull boy'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Use a dict so the closure below can mutate the flag (works
        # without `nonlocal`).
        mail_admins_called = {'called': False}

        def my_mail_admins(*args, **kwargs):
            connection = kwargs['connection']
            self.assertIsInstance(connection, MyEmailBackend)
            mail_admins_called['called'] = True

        # Monkeypatches
        orig_mail_admins = mail.mail_admins
        orig_email_backend = admin_email_handler.email_backend
        mail.mail_admins = my_mail_admins
        admin_email_handler.email_backend = (
            'logging_tests.logconfig.MyEmailBackend')
        try:
            self.logger.error(message)
            self.assertTrue(mail_admins_called['called'])
        finally:
            # Revert Monkeypatches
            mail.mail_admins = orig_mail_admins
            admin_email_handler.email_backend = orig_email_backend

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
    )
    def test_emit_non_ascii(self):
        """
        #23593 - AdminEmailHandler should allow Unicode characters in the
        request.
        """
        handler = self.get_admin_email_handler(self.logger)
        record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
        rf = RequestFactory()
        # Non-ASCII path: must survive into the mail body unmangled.
        url_path = '/º'
        record.request = rf.get(url_path)
        handler.emit(record)
        self.assertEqual(len(mail.outbox), 1)
        msg = mail.outbox[0]
        self.assertEqual(msg.to, ['admin@example.com'])
        self.assertEqual(msg.subject, "[Django] ERROR (EXTERNAL IP): message")
        self.assertIn("path:%s" % url_path, msg.body)

    @override_settings(
        MANAGERS=(('manager', 'manager@example.com'),),
        DEBUG=False,
    )
    def test_customize_send_mail_method(self):
        """
        Subclasses can override send_mail() to reroute reports (here, to
        MANAGERS via mail_managers()).
        """
        class ManagerEmailHandler(AdminEmailHandler):
            def send_mail(self, subject, message, *args, **kwargs):
                mail.mail_managers(subject, message, *args, connection=self.connection(), **kwargs)

        handler = ManagerEmailHandler()
        record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
        self.assertEqual(len(mail.outbox), 0)
        handler.emit(record)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['manager@example.com'])
class SettingsConfigTest(AdminScriptTestCase):
    """
    Test that accessing settings in a custom logging handler does not trigger
    a circular import error.
    """
    def setUp(self):
        # Source text for a LOGGING dict (written verbatim into the generated
        # settings file) pointing at the test-local custom handler class.
        log_config = """{
            'version': 1,
            'handlers': {
                'custom_handler': {
                    'level': 'INFO',
                    'class': 'logging_tests.logconfig.MyHandler',
                }
            }
        }"""
        self.write_settings('settings.py', sdict={'LOGGING': log_config})

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_circular_dependency(self):
        # validate is just an example command to trigger settings configuration
        out, err = self.run_manage(['validate'])
        self.assertNoOutput(err)
        self.assertOutput(out, "System check identified no issues (0 silenced).")
def dictConfig(config):
    """Stand-in LOGGING_CONFIG callable that merely records being invoked."""
    setattr(dictConfig, 'called', True)


# Not yet invoked; SetupConfigureLogging inspects this flag after django.setup().
dictConfig.called = False
class SetupConfigureLogging(TestCase):
    """
    Calling django.setup() initializes the logging configuration by invoking
    the callable named in LOGGING_CONFIG.
    """
    @override_settings(
        LOGGING_CONFIG='logging_tests.tests.dictConfig',
        LOGGING=OLD_LOGGING,
    )
    def test_configure_initializes_logging(self):
        # Import lazily so the settings override above is already active
        # when setup() runs.
        from django import setup
        setup()
        # Our dictConfig stub flips this flag when it is invoked.
        self.assertTrue(dictConfig.called)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class SecurityLoggerTest(TestCase):
    """
    SuspiciousOperation handling: log message content, sublogger routing,
    and admin e-mail notification.
    """

    def test_suspicious_operation_creates_log_message(self):
        with patch_logger('django.security.SuspiciousOperation', 'error') as calls:
            self.client.get('/suspicious/')
        # Exactly one error record with the view's message.
        self.assertEqual(calls, ['dubious'])

    def test_suspicious_operation_uses_sublogger(self):
        with patch_logger('django.security.DisallowedHost', 'error') as calls:
            self.client.get('/suspicious_spec/')
        # The record lands on the exception-specific child logger.
        self.assertEqual(calls, ['dubious'])

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        DEBUG=False,
    )
    def test_suspicious_email_admins(self):
        """A suspicious request produces a single admin e-mail naming the path."""
        self.client.get('/suspicious/')
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('path:/suspicious/,', mail.outbox[0].body)
class SettingsCustomLoggingTest(AdminScriptTestCase):
    """
    Test that using a logging defaults are still applied when using a custom
    callable in LOGGING_CONFIG (i.e., logging.config.fileConfig).
    """
    def setUp(self):
        # Minimal fileConfig-style ini: one root logger, one stream handler
        # writing to stdout with a message-only formatter.
        logging_conf = """
[loggers]
keys=root

[handlers]
keys=stream

[formatters]
keys=simple

[logger_root]
handlers=stream

[handler_stream]
class=StreamHandler
formatter=simple
args=(sys.stdout,)

[formatter_simple]
format=%(message)s
"""
        # fileConfig() reads from a path, so persist the ini to a temp file
        # (flush so the config is on disk before the subprocess reads it).
        self.temp_file = NamedTemporaryFile()
        self.temp_file.write(logging_conf.encode('utf-8'))
        self.temp_file.flush()
        # Point LOGGING_CONFIG at fileConfig and LOGGING at the ini path
        # (raw-string literal so backslashes in the path survive).
        sdict = {'LOGGING_CONFIG': '"logging.config.fileConfig"',
                 'LOGGING': 'r"%s"' % self.temp_file.name}
        self.write_settings('settings.py', sdict=sdict)

    def tearDown(self):
        # Closing the NamedTemporaryFile also removes it from disk.
        self.temp_file.close()
        self.remove_settings('settings.py')

    def test_custom_logging(self):
        # validate triggers settings (and thus logging) configuration.
        out, err = self.run_manage(['validate'])
        self.assertNoOutput(err)
        self.assertOutput(out, "System check identified no issues (0 silenced).")
| h4r5h1t/django-hauthy | tests/logging_tests/tests.py | Python | bsd-3-clause | 16,206 |