code
stringlengths
3
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
3
1.05M
"""Restricted resources Revision ID: 35a6ffd7a079 Revises: 3b3583fcbaea Create Date: 2014-03-19 04:55:01.382718 """ # revision identifiers, used by Alembic. revision = '35a6ffd7a079' down_revision = '3b3583fcbaea' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('resource', 'trusted', new_column_name='restricted') def downgrade(): op.alter_column('resource', 'restricted', new_column_name='trusted')
sindhus/lastuser
alembic/versions/35a6ffd7a079_restricted_resources.py
Python
bsd-2-clause
447
import unittest from katas.beta.strange_strings_parser import parser class ParserTestCase(unittest.TestCase): def test_equal_1(self): self.assertEqual(parser('12:56C:144:1000:1200'), ['12', '56C', '144', '1000', '1200']) def test_equal_2(self): self.assertEqual(parser('23;RPM;300;PSI;MODE;FORWARD'), ['23', 'RPM', '300', 'PSI', 'MODE', 'FORWARD']) def test_equal_3(self): self.assertEqual( parser('340000.00*-140.49902*ELEVATION*24000000*END'), ['340000.00', '-140.49902', 'ELEVATION', '24000000', 'END'])
the-zebulan/CodeWars
tests/beta_tests/test_strange_strings_parser.py
Python
mit
622
# -*- coding: utf-8 #------------------------------------------------------------------# __author__ = "Xavier MARCELET <xavier@marcelet.com>" #------------------------------------------------------------------# import sys import logging import termcolor #------------------------------------------------------------------# class LocationFormatter(logging.Formatter): def __init__(self, fmt = "%(asctime)s (%(name)s) [%(levelname)s] : %(message)s %(location)s", datefmt = "%Y-%m-%d %H:%M:%S", locfmt = "at %(pathname)s:%(lineno)s -> %(funcName)s", locstyle = None): super(LocationFormatter, self).__init__(fmt, datefmt) self.m_fmt = fmt self.m_locFmt = locfmt self.m_datefmt = datefmt self.m_locstyle = locstyle if locstyle is None: self.m_locstyle = { "colors" : [], "attrs" : [] } def _get_loc(self, p_record): l_loc = self.m_locFmt % { x : getattr(p_record, x) for x in dir(p_record) if x[0] != "_" } l_args = {} l_colors = self.m_locstyle.get("colors", []) l_attrs = self.m_locstyle.get("attrs", []) if not isinstance(l_colors, list): l_colors = [ l_colors ] if not isinstance(l_attrs, list): l_attrs = [ l_attrs ] l_args["attrs"] = l_attrs for c_arg in l_colors: if c_arg[0:3] == "on_": l_args["on_color"] = c_arg else: l_args["color"] = c_arg return termcolor.colored(l_loc, **l_args) def format(self, p_record): l_loc = self._get_loc(p_record) if sys.version_info[0] >= 3: #pylint: disable=no-member,protected-access self._style._fmt = self.m_fmt.replace("%(location)s", l_loc) else: #pylint: disable=no-member,protected-access self._fmt = self.m_fmt.replace("%(location)s", l_loc) return super(LocationFormatter, self).format(p_record)
psycofdj/xtdpy
xtd/core/logger/formatter.py
Python
gpl-3.0
1,884
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_utils import timeutils import cinder.api.contrib.availability_zones import cinder.context import cinder.test import cinder.volume.api created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) current_time = timeutils.utcnow() def list_availability_zones(self): return ( {'name': 'ping', 'available': True}, {'name': 'pong', 'available': False}, ) class FakeRequest(object): environ = {'cinder.context': cinder.context.get_admin_context()} GET = {} class ControllerTestCase(cinder.test.TestCase): def setUp(self): super(ControllerTestCase, self).setUp() self.controller = cinder.api.contrib.availability_zones.Controller() self.req = FakeRequest() self.stubs.Set(cinder.volume.api.API, 'list_availability_zones', list_availability_zones) def test_list_hosts(self): """Verify that the volume hosts are returned.""" actual = self.controller.index(self.req) expected = { 'availabilityZoneInfo': [ {'zoneName': 'ping', 'zoneState': {'available': True}}, {'zoneName': 'pong', 'zoneState': {'available': False}}, ], } self.assertEqual(expected, actual)
bswartz/cinder
cinder/tests/unit/api/contrib/test_availability_zones.py
Python
apache-2.0
1,945
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import argparse as _argparse import brokerlib as _brokerlib import collections as _collections import plano as _plano import proton as _proton import proton.handlers as _handlers import proton.reactor as _reactor import shlex as _shlex import subprocess as _subprocess import uuid as _uuid from .common import * from .common import __version__, _epilog_address_urls, _epilog_server_impls _description = """ Start a message server with the given queue. 
""" _epilog = """ {_epilog_address_urls} {_epilog_server_impls} """.format(**globals()) class QuiverServerCommand(Command): def __init__(self, home_dir): super(QuiverServerCommand, self).__init__(home_dir) self.parser.description = _description.lstrip() self.parser.epilog = _epilog.lstrip() self.parser.add_argument("url", metavar="ADDRESS-URL", help="The location of a message source or target") self.parser.add_argument("--impl", metavar="NAME", help="Use NAME implementation", default=DEFAULT_SERVER_IMPL) self.parser.add_argument("--info", action="store_true", help="Print implementation details and exit") self.parser.add_argument("--impl-info", action="store_true", dest="info", help=_argparse.SUPPRESS) self.parser.add_argument("--ready-file", metavar="FILE", help="The file used to indicate the server is ready") self.parser.add_argument("--prelude", metavar="PRELUDE", default="", help="Commands to precede the implementation invocation") self.parser.add_argument("--cert", metavar="CERT.PEM", help="Certificate filename") self.parser.add_argument("--key", metavar="PRIVATE-KEY.PEM", help="Private key filename") self.parser.add_argument("--key-password", metavar="key_password", help="Certificate password (required for encrypted private keys)") self.parser.add_argument("--trusted-db", metavar="TRUSTED-DB.PEM", help="Database of trusted CA certificate(s). If specified the peer's client is tested" "against this source of trust.") self.parser.add_argument("--sasl-user", metavar="SASL USERNAME", help="SASL username that the peer must present") self.parser.add_argument("--sasl-password", metavar="SASL PASSWORD", help="SASL password that the peer must present. 
Ignored is --sasl-user is not present.") self.add_common_tool_arguments() def init(self): self.intercept_info_request(DEFAULT_SERVER_IMPL) super(QuiverServerCommand, self).init() self.impl = require_impl(self.args.impl) self.ready_file = self.args.ready_file self.prelude = _shlex.split(self.args.prelude) self.cert = self.args.cert self.key = self.args.key self.key_password = self.args.key_password self.trusted_db = self.args.trusted_db self.sasl_user = self.args.sasl_user self.sasl_password = self.args.sasl_password if self.ready_file is None: self.ready_file = "-" self.init_url_attributes() self.init_common_tool_attributes() def run(self): args = self.prelude + [ self.impl.file, "host={}".format(self.host), "port={}".format(self.port), "path={}".format(self.path), "ready-file={}".format(self.ready_file), ] if self.scheme: args.append("scheme={}".format(self.scheme)) if self.cert: args.append("cert={}".format(self.cert)) if self.key: args.append("key={}".format(self.key)) if self.key_password: args.append("key-password={}".format(self.key_password)) if self.trusted_db: args.append("trusted-db={}".format(self.trusted_db)) if self.sasl_user: args.append("user={}".format(self.sasl_user)) if self.sasl_password: args.append("password={}".format(self.sasl_password)) _plano.call(args) class BuiltinBroker(_brokerlib.Broker): def __init__(self, scheme, host, port, path, ready_file, cert=None, key=None, key_password=None, trusted_db=None, user=None, password=None): if ready_file == "-": ready_file = None super().__init__(scheme, host, port, id="quiver-server-builtin", ready_file=ready_file, cert=cert, key=key, key_password=key_password, trusted_db=trusted_db, user=user, password=password) self.path = path def info(self, message, *args): _plano.notice(message, *args) def notice(self, message, *args): _plano.notice(message, *args) def warn(self, message, *args): _plano.warn(message, *args) def error(self, message, *args): _plano.error(message, *args) def fail(self, message, 
*args): _plano.fail(message, *args)
tabish121/quiver
python/quiver/server.py
Python
apache-2.0
6,188
"""Unsupervised nearest neighbors learner""" from .base import NeighborsBase from .base import KNeighborsMixin from .base import RadiusNeighborsMixin from .base import UnsupervisedMixin class NearestNeighbors(NeighborsBase, KNeighborsMixin, RadiusNeighborsMixin, UnsupervisedMixin): """Unsupervised learner for implementing neighbor searches. Parameters ---------- n_neighbors : int, optional (default = 5) Number of neighbors to use by default for :meth:`k_neighbors` queries. radius : float, optional (default = 1.0) Range of parameter space to use by default for :meth`radius_neighbors` queries. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`scipy.spatial.cKDtree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p: integer, optional (default = 2) Parameter for the Minkowski metric from sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. Examples -------- >>> from sklearn.neighbors import NearestNeighbors >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]] >>> neigh = NearestNeighbors(2, 0.4) >>> neigh.fit(samples) #doctest: +ELLIPSIS NearestNeighbors(...) >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False) ... #doctest: +ELLIPSIS array([[2, 0]]...) 
>>> neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False) array([[2]]) See also -------- KNeighborsClassifier RadiusNeighborsClassifier KNeighborsRegressor RadiusNeighborsRegressor BallTree Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, n_neighbors=5, radius=1.0, algorithm='auto', leaf_size=30, p=2): self._init_params(n_neighbors=n_neighbors, radius=radius, algorithm=algorithm, leaf_size=leaf_size, p=p)
florian-f/sklearn
sklearn/neighbors/unsupervised.py
Python
bsd-3-clause
2,953
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example adds several text ads to a given ad group. To get ad_group_id, run get_ad_groups.py. The LoadFromStorage method is pulling credentials and properties from a "googleads.yaml" file. By default, it looks for this file in your home directory. For more information, see the "Caching authentication information" section of our README. Tags: AdGroupAdService.mutate Api: AdWordsOnly """ __author__ = ('api.kwinter@gmail.com (Kevin Winter)' 'Joseph DiLallo') from googleads import adwords AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE' def main(client, ad_group_id): # Initialize appropriate service. ad_group_ad_service = client.GetService('AdGroupAdService', version='v201406') # Construct operations and add ads. # If needed, you could specify an exemption request here, e.g.: # 'exemptionRequests': [{ # # This comes back in a PolicyViolationError. # 'key' { # 'policyName': '...', # 'violatingText': '...' # } # }] operations = [ { 'operator': 'ADD', 'operand': { 'xsi_type': 'AdGroupAd', 'adGroupId': ad_group_id, 'ad': { 'xsi_type': 'TextAd', 'url': 'http://www.example.com', 'displayUrl': 'example.com', 'description1': 'Visit the Red Planet in style.', 'description2': 'Low-gravity fun for everyone!', 'headline': 'Luxury Cruise to Mars' }, # Optional fields. 
'status': 'PAUSED' } }, { 'operator': 'ADD', 'operand': { 'xsi_type': 'AdGroupAd', 'adGroupId': ad_group_id, 'ad': { 'xsi_type': 'TextAd', 'url': 'http://www.example.com', 'displayUrl': 'example.com', 'description1': 'Enjoy your stay at Red Planet.', 'description2': 'Buy your tickets now!', 'headline': 'Luxury Cruise to Mars' } } } ] ads = ad_group_ad_service.mutate(operations) # Display results. for ad in ads['value']: print ('Ad with id \'%s\' and of type \'%s\' was added.' % (ad['ad']['id'], ad['ad']['Ad.Type'])) if __name__ == '__main__': # Initialize client object. adwords_client = adwords.AdWordsClient.LoadFromStorage() main(adwords_client, AD_GROUP_ID)
dietrichc/streamline-ppc-reports
examples/adwords/v201406/basic_operations/add_text_ads.py
Python
apache-2.0
3,105
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import sys import warnings from functools import reduce from threading import RLock if sys.version >= '3': basestring = unicode = str else: from itertools import imap as map from pyspark import since from pyspark.rdd import RDD, ignore_unicode_prefix from pyspark.sql.catalog import Catalog from pyspark.sql.conf import RuntimeConfig from pyspark.sql.dataframe import DataFrame from pyspark.sql.readwriter import DataFrameReader from pyspark.sql.streaming import DataStreamReader from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \ _infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string from pyspark.sql.utils import install_exception_handler __all__ = ["SparkSession"] def _monkey_patch_RDD(sparkSession): def toDF(self, schema=None, sampleRatio=None): """ Converts current :class:`RDD` into a :class:`DataFrame` This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)`` :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns :param samplingRatio: the sample ratio of rows used for inferring :return: a DataFrame >>> rdd.toDF().collect() [Row(name=u'Alice', age=1)] """ 
return sparkSession.createDataFrame(self, schema, sampleRatio) RDD.toDF = toDF class SparkSession(object): """The entry point to programming Spark with the Dataset and DataFrame API. A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as tables, execute SQL over tables, cache tables, and read parquet files. To create a SparkSession, use the following builder pattern: >>> spark = SparkSession.builder \\ ... .master("local") \\ ... .appName("Word Count") \\ ... .config("spark.some.config.option", "some-value") \\ ... .getOrCreate() """ class Builder(object): """Builder for :class:`SparkSession`. """ _lock = RLock() _options = {} @since(2.0) def config(self, key=None, value=None, conf=None): """Sets a config option. Options set using this method are automatically propagated to both :class:`SparkConf` and :class:`SparkSession`'s own configuration. For an existing SparkConf, use `conf` parameter. >>> from pyspark.conf import SparkConf >>> SparkSession.builder.config(conf=SparkConf()) <pyspark.sql.session... For a (key, value) pair, you can omit parameter names. >>> SparkSession.builder.config("spark.some.config.option", "some-value") <pyspark.sql.session... :param key: a key name string for configuration property :param value: a value for configuration property :param conf: an instance of :class:`SparkConf` """ with self._lock: if conf is None: self._options[key] = str(value) else: for (k, v) in conf.getAll(): self._options[k] = v return self @since(2.0) def master(self, master): """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]" to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. :param master: a url for spark master """ return self.config("spark.master", master) @since(2.0) def appName(self, name): """Sets a name for the application, which will be shown in the Spark web UI. If no application name is set, a randomly generated name will be used. 
:param name: an application name """ return self.config("spark.app.name", name) @since(2.0) def enableHiveSupport(self): """Enables Hive support, including connectivity to a persistent Hive metastore, support for Hive serdes, and Hive user-defined functions. """ return self.config("spark.sql.catalogImplementation", "hive") @since(2.0) def getOrCreate(self): """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a new one based on the options set in this builder. This method first checks whether there is a valid global default SparkSession, and if yes, return that one. If no valid global default SparkSession exists, the method creates a new SparkSession and assigns the newly created SparkSession as the global default. >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate() >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1" True In case an existing SparkSession is returned, the config options specified in this builder will be applied to the existing SparkSession. >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate() >>> s1.conf.get("k1") == s2.conf.get("k1") True >>> s1.conf.get("k2") == s2.conf.get("k2") True """ with self._lock: from pyspark.context import SparkContext from pyspark.conf import SparkConf session = SparkSession._instantiatedSession if session is None or session._sc._jsc is None: sparkConf = SparkConf() for key, value in self._options.items(): sparkConf.set(key, value) sc = SparkContext.getOrCreate(sparkConf) # This SparkContext may be an existing one. for key, value in self._options.items(): # we need to propagate the confs # before we create the SparkSession. Otherwise, confs like # warehouse path and metastore url will not be set correctly ( # these confs cannot be changed once the SparkSession is created). 
sc._conf.set(key, value) session = SparkSession(sc) for key, value in self._options.items(): session._jsparkSession.sessionState().conf().setConfString(key, value) for key, value in self._options.items(): session.sparkContext._conf.set(key, value) return session builder = Builder() _instantiatedSession = None @ignore_unicode_prefix def __init__(self, sparkContext, jsparkSession=None): """Creates a new SparkSession. >>> from datetime import datetime >>> spark = SparkSession(sc) >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1, ... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1), ... time=datetime(2014, 8, 1, 14, 1, 5))]) >>> df = allTypes.toDF() >>> df.createOrReplaceTempView("allTypes") >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a ' ... 'from allTypes where b and i > 0').collect() [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \ dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)] >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect() [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])] """ from pyspark.sql.context import SQLContext self._sc = sparkContext self._jsc = self._sc._jsc self._jvm = self._sc._jvm if jsparkSession is None: jsparkSession = self._jvm.SparkSession(self._jsc.sc()) self._jsparkSession = jsparkSession self._jwrapped = self._jsparkSession.sqlContext() self._wrapped = SQLContext(self._sc, self, self._jwrapped) _monkey_patch_RDD(self) install_exception_handler() # If we had an instantiated SparkSession attached with a SparkContext # which is stopped now, we need to renew the instantiated SparkSession. # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate. 
if SparkSession._instantiatedSession is None \ or SparkSession._instantiatedSession._sc._jsc is None: SparkSession._instantiatedSession = self def _repr_html_(self): return """ <div> <p><b>SparkSession - {catalogImplementation}</b></p> {sc_HTML} </div> """.format( catalogImplementation=self.conf.get("spark.sql.catalogImplementation"), sc_HTML=self.sparkContext._repr_html_() ) @since(2.0) def newSession(self): """ Returns a new SparkSession as new session, that has separate SQLConf, registered temporary views and UDFs, but shared SparkContext and table cache. """ return self.__class__(self._sc, self._jsparkSession.newSession()) @property @since(2.0) def sparkContext(self): """Returns the underlying :class:`SparkContext`.""" return self._sc @property @since(2.0) def version(self): """The version of Spark on which this application is running.""" return self._jsparkSession.version() @property @since(2.0) def conf(self): """Runtime configuration interface for Spark. This is the interface through which the user can get and set all Spark and Hadoop configurations that are relevant to Spark SQL. When getting the value of a config, this defaults to the value set in the underlying :class:`SparkContext`, if any. """ if not hasattr(self, "_conf"): self._conf = RuntimeConfig(self._jsparkSession.conf()) return self._conf @property @since(2.0) def catalog(self): """Interface through which the user may create, drop, alter or query underlying databases, tables, functions etc. """ if not hasattr(self, "_catalog"): self._catalog = Catalog(self) return self._catalog @property @since(2.0) def udf(self): """Returns a :class:`UDFRegistration` for UDF registration. 
:return: :class:`UDFRegistration` """ from pyspark.sql.context import UDFRegistration return UDFRegistration(self._wrapped) @since(2.0) def range(self, start, end=None, step=1, numPartitions=None): """ Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with step value ``step``. :param start: the start value :param end: the end value (exclusive) :param step: the incremental step (default: 1) :param numPartitions: the number of partitions of the DataFrame :return: :class:`DataFrame` >>> spark.range(1, 7, 2).collect() [Row(id=1), Row(id=3), Row(id=5)] If only one argument is specified, it will be used as the end value. >>> spark.range(3).collect() [Row(id=0), Row(id=1), Row(id=2)] """ if numPartitions is None: numPartitions = self._sc.defaultParallelism if end is None: jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions)) else: jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions)) return DataFrame(jdf, self._wrapped) def _inferSchemaFromList(self, data): """ Infer schema from list of Row or tuple. :param data: list of Row or tuple :return: :class:`pyspark.sql.types.StructType` """ if not data: raise ValueError("can not infer schema from empty dataset") first = data[0] if type(first) is dict: warnings.warn("inferring schema from dict is deprecated," "please use pyspark.sql.Row instead") schema = reduce(_merge_type, map(_infer_schema, data)) if _has_nulltype(schema): raise ValueError("Some of types cannot be determined after inferring") return schema def _inferSchema(self, rdd, samplingRatio=None): """ Infer schema from an RDD of Row or tuple. 
:param rdd: an RDD of Row or tuple :param samplingRatio: sampling ratio, or no sampling (default) :return: :class:`pyspark.sql.types.StructType` """ first = rdd.first() if not first: raise ValueError("The first row in RDD is empty, " "can not infer schema") if type(first) is dict: warnings.warn("Using RDD of dict to inferSchema is deprecated. " "Use pyspark.sql.Row instead") if samplingRatio is None: schema = _infer_schema(first) if _has_nulltype(schema): for row in rdd.take(100)[1:]: schema = _merge_type(schema, _infer_schema(row)) if not _has_nulltype(schema): break else: raise ValueError("Some of types cannot be determined by the " "first 100 rows, please try again with sampling") else: if samplingRatio < 0.99: rdd = rdd.sample(False, float(samplingRatio)) schema = rdd.map(_infer_schema).reduce(_merge_type) return schema def _createFromRDD(self, rdd, schema, samplingRatio): """ Create an RDD for DataFrame from an existing RDD, returns the RDD and schema. """ if schema is None or isinstance(schema, (list, tuple)): struct = self._inferSchema(rdd, samplingRatio) converter = _create_converter(struct) rdd = rdd.map(converter) if isinstance(schema, (list, tuple)): for i, name in enumerate(schema): struct.fields[i].name = name struct.names[i] = name schema = struct elif not isinstance(schema, StructType): raise TypeError("schema should be StructType or list or None, but got: %s" % schema) # convert python objects to sql data rdd = rdd.map(schema.toInternal) return rdd, schema def _createFromLocal(self, data, schema): """ Create an RDD for DataFrame from a list or pandas.DataFrame, returns the RDD and schema. 
""" # make sure data could consumed multiple times if not isinstance(data, list): data = list(data) if schema is None or isinstance(schema, (list, tuple)): struct = self._inferSchemaFromList(data) converter = _create_converter(struct) data = map(converter, data) if isinstance(schema, (list, tuple)): for i, name in enumerate(schema): struct.fields[i].name = name struct.names[i] = name schema = struct elif not isinstance(schema, StructType): raise TypeError("schema should be StructType or list or None, but got: %s" % schema) # convert python objects to sql data data = [schema.toInternal(row) for row in data] return self._sc.parallelize(data), schema @since(2.0) @ignore_unicode_prefix def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True): """ Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`. When ``schema`` is a list of column names, the type of each column will be inferred from ``data``. When ``schema`` is ``None``, it will try to infer the schema (column names and types) from ``data``, which should be an RDD of :class:`Row`, or :class:`namedtuple`, or :class:`dict`. When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match the real data, or an exception will be thrown at runtime. If the given schema is not :class:`pyspark.sql.types.StructType`, it will be wrapped into a :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value", each record will also be wrapped into a tuple, which can be converted to row later. If schema inference is needed, ``samplingRatio`` is used to determined the ratio of rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``. :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean, etc.), or :class:`list`, or :class:`pandas.DataFrame`. 
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of column names, default is ``None``. The data type string format equals to :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use ``int`` as a short name for ``IntegerType``. :param samplingRatio: the sample ratio of rows used for inferring :param verifySchema: verify data types of every row against schema. :return: :class:`DataFrame` .. versionchanged:: 2.1 Added verifySchema. >>> l = [('Alice', 1)] >>> spark.createDataFrame(l).collect() [Row(_1=u'Alice', _2=1)] >>> spark.createDataFrame(l, ['name', 'age']).collect() [Row(name=u'Alice', age=1)] >>> d = [{'name': 'Alice', 'age': 1}] >>> spark.createDataFrame(d).collect() [Row(age=1, name=u'Alice')] >>> rdd = sc.parallelize(l) >>> spark.createDataFrame(rdd).collect() [Row(_1=u'Alice', _2=1)] >>> df = spark.createDataFrame(rdd, ['name', 'age']) >>> df.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql import Row >>> Person = Row('name', 'age') >>> person = rdd.map(lambda r: Person(*r)) >>> df2 = spark.createDataFrame(person) >>> df2.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql.types import * >>> schema = StructType([ ... StructField("name", StringType(), True), ... 
StructField("age", IntegerType(), True)]) >>> df3 = spark.createDataFrame(rdd, schema) >>> df3.collect() [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP [Row(0=1, 1=2)] >>> spark.createDataFrame(rdd, "a: string, b: int").collect() [Row(a=u'Alice', b=1)] >>> rdd = rdd.map(lambda row: row[1]) >>> spark.createDataFrame(rdd, "int").collect() [Row(value=1)] >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Py4JJavaError: ... """ if isinstance(data, DataFrame): raise TypeError("data is already a DataFrame") if isinstance(schema, basestring): schema = _parse_datatype_string(schema) try: import pandas has_pandas = True except Exception: has_pandas = False if has_pandas and isinstance(data, pandas.DataFrame): if schema is None: schema = [str(x) for x in data.columns] data = [r.tolist() for r in data.to_records(index=False)] verify_func = _verify_type if verifySchema else lambda _, t: True if isinstance(schema, StructType): def prepare(obj): verify_func(obj, schema) return obj elif isinstance(schema, DataType): dataType = schema schema = StructType().add("value", schema) def prepare(obj): verify_func(obj, dataType) return obj, else: if isinstance(schema, list): schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema] prepare = lambda obj: obj if isinstance(data, RDD): rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio) else: rdd, schema = self._createFromLocal(map(prepare, data), schema) jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd()) jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json()) df = DataFrame(jdf, self._wrapped) df._schema = schema return df @ignore_unicode_prefix @since(2.0) def sql(self, sqlQuery): """Returns a :class:`DataFrame` representing the 
result of the given query. :return: :class:`DataFrame` >>> df.createOrReplaceTempView("table1") >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1") >>> df2.collect() [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')] """ return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped) @since(2.0) def table(self, tableName): """Returns the specified table as a :class:`DataFrame`. :return: :class:`DataFrame` >>> df.createOrReplaceTempView("table1") >>> df2 = spark.table("table1") >>> sorted(df.collect()) == sorted(df2.collect()) True """ return DataFrame(self._jsparkSession.table(tableName), self._wrapped) @property @since(2.0) def read(self): """ Returns a :class:`DataFrameReader` that can be used to read data in as a :class:`DataFrame`. :return: :class:`DataFrameReader` """ return DataFrameReader(self._wrapped) @property @since(2.0) def readStream(self): """ Returns a :class:`DataStreamReader` that can be used to read data streams as a streaming :class:`DataFrame`. .. note:: Evolving. :return: :class:`DataStreamReader` """ return DataStreamReader(self._wrapped) @property @since(2.0) def streams(self): """Returns a :class:`StreamingQueryManager` that allows managing all the :class:`StreamingQuery` StreamingQueries active on `this` context. .. note:: Evolving. :return: :class:`StreamingQueryManager` """ from pyspark.sql.streaming import StreamingQueryManager return StreamingQueryManager(self._jsparkSession.streams()) @since(2.0) def stop(self): """Stop the underlying :class:`SparkContext`. """ self._sc.stop() SparkSession._instantiatedSession = None @since(2.0) def __enter__(self): """ Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax. """ return self @since(2.0) def __exit__(self, exc_type, exc_val, exc_tb): """ Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax. Specifically stop the SparkSession on exit of the with block. 
""" self.stop() def _test(): import os import doctest from pyspark.context import SparkContext from pyspark.sql import Row import pyspark.sql.session os.chdir(os.environ["SPARK_HOME"]) globs = pyspark.sql.session.__dict__.copy() sc = SparkContext('local[4]', 'PythonTest') globs['sc'] = sc globs['spark'] = SparkSession(sc) globs['rdd'] = rdd = sc.parallelize( [Row(field1=1, field2="row1"), Row(field1=2, field2="row2"), Row(field1=3, field2="row3")]) globs['df'] = rdd.toDF() (failure_count, test_count) = doctest.testmod( pyspark.sql.session, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
bOOm-X/spark
python/pyspark/sql/session.py
Python
apache-2.0
25,557
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This pip smoke test verifies dependency files exist in the pip package. This script runs bazel queries to see what python files are required by the tests and ensures they are in the pip package superset. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import subprocess PIP_PACKAGE_QUERY = """bazel query \ 'deps(//tensorflow/tools/pip_package:build_pip_package)'""" PY_TEST_QUERY = """bazel query 'deps(\ filter("^((?!benchmark).)*$",\ kind(py_test,\ //tensorflow/python/... \ + //tensorflow/tensorboard/... \ + //tensorflow/contrib/... \ - attr(tags, "manual|no_pip", //tensorflow/...))), 1)'""" # Hard-coded blacklist of files if not included in pip package # TODO(amitpatankar): Clean up blacklist. 
BLACKLIST = [ "//tensorflow/python:extra_py_tests_deps", "//tensorflow/cc/saved_model:saved_model_half_plus_two", "//tensorflow:no_tensorflow_py_deps", "//tensorflow/python:test_ops_2", "//tensorflow/python:tf_optimizer", "//tensorflow/python:compare_test_proto_py", "//tensorflow/core:image_testdata", "//tensorflow/core/kernels/cloud:bigquery_reader_ops", "//tensorflow/python/feature_column:vocabulary_testdata", "//tensorflow/python:framework/test_file_system.so", # contrib "//tensorflow/contrib/session_bundle:session_bundle_half_plus_two", "//tensorflow/contrib/keras:testing_utils", "//tensorflow/contrib/ffmpeg:test_data", "//tensorflow/contrib/factorization/examples:mnist", "//tensorflow/contrib/factorization/examples:mnist.py", "//tensorflow/contrib/factorization:factorization_py_CYCLIC_DEPENDENCIES_THAT_NEED_TO_GO", # pylint:disable=line-too-long "//tensorflow/contrib/framework:checkpoint_ops_testdata", "//tensorflow/contrib/bayesflow:reinforce_simple_example", "//tensorflow/contrib/bayesflow:examples/reinforce_simple/reinforce_simple_example.py", # pylint:disable=line-too-long ] def main(): """This script runs the pip smoke test. Raises: RuntimeError: If any dependencies for py_tests exist in subSet Prerequisites: 1. Bazel is installed. 2. Running in github repo of tensorflow. 3. Configure has been run. 
""" # pip_package_dependencies_list is the list of included files in pip packages pip_package_dependencies = subprocess.check_output( PIP_PACKAGE_QUERY, shell=True) pip_package_dependencies_list = pip_package_dependencies.strip().split("\n") print("Pip package superset size: %d" % len(pip_package_dependencies_list)) # tf_py_test_dependencies is the list of dependencies for all python # tests in tensorflow tf_py_test_dependencies = subprocess.check_output( PY_TEST_QUERY, shell=True) tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split("\n") print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list)) missing_dependencies = [] # File extensions and endings to ignore ignore_extensions = ["_test", "_test.py"] ignored_files = 0 blacklisted_files = len(BLACKLIST) # Compare dependencies for dependency in tf_py_test_dependencies_list: if dependency and dependency.startswith("//tensorflow"): ignore = False # Ignore extensions if any(dependency.endswith(ext) for ext in ignore_extensions): ignore = True ignored_files += 1 # Check if the dependency is in the pip package, the blacklist, or # should be ignored because of its file extension if (ignore or dependency in pip_package_dependencies_list or dependency in BLACKLIST): continue else: missing_dependencies.append(dependency) print("Ignored files: %d" % ignored_files) print("Blacklisted files: %d" % blacklisted_files) if missing_dependencies: print("Missing the following dependencies from pip_packages:") for missing_dependency in missing_dependencies: print("\nMissing dependency: %s " % missing_dependency) print("Affected Tests:") rdep_query = """bazel query 'rdeps(kind(py_test, \ //tensorflow/python/...), %s)'""" % missing_dependency affected_tests = subprocess.check_output(rdep_query, shell=True) affected_tests_list = affected_tests.split("\n")[:-2] print("\n".join(affected_tests_list)) raise RuntimeError("""One or more dependencies are not in the pip package. 
Please either blacklist the dependencies in tensorflow/tensorflow/tensorflow/tools/pip_package/pip_smoke_test.py or add them to tensorflow/tensorflow/tensorflow/tools/pip_package/BUILD.""") else: print("TEST PASSED") if __name__ == "__main__": main()
nightjean/Deep-Learning
tensorflow/tools/pip_package/pip_smoke_test.py
Python
apache-2.0
5,367
#!/usr/bin/env python
# encoding: utf-8
"""
knockouts.py

Created by Nikolaus Sonnenschein on 2008-02-25.
Copyright (c) 2008 Jacobs University of Bremen. All rights reserved.
"""

from metabolism import Metabolism
from util import ImportCplex
from ifba.glpki.glpki import *


class KnockOut(object):
    """A class putting the necessary functionality for GeneKnockOuts to the Metabolism class.

    The wrapped ``lp`` object is expected to expose ``smcp.presolve`` and
    ``modifyColumnBounds`` (the GlpkWrap LP interface) -- TODO confirm.
    """

    def __init__(self, lp):
        # super(KnockOut, self).__init__(lp)
        self.lp = lp
        # Presolve is switched off before the repeated re-optimisations
        # (NOTE(review): presumably so warm starts reuse the basis -- confirm).
        self.lp.smcp.presolve = GLP_OFF

    def knockOut(self, gene):
        """Knock out a single gene by clamping its column bounds to zero."""
        self.lp.modifyColumnBounds({gene: (0., 0.)})

    def knockOuts(self, listOfGenes):
        """Knock out every gene in ``listOfGenes``."""
        for gene in listOfGenes:
            self.knockOut(gene)


if __name__ == '__main__':

    def init(path):
        """Load a CPLEX LP file and wrap it in a Metabolism object."""
        struct = ImportCplex(path)
        return Metabolism(struct)

    def main():
        """Demo: solve the model, knock out both PGK directions, re-solve."""
        ecoli = init('test_data/model.lp')
        ecoli.simplex()
        print(ecoli.getObjVal())
        # Knock out the PGK reaction in both directions and re-optimise.
        KnockOut(ecoli).knockOuts(['R("R_PGK")', 'R("R_PGK_Rev")'])
        ecoli.simplex()
        print(ecoli.getObjVal())

    main()
phantomas1234/fbaproject
ifba/GlpkWrap/knockouts.py
Python
mit
1,226
from yanntricks import *


def SenoTopologo():
    """Draw the curve x*sin(1/x) on [-0.5, 0.5] and write the picture file."""
    pspict, fig = SinglePicture("SenoTopologo")
    pspict.dilatation(5)

    x = var('x')
    oscillating = phyFunction(x * sin(1 / x))
    curve = oscillating.graph(-0.5, 0.5)
    # Dense sampling: the function oscillates faster and faster near x = 0.
    curve.linear_plotpoints = 2000
    pspict.DrawGraphs(curve)

    pspict.axes.Dx = 0.3
    pspict.axes.Dy = 0.3
    pspict.DrawDefaultAxes()

    fig.conclude()
    fig.write_the_file()
LaurentClaessens/mazhe
src_yanntricks/yanntricksSenoTopologo.py
Python
gpl-3.0
380
""" This thorium state is used to track the status beacon events and keep track of the active status of minions .. versionadded:: 2016.11.0 """ import fnmatch import time def reg(name): """ Activate this register to turn on a minion status tracking register, this register keeps the current status beacon data and the time that each beacon was last checked in. """ ret = {"name": name, "changes": {}, "comment": "", "result": True} now = time.time() if "status" not in __reg__: __reg__["status"] = {} __reg__["status"]["val"] = {} for event in __events__: if fnmatch.fnmatch(event["tag"], "salt/beacon/*/status/*"): # Got one! idata = {"recv_time": now} for key in event["data"]["data"]: if key in ("id", "recv_time"): continue idata[key] = event["data"]["data"][key] __reg__["status"]["val"][event["data"]["id"]] = idata ret["changes"][event["data"]["id"]] = True return ret
saltstack/salt
salt/thorium/status.py
Python
apache-2.0
1,056
from __future__ import absolute_import, print_function, unicode_literals

import json
from io import StringIO as IO

import yaml

from adr import config
from adr import query
from adr.query import format_query


class RunQuery(object):
    """Stand-in for ``query.query_activedata`` that returns canned data.

    Replaces the network call so tests run against the fixture's
    ``mock_data`` instead of a live ActiveData instance.
    """

    def __init__(self, query_test):
        # query_test: test-case dict; must contain a 'mock_data' entry.
        # Presumably supplied by a parametrized pytest fixture -- confirm
        # in conftest.py.
        self.query_test = query_test

    def __call__(self, query, *args, **kwargs):
        # The actual query and arguments are ignored; always return the
        # canned response.
        return self.query_test['mock_data']


def test_query(monkeypatch, query_test, set_config):
    """Run one query fixture and compare its output to the expected value.

    ``monkeypatch`` is the standard pytest fixture; ``query_test`` and
    ``set_config`` are presumably project fixtures (the parametrized test
    case and a config setter) -- TODO confirm in conftest.py.
    """
    # Baseline configuration: JSON output, debugging off.
    set_config(**{
        'query': query_test['query'],
        'fmt': 'json',
        'debug': False,
        'debug_url': "https://activedata.allizom.org/tools/query.html#query_id={}",
    })
    # Replace the live ActiveData call with the canned mock data.
    monkeypatch.setattr(query, 'query_activedata', RunQuery(query_test))

    def print_diff():
        # Dump actual vs. expected as YAML so failures are easy to diff and
        # the actual result can be copy/pasted back into the fixture.
        # NOTE: ``result`` is resolved lazily from the enclosing scope; it
        # is assigned below, before print_diff() is called.
        buf = IO()
        yaml.dump(result, buf)
        print("Yaml formatted result for copy/paste:")
        print(buf.getvalue())

        buf = IO()
        yaml.dump(query_test['expected'], buf)
        print("\nYaml formatted expected:")
        print(buf.getvalue())

    if "--debug" in query_test["args"]:
        # Debug mode: JSON result plus a debug URL built from the saved
        # query id.
        set_config(debug=True)
        monkeypatch.setattr(query, 'query_activedata', RunQuery(query_test))
        formatted_query = format_query(query_test['query'])
        result = json.loads(formatted_query[0])
        debug_url = formatted_query[1]
        print_diff()
        assert result == query_test["expected"]
        assert debug_url == config.debug_url.format(
            query_test["expected"]["meta"]["saved_as"])
    elif "--table" in query_test["args"]:
        # Table mode: plain-text table output, no debug URL.
        set_config(fmt='table')
        monkeypatch.setattr(query, 'query_activedata', RunQuery(query_test))
        formatted_query = format_query(query_test['query'])
        result = formatted_query[0]
        debug_url = formatted_query[1]
        expected = query_test["expected"]["data"]
        print("Table formatted result:")
        print(result)
        print("Table formatted expected:")
        print(expected)
        assert result == expected
        assert debug_url is None
    else:
        # Default mode: JSON result, no debug URL.
        formatted_query = format_query(query_test['query'])
        result = json.loads(formatted_query[0])
        debug_url = formatted_query[1]
        print_diff()
        assert result == query_test["expected"]
        assert debug_url is None
ahal/active-data-recipes
test/test_queries.py
Python
mpl-2.0
2,301
import time  # time.sleep() provides the pauses between tones and words

# Morse alphabet lookup table. In each symbol list: 0 = dot, 1 = dash,
# 2 = inter-word space.
morse_code = {
    "a": [0, 1],
    "b": [1, 0, 0, 0],
    "c": [1, 0, 1, 0],
    "d": [1, 0, 0],
    "e": [0],
    "f": [0, 0, 1, 0],
    "g": [1, 1, 0],
    "h": [0, 0, 0, 0],
    "i": [0, 0],
    "j": [0, 1, 1, 1],
    "k": [1, 0, 1],
    "l": [0, 1, 0, 0],
    "m": [1, 1],
    "n": [1, 0],
    "o": [1, 1, 1],
    "p": [0, 1, 1, 0],
    "q": [1, 1, 0, 1],
    "r": [0, 1, 0],
    "s": [0, 0, 0],
    "t": [1],
    "u": [0, 0, 1],
    "v": [0, 0, 0, 1],
    "w": [0, 1, 1],
    "x": [1, 0, 0, 1],
    "y": [1, 0, 1, 1],
    "z": [1, 1, 0, 0],
    " ": [2]
}


def encode(text):
    """Translate *text* into a list of per-character morse symbol lists.

    The input is lower-cased first. Raises KeyError for characters that
    are not letters or spaces (same behaviour as the original loop).
    """
    return [morse_code[char] for char in text.lower()]


def play(symbols):
    """Play a list of morse symbol lists as beeps (Windows only)."""
    # Imported lazily so that encode() stays usable (and testable) on
    # platforms without the winsound module.
    import winsound
    for letter in symbols:
        for symbol in letter:
            if symbol == 0:
                winsound.Beep(1000, 500)   # dot
                time.sleep(0.2)
            elif symbol == 1:
                winsound.Beep(1000, 980)   # dash
                time.sleep(0.1)
            elif symbol == 2:
                time.sleep(2.1)            # gap between words


def main():
    """Repeatedly prompt for a word and play it back as morse code."""
    while True:  # Loops program
        play(encode(input("Enter a word\n> ")))
        time.sleep(1)  # pause before the next prompt


if __name__ == "__main__":
    main()
MorseDecoder/Morse-Code-Project
Input/User Input.py
Python
gpl-3.0
1,443
# analysis profile data from .base import * class FDPSProfile(DictNpArrayMix): """ FDPS time profile for tree for one tree step Keys: (class members) collect_sam_ptcl (1D): collect sample decompose_domain (1D): decompose domains exchange_ptcl (1D): exchange particles *set_particle_local_tree (1D): set particle in local tree *set_particle_global_tree (1D): set particle in local tree make_local_tree (1D): make local tree make_global_tree (1D): make global tree *set_root_cell (1D): set root cell calc_force (1D): calculate force calc_mom_loc_tree: calculate superparticle momentum in local tree calc_mom_gb_tree: calcualte superparticle momentum in global tree make_LET_1st: make local essential tree 1st make_LET_2nd: make local essential tree 2nd exchange_LET_1st: exchange local essential tree 1st exchange_LET_2nd: exchange local essential tree 2nd *write_back (1D): write back PS: the prefix '*" indicates that these items do not exist for the old PeTar version before 984 Using the keyword argument 'FDPS_version=old' in the initialization for the old version. 
""" def __init__ (self, _dat=None, _offset=int(0), _append=False, **kwargs): keys = [["collect_sam_ptcl",np.float64], ["decompose_domain",np.float64], ["exchange_ptcl",np.float64], ["set_particle_local_tree",np.float64],["set_particle_global_tree",np.float64], ["make_local_tree",np.float64], ["make_global_tree",np.float64], ["set_root_cell",np.float64], ["calc_force",np.float64], ["calc_mom_loc_tree",np.float64], ["calc_mom_gb_tree",np.float64], ["make_LET_1st",np.float64], ["make_LET_2nd",np.float64], ["exchange_LET_1st",np.float64], ["exchange_LET_2nd",np.float64], ["write_back",np.float64]] if ('FDPS_version' in kwargs.keys()): if (kwargs['FDPS_version']=='old'): keys = [["collect_sam_ptcl",np.float64], ["decompose_domain",np.float64], ["exchange_ptcl",np.float64], ["make_local_tree",np.float64], ["make_global_tree",np.float64], ["calc_force",np.float64], ["calc_mom_loc_tree",np.float64], ["calc_mom_gb_tree",np.float64], ["make_LET_1st",np.float64], ["make_LET_2nd",np.float64], ["exchange_LET_1st",np.float64], ["exchange_LET_2nd",np.float64]] DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs) class PeTarProfile(DictNpArrayMix): """ PeTar computing wallclock time profile for one tree step Keys: (class members) total (1D): total time per tree step hard_single (1D): short-range integration of clusters with only one particle (pure drift) hard_isolated (1D): short-range integration of clusters with multiple particles in local MPI process (Hermite + SDAR) hard_connected (1D): short-range integration of clusters with multiple particles crossing multiple MPI processes (Hermite + SDAR; MPI communication) hard_interrupt (1D): short-range integration of interrupted clusters tree_neighbor (1D): particle-tree construction of n_real and neighbor searching tree_force (1D): particle-tree construction of n_all and tree forace calculattion force_correct (1D): force correction for changeover function kick (1D): kick particle velocity search_cluster (1D): find 
clusters for short-range interactions create_group (1D): find particle groups and create artificial particles domain_decomp (1D): domain decomposition exchange_ptcl (1D): exchange particles between MPI processes output (1D): output snapshot and data status (1D): calculate status of system other (1D): other cost """ def __init__ (self, _dat=None, _offset=int(0), _append=False, **kwargs): """ DictNpArrayMix type initialzation, see help(DictNpArrayMix.__init__) """ keys = [["total",np.float64], ["hard_single",np.float64], ["hard_isolated",np.float64], ["hard_connected",np.float64], ["hard_interrupt",np.float64], ["tree_neighbor",np.float64], ["tree_force",np.float64], ["force_correct",np.float64], ["kick",np.float64], ["search_cluster",np.float64], ["create_group",np.float64], ["domain_decomp",np.float64], ["exchange_ptcl",np.float64], ["output",np.float64], ["status",np.float64],["other",np.float64]] DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs) class GPUProfile(DictNpArrayMix): """ GPU time profile for one tree force calculation Keys: (class members) copy: copy data for sending send: host to GPU memory sending receive: GPU to host memory receiving calc_force: GPU force calculation n_walk: number of multiple walks (FDPS) n_epi: total number of i particles n_epj: total number of j particles n_spj: total number of super particles n_call: number of calls of force kernel function """ def __init__ (self, _dat=None, _offset=int(0), _append=False, **kwargs): """ DictNpArrayMix type initialzation, see help(DictNpArrayMix.__init__) """ keys = [["copy",np.float64], ["send",np.float64], ["receive",np.float64], ["calc_force",np.float64], ["n_walk",np.int64], ["n_epi",np.int64], ["n_epj",np.int64], ["n_spj",np.int64], ["n_call",np.int64]] DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs) class PeTarCount(DictNpArrayMix): """ PeTar number count for one tree step Keys: (class members) hard_single: number of particles in single clusters 
hard_isolated: number of particles in isolated clusters hard_connected: number of particles in connected clusters hard_interrupt: number of clusters suffering interruptions cluster_isolated: number of clusters with multiple particles in local MPI process cluster_connected: number of clusters with multiple particles crosing multiple MPI processes AR_step_sum: total AR steps AR_tsyn_step_sum: total AR steps for time synchronization AR_group_number: number of AR groups iso_group_number: number of isolated AR groups Hermite_step_sum: total Hermite steps n_neighbor_zero: particles have zero neighbors in Hermite Ep_Ep_interaction: number of essential (active) i and j particle interactions Ep_Sp_interaction: number of essential (active) i and superparticle interactions """ def __init__(self, _dat=None, _offset=int(0), _append=False, **kwargs): """ DictNpArrayMix type initialzation, see help(DictNpArrayMix.__init__) """ keys = [["hard_single",np.int64], ["hard_isolated",np.int64], ["hard_connected",np.int64], ["hard_interrupt",np.int64], ["cluster_isolated",np.int64], ["cluster_connected",np.int64], ["AR_step_sum",np.int64], ["AR_tsyn_step_sum",np.int64], ["AR_group_number",np.int64], ["iso_group_number",np.int64], ["Hermite_step_sum",np.int64], ["n_neighbor_zero",np.int64], ["Ep_Ep_interaction",np.int64], ["Ep_Sp_interaction",np.int64]] DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs) class Profile(DictNpArrayMix): """ Profile class Keys: (class members) rank (1D): MPI rank time (1D): evolved time nstep (1D): number of steps per output n_loc (1D): number of partiles locally comp (PeTarProfile): time profiling for each components of PeTar comp_bar (PeTarProfile): MPI barrier waiting time of each components of PeTar tree_soft (FDPSProfile): FDPS long-range force particle-tree profile tree_nb (FDPSProfile): FDPS particle-tree for neighbor searching if keyword arguments "use_gpu" == True: gpu (GPUProfile): GPU profile for tree force calculation count 
(PeTarCount): number counts """ def __init__ (self, _dat=None, _offset=int(0), _append=False, **kwargs): """ DictNpArrayMix type initialzation, see help(DictNpArrayMix.__init__) Parameters ---------- keyword arguments: use_gpu: bool (True) whether cuda is used FDPS_version: string ('new') when 'old' is set, use the old data format before the version 984 """ use_gpu=True if ('use_gpu' in kwargs.keys()): use_gpu=kwargs['use_gpu'] if (use_gpu): keys = [['rank',np.int64], ['time',np.float64], ['nstep',np.int64], ['n_loc',np.int64], ['comp',PeTarProfile], ['comp_bar', PeTarProfile], ['tree_soft', FDPSProfile], ['tree_nb', FDPSProfile], ['gpu',GPUProfile],['count',PeTarCount]] DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs) else: keys = [['rank',np.int64], ['time',np.float64], ['nstep',np.int64], ['n_loc',np.int64], ['comp',PeTarProfile], ['comp_bar', PeTarProfile], ['tree_soft', FDPSProfile], ['tree_nb', FDPSProfile], ['count',PeTarCount]] DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs)
P3T-ARC/P3TARC
tools/analysis/profile.py
Python
gpl-3.0
9,230
from utool.experimental import file_organizer import utool as ut SourceDir = file_organizer.SourceDir d1 = self = SourceDir('/media/joncrall/store/Videos') d2 = SourceDir('/media/joncrall/media/TV') self, other = d1, d2 # TODO: NEED TO PURGE EVERY FILE NAMED Thumbs.db comp = d1.isect_info(d2) # Cascading duplicate checks d1.get_prop('md5_stride') d2.get_prop('md5_stride') d1.populate() d2.populate() self.populate() # strategy """ GOAL: organize files, remove duplicates, put data on a RAID * Need to ensure that everything on backup exists in either media or store. * Move things that are ensured to exist to a special folder * Do this until backup can be formatted * Need to have notion of what needs to go where. * Needs to figure out what goes in videos / movies / tv / etc.. """ def populate2(self): # Find all mounted drives import csv with open('/proc/mounts') as file_: row_iter = csv.reader(file_, delimiter=str(' ')) rows = [ut.lmap(ut.ensure_unicode, row) for row in row_iter] mount_dirs = [p for p in ut.take_column(rows, 1) if p.startswith('/media')] mount_dirs = [ '/', '/media/joncrall/store', # '/media/joncrall/media', # '/media/joncrall/backup', ] # Calculate the approx number of files in the system import os for mount in mount_dirs: print('mount = %r' % (mount,)) result = os.statvfs(mount) # print('result = %r' % (result,)) result.f_files print('result.f_files = %r' % (result.f_files,)) result.f_ffree # out = ut.cmd2('df --inodes / %s' % p, shell=True, verbose=True)['out'] dpath = '/home/joncrall/code' dpath = '/media/joncrall/store' self = SourceDir(dpath) ext_list = ['.jpg'] ext_regex = '\(' + '\|'.join(ext_list) + '\)' linux_find_command = 'find %s -regex ".*%s"' % (self.dpath, ext_regex) print('linux_find_command = %r' % (linux_find_command,)) nfiles = os.statvfs(self.dpath).f_files import itertools as it import six prog = ut.ProgIter(it.count(), length=nfiles, label='walking filesystem') piter = iter(prog) for root, dirs, files in os.walk(self.dpath, 
topdown=False): for fpath in files: six.next(piter) # import re # row_data = [re.split(' +', line) for line in out.split('\n')] # row_data = list(filter(len, row_data)) # data = ut.CSV(row_data[1:], col_headers=row_data[0])
Erotemic/local
misc/ooo_script.py
Python
gpl-3.0
2,513
# Copyright (c) 2009 Upi Tamminen <desaster@gmail.com> # See the COPYRIGHT file for more information from kippo.core.honeypot import HoneyPotCommand from twisted.internet import reactor import time, re, hashlib, getopt commands = {} class command_ssh(HoneyPotCommand): def start(self): try: optlist, args = getopt.getopt(self.args, '-1246AaCfgKkMNnqsTtVvXxYb:c:D:e:F:i:L:l:m:O:o:p:R:S:w:') except getopt.GetoptError, err: self.writeln('Unrecognized option') self.exit() if not len(args): for l in ( 'usage: ssh [-1246AaCfgKkMNnqsTtVvXxY] [-b bind_address] [-c cipher_spec]', ' [-D [bind_address:]port] [-e escape_char] [-F configfile]', ' [-i identity_file] [-L [bind_address:]port:host:hostport]', ' [-l login_name] [-m mac_spec] [-O ctl_cmd] [-o option] [-p port]', ' [-R [bind_address:]port:host:hostport] [-S ctl_path]', ' [-w local_tun[:remote_tun]] [user@]hostname [command]', ): self.writeln(l) self.exit() return user, host = 'root', args[0] for opt in optlist: if opt[0] == '-l': user = opt[1] if args[0].count('@'): user, host = args[0].split('@', 1) if re.match('^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$', host): self.ip = host else: s = hashlib.md5(host).hexdigest() self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])]) self.host = host self.user = user self.writeln('The authenticity of host \'%s (%s)\' can\'t be established.' % \ (self.host, self.ip)) self.writeln('RSA key fingerprint is 9d:30:97:8a:9e:48:0d:de:04:8d:76:3a:7b:4b:30:f8.') self.write('Are you sure you want to continue connecting (yes/no)? ') self.callbacks = [self.yesno, self.wait] def yesno(self, line): self.writeln( 'Warning: Permanently added \'%s\' (RSA) to the list of known hosts.' 
% \ self.host) self.write('%s@%s\'s password: ' % (self.user, self.host)) self.honeypot.password_input = True def wait(self, line): reactor.callLater(2, self.finish, line) def finish(self, line): self.pause = False rest, host = self.host, 'localhost' rest = self.host.strip().split('.') if len(rest) and rest[0].isalpha(): host = rest[0] self.honeypot.hostname = host self.honeypot.cwd = '/root' if not self.fs.exists(self.honeypot.cwd): self.honeypot.cwd = '/' self.honeypot.password_input = False self.writeln( 'Linux %s 2.6.26-2-686 #1 SMP Wed Nov 4 20:45:37 UTC 2009 i686' % \ self.honeypot.hostname) self.writeln('Last login: %s from 192.168.9.4' % \ time.ctime(time.time() - 123123)) self.exit() def lineReceived(self, line): print 'INPUT (ssh):', line if len(self.callbacks): self.callbacks.pop(0)(line) commands['/usr/bin/ssh'] = command_ssh # vim: set sw=4 et:
jullrich/dshieldhoneypot
kippo/kippo-0.8/kippo/commands/ssh.py
Python
gpl-2.0
3,350
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 by YOUR NAME HERE
#
#    This file is part of RoboComp
#
#    RoboComp is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    RoboComp is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with RoboComp.  If not, see <http://www.gnu.org/licenses/>.
#

# \mainpage RoboComp::rcdns
#
# \section intro_sec Introduction
#
# Some information about the component...
#
# \section interface_sec Interface
#
# Description of the interface provided...
#
# \section install_sec Installation
#
# \subsection install1_ssec Software dependences
# Software dependences....
#
# \subsection install2_ssec Compile and install
# How to compile/install the component...
#
# \section guide_sec User guide
#
# \subsection config_ssec Configuration file
#
# <p>
# The configuration file...
# </p>
#
# \subsection execution_ssec Execution
#
# Just: "${PATH_TO_BINARY}/rcdns --Ice.Config=${PATH_TO_CONFIG_FILE}"
#
# \subsection running_ssec Once running
#
#

import sys, traceback, Ice, IceStorm, subprocess, threading, time, Queue, os, copy

# Ctrl+c handling
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)

from PySide import *
from specificworker import *

# Locate the RoboComp installation; fall back to the default prefix.
ROBOCOMP = ''
try:
    ROBOCOMP = os.environ['ROBOCOMP']
except KeyError:
    print('$ROBOCOMP environment variable not set, using the default value /opt/robocomp')
    ROBOCOMP = '/opt/robocomp'
if len(ROBOCOMP) < 1:
    print('ROBOCOMP environment variable not set! Exiting.')
    sys.exit()

# Load the Slice interface definitions used by this component.
preStr = "-I" + ROBOCOMP + "/interfaces/ --all " + ROBOCOMP + "/interfaces/"
Ice.loadSlice(preStr + "CommonBehavior.ice")
import RoboCompCommonBehavior
Ice.loadSlice(preStr + "rcdns.ice")
import RoboCompRCDNS


class CommonBehaviorI(RoboCompCommonBehavior.CommonBehavior):
    """Ice servant for the standard RoboComp CommonBehavior interface.

    Forwards each call to the worker object (``_handler``).
    """

    def __init__(self, _handler, _communicator):
        self.handler = _handler
        self.communicator = _communicator

    def getFreq(self, current=None):
        # NOTE(review): the handler's value is not returned to the caller;
        # this probably should be ``return self.handler.getFreq()``.
        self.handler.getFreq()

    def setFreq(self, freq, current=None):
        # NOTE(review): ``freq`` is not forwarded to the handler.
        self.handler.setFreq()

    def timeAwake(self, current=None):
        try:
            return self.handler.timeAwake()
        except Exception:
            print('Problem getting timeAwake')

    def killYourSelf(self, current=None):
        self.handler.killYourSelf()

    def getAttrList(self, current=None):
        try:
            return self.handler.getAttrList(self.communicator)
        except Exception:
            print('Problem getting getAttrList')
            traceback.print_exc()
            status = 1
            return


if __name__ == '__main__':
    app = QtCore.QCoreApplication(sys.argv)
    params = copy.deepcopy(sys.argv)
    if len(params) > 1:
        # A config file path was given: normalise it into an Ice option.
        if not params[1].startswith('--Ice.Config='):
            params[1] = '--Ice.Config=' + params[1]
    else:
        # BUG FIX: this branch used to be ``elif len(params) == 0``, which
        # could never be true (argv always contains the program name), so
        # the default config file was never applied when no argument was
        # given on the command line.
        params.append('--Ice.Config=config')
    ic = Ice.initialize(params)
    status = 0
    mprx = {}

    if status == 0:
        worker = SpecificWorker(mprx)

        adapter = ic.createObjectAdapter('rcdns')
        # ``rcdnsI`` is expected to come from the ``specificworker`` star
        # import -- TODO confirm.
        adapter.add(rcdnsI(worker), ic.stringToIdentity('rcdns'))
        adapter.activate()
        # adapter.add(CommonBehaviorI(<LOWER>I, ic), ic.stringToIdentity('commonbehavior'))

        app.exec_()

    if ic:
        try:
            ic.destroy()
        except Exception:
            traceback.print_exc()
            status = 1
rajathkumarmp/robocomp
tools/rcdns/src/rcdns.py
Python
gpl-3.0
3,535
# Copyright (C) 2008-2010 Adam Olsen # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # # The developers of the Exaile media player hereby grant permission # for non-GPL compatible GStreamer and Exaile plugins to be used and # distributed together with GStreamer and Exaile. This permission is # above and beyond the permissions granted by the GPL license by which # Exaile is covered. If you modify this code, you may extend this # exception to your version of the code, but you are not obligated to # do so. If you do not wish to do so, delete this exception statement # from your version. 
""" General functions and classes shared in the codebase """ import inspect from gi.repository import Gio from gi.repository import GLib from gi.repository import GObject import logging import os import os.path import shelve import subprocess import sys import threading import urllib2 import urlparse import weakref from functools import wraps, partial from collections import deque from UserDict import DictMixin logger = logging.getLogger(__name__) # ArchLinux disabled bsddb in python2, so we have to use the external module # -> msys2 did also, so now we force using bsddb to simplify things try: import bsddb3 as bsddb except ImportError: try: import bsddb except ImportError: logger.error("Exaile requires bsddb to be installed") raise # TODO: get rid of this. only plugins/cd/ uses it. VALID_TAGS = ( # Ogg Vorbis spec tags "title version album tracknumber artist genre performer copyright " "license organization description location contact isrc date " # Other tags we like "arranger author composer conductor lyricist discnumber labelid part " "website language encodedby bpm albumartist originaldate originalalbum " "originalartist recordingdate" ).split() PICKLE_PROTOCOL = 2 # Default tags for track sorting. Unless you have good reason to do # otherwise, use this. # TODO: make this a setting? 
BASE_SORT_TAGS = ('albumartist', 'date', 'album', 'discnumber', 'tracknumber', 'title') def clamp(value, minimum, maximum): """ Clamps a value to the given boundaries :param value: the value to clamp :param minimum: the minimum value to return :param maximum: the maximum value to return """ return max(minimum, min(value, maximum)) def enum(**enums): """ Creates an enum type :see: https://stackoverflow.com/a/1695250 """ return type('Enum', (), enums) def sanitize_url(url): """ Removes the password part from an url :param url: the URL to sanitize :type url: string :returns: the sanitized url """ try: components = list(urlparse.urlparse(url)) auth, host = components[1].split('@') username, password = auth.split(':') except (AttributeError, ValueError): pass else: # Replace password with fixed amount of "*" auth = ':'.join((username, 5 * '*')) components[1] = '@'.join((auth, host)) url = urlparse.urlunparse(components) return url def get_url_contents(url, user_agent): ''' Retrieves data from a URL and sticks a user-agent on it. You can use exaile.get_user_agent_string(pluginname) to get this. Added in Exaile 3.4 :returns: Contents of page located at URL :raises: urllib2.URLError ''' headers = {'User-Agent': user_agent} req = urllib2.Request(url, None, headers) fp = urllib2.urlopen(req) data = fp.read() fp.close() return data def threaded(func): """ A decorator that will make any function run in a new thread :param func: the function to run threaded """ @wraps(func) def wrapper(*args, **kwargs): t = threading.Thread(target=func, args=args, kwargs=kwargs) t.daemon = True t.start() return wrapper def synchronized(func): """ A decorator to make a function synchronized - which means only one thread is allowed to access it at a time. This only works on class functions, and creates a variable in the instance called _sync_lock. If this function is used on multiple functions in an object, they will be locked with respect to each other. The lock is re-entrant. 
""" @wraps(func) def wrapper(self, *__args, **__kw): try: rlock = self._sync_lock except AttributeError: from threading import RLock rlock = self.__dict__.setdefault('_sync_lock', RLock()) rlock.acquire() try: return func(self, *__args, **__kw) finally: rlock.release() return wrapper def _idle_callback(func, callback, *args, **kwargs): value = func(*args, **kwargs) if callback and callable(callback): callback(value) def idle_add(callback=None): """ A decorator that will wrap the function in a GLib.idle_add call NOTE: Although this decorator will probably work in more cases than the gtkrun decorator does, you CANNOT expect to get a return value from the function that calls a function with this decorator. Instead, you must use the callback parameter. If the wrapped function returns a value, it will be passed in as a parameter to the callback function. @param callback: optional callback that will be called when the wrapped function is done running """ def wrap(f): @wraps(f) def wrapped(*args, **kwargs): GLib.idle_add(_idle_callback, f, callback, *args, **kwargs) return wrapped return wrap def _glib_wait_inner(timeout, glib_timeout_func): # Have to hold the value in a mutable structure because python's scoping # rules prevent us assigning to an outer scope directly. 
# # Additionally, we hold source ids per-instance, otherwise this would # restrict calls across all instances of an object with the glib_wait* # decorators, which would have surprising results id_by_obj = weakref.WeakKeyDictionary() def waiter(function): # ensure this is only used on class methods callargs = inspect.getargspec(function) if len(callargs.args) == 0 or callargs.args[0] != 'self': raise RuntimeError("Must only use glib_wait* on instance methods!") def thunk(*args, **kwargs): id_by_obj[args[0]] = None # if a function returns True, it wants to be called again; in that # case, treat it as an additional call, otherwise you can potentially # get lots of callbacks piling up if function(*args, **kwargs): delayer(*args, **kwargs) def delayer(*args, **kwargs): self = args[0] srcid = id_by_obj.get(self) if srcid: GLib.source_remove(srcid) id_by_obj[self] = glib_timeout_func(timeout, thunk, *args, **kwargs) return delayer return waiter def glib_wait(timeout): """ Decorator to make a function run only after 'timeout' milliseconds have elapsed since the most recent call to the function. For example, if a function was given a timeout of 1000 and called once, then again half a second later, it would run only once, 1.5 seconds after the first call to it. If arguments are given to the function, only the last call's set of arguments will be used. If the function returns a value that evaluates to True, it will be called again under the same timeout rules. .. warning:: Can only be used with instance methods """ return _glib_wait_inner(timeout, GLib.timeout_add) def glib_wait_seconds(timeout): """ Same as glib_wait, but uses GLib.timeout_add_seconds instead of GLib.timeout_add and takes its timeout in seconds. See the glib documention for why you might want to use one over the other. 
""" return _glib_wait_inner(timeout, GLib.timeout_add_seconds) def profileit(func): """ Decorator to profile a function """ import cProfile import pstats @wraps(func) def wrapper(*args, **kwargs): prof = cProfile.Profile() res = prof.runcall(func, *args, **kwargs) stats = pstats.Stats(prof) stats.strip_dirs() stats.sort_stats('time', 'calls') print(">>>---- Begin profiling print") stats.print_stats() print(">>>---- End profiling print") prof = stats = None return res return wrapper class classproperty(object): """ Decorator allowing for class property access """ def __init__(self, function): self.function = function def __get__(self, obj, type): return self.function(type) class VersionError(Exception): """ Represents version discrepancies """ #: the error message message = None def __init__(self, message): Exception.__init__(self) self.message = message def __str__(self): return repr(self.message) def open_file(path): """ Opens a file or folder using the system configured program """ platform = sys.platform if platform == 'win32': # pylint will error here on non-windows platforms unless we do this # pylint: disable-msg=E1101 os.startfile(path) # pylint: enable-msg=E1101 elif platform == 'darwin': subprocess.Popen(["open", path]) else: subprocess.Popen(["xdg-open", path]) def open_file_directory(path_or_uri): """ Opens the parent directory of a file, selecting the file if possible. """ f = Gio.File.new_for_commandline_arg(path_or_uri) platform = sys.platform if platform == 'win32': # Normally we can just run `explorer /select, filename`, but Python 2 # always calls CreateProcessA, which doesn't support Unicode. We could # call CreateProcessW with ctypes, but the following is more robust. import ctypes ctypes.windll.ole32.CoInitialize(None) # Not sure why this is always UTF-8. 
upath = f.get_path().decode('utf-8') pidl = ctypes.windll.shell32.ILCreateFromPathW(upath) ctypes.windll.shell32.SHOpenFolderAndSelectItems(pidl, 0, None, 0) ctypes.windll.shell32.ILFree(pidl) ctypes.windll.ole32.CoUninitialize() elif platform == 'darwin': subprocess.Popen(["open", f.get_parent().get_parse_name()]) else: subprocess.Popen(["xdg-open", f.get_parent().get_parse_name()]) def open_shelf(path): ''' Opens a python shelf file, used to store various types of metadata ''' # As of Exaile 4, new DBs will only be created as Berkeley DB Hash databases # using either bsddb3 (external) or bsddb (stdlib but sometimes removed). # Existing DBs created with other backends will be migrated to Berkeley DB. # We do this because BDB is generally considered more performant, # and because gdbm currently doesn't work at all in MSYS2. # Some DBM modules don't use the path we give them, but rather they have # multiple filenames. If the specified path doesn't exist, double check # to see if whichdb returns a result before trying to open it with bsddb force_migrate = False if not os.path.exists(path): from whichdb import whichdb if whichdb(path) is not None: force_migrate = True if not force_migrate: try: db = bsddb.hashopen(path, 'c') return shelve.BsdDbShelf(db, protocol=PICKLE_PROTOCOL) except bsddb.db.DBInvalidArgError: logger.warning("%s was created with an old backend, migrating it", path) except Exception: raise # special case: zero-length file if not force_migrate and os.path.getsize(path) == 0: os.unlink(path) else: from xl.migrations.database.to_bsddb import migrate migrate(path) db = bsddb.hashopen(path, 'c') return shelve.BsdDbShelf(db, protocol=PICKLE_PROTOCOL) if hasattr(os, 'replace'): # introduced in python 3.3 replace_file = os.replace elif sys.platform != 'win32': replace_file = os.rename else: # https://stupidpythonideas.blogspot.com/2014/07/getting-atomic-writes-right.html import ctypes _kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) _MoveFileEx = 
_kernel32.MoveFileExW _MoveFileEx.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32] _MoveFileEx.restype = ctypes.c_bool def replace_file(src, dst): if not _MoveFileEx(src, dst, 1): raise ctypes.WinError(ctypes.get_last_error()) class LimitedCache(DictMixin): """ Simple cache that acts much like a dict, but has a maximum # of items """ def __init__(self, limit): self.limit = limit self.order = deque() self.cache = dict() def __iter__(self): return self.cache.__iter__() def __contains__(self, item): return self.cache.__contains__(item) def __delitem__(self, item): del self.cache[item] self.order.remove(item) def __getitem__(self, item): val = self.cache[item] self.order.remove(item) self.order.append(item) return val def __setitem__(self, item, value): self.cache[item] = value if item in self.order: self.order.remove(item) self.order.append(item) while len(self) > self.limit: del self.cache[self.order.popleft()] def __repr__(self): '''prevent repr(self) from changing cache order''' return repr(self.cache) def __str__(self): '''prevent str(self) from changing cache order''' return str(self.cache) def keys(self): return self.cache.keys() class cached(object): """ Decorator to make a function's results cached does not cache if there is an exception. .. 
note:: This probably breaks on functions that modify their arguments """ def __init__(self, limit): self.limit = limit @staticmethod def _freeze(d): return frozenset(d.iteritems()) def __call__(self, f): try: f._cache except AttributeError: f._cache = LimitedCache(self.limit) @wraps(f) def wrapper(*args, **kwargs): try: return f._cache[(args, self._freeze(kwargs))] except KeyError: pass ret = f(*args, **kwargs) try: f._cache[(args, self._freeze(kwargs))] = ret except TypeError: # args can't be hashed pass return ret return wrapper def __get__(self, obj, objtype): """Support instance methods.""" return partial(self.__call__, obj) def walk(root): """ Walk through a Gio directory, yielding each file Files are enumerated in the following order: first the directory, then the files in that directory. Once one directory's files have all been listed, it moves on to the next directory. Order of files within a directory and order of directory traversal is not specified. :param root: a :class:`Gio.File` representing the directory to walk through :returns: a generator object :rtype: :class:`Gio.File` """ queue = deque() queue.append(root) while len(queue) > 0: dir = queue.pop() yield dir try: for fileinfo in dir.enumerate_children( "standard::type," "standard::is-symlink,standard::name," "standard::symlink-target,time::modified", Gio.FileQueryInfoFlags.NONE, None, ): fil = dir.get_child(fileinfo.get_name()) # FIXME: recursive symlinks could cause an infinite loop if fileinfo.get_is_symlink(): target = fileinfo.get_symlink_target() if "://" not in target and not os.path.isabs(target): fil2 = dir.get_child(target) else: fil2 = Gio.File.new_for_uri(target) # already in the collection, we'll get it anyway if fil2.has_prefix(root): continue type = fileinfo.get_file_type() if type == Gio.FileType.DIRECTORY: queue.append(fil) elif type == Gio.FileType.REGULAR: yield fil except GLib.Error: # why doesnt gio offer more-specific errors? 
logger.exception("Unhandled exception while walking on %s.", dir) def walk_directories(root): """ Walk through a Gio directory, yielding each subdirectory :param root: a :class:`Gio.File` representing the directory to walk through :returns: a generator object :rtype: :class:`Gio.File` """ yield root directory = None subdirectory = None try: for fileinfo in root.enumerate_children( 'standard::name,standard::type', Gio.FileQueryInfoFlags.NONE, None ): if fileinfo.get_file_type() == Gio.FileType.DIRECTORY: directory = root.get_child(fileinfo.get_name()) for subdirectory in walk_directories(directory): yield subdirectory except GLib.Error: logger.exception( "Unhandled exception while walking dirs on %s, %s, %s", root, directory, subdirectory, ) class TimeSpan(object): """ Calculates the number of days, hours, minutes, and seconds in a time span """ #: number of days days = 0 #: number of hours hours = 0 #: number of minutes minutes = 0 #: number of seconds seconds = 0 def __init__(self, span): """ :param span: Time span in seconds :type span: float """ try: span = float(span) except (ValueError, TypeError): span = 0 span, self.seconds = divmod(span, 60) span, self.minutes = divmod(span, 60) self.days, self.hours = divmod(span, 24) def __repr__(self): span = self.days * 24 + self.hours span = span * 60 + self.minutes span = span * 60 + self.seconds return '%s(%s)' % (self.__class__.__name__, span) def __str__(self): return '%dd, %dh, %dm, %ds' % ( self.days, self.hours, self.minutes, self.seconds, ) class MetadataList(object): """ Like a list, but also associates an object of metadata with each entry. ``(get|set|del)_meta_key`` are the metadata interface - they allow the metadata to act much like a dictionary, with a few optimizations. 
List aspects that are not supported: * sort * comparisons other than equality * multiply """ __slots__ = ['__list', 'metadata'] def __init__(self, iterable=[], metadata=[]): self.__list = list(iterable) meta = list(metadata) if meta and len(meta) != len(self.__list): raise ValueError("Length of metadata must match length of items.") if not meta: meta = [None] * len(self.__list) self.metadata = meta def __repr__(self): return "MetadataList(%s)" % self.__list def __len__(self): return len(self.__list) def __iter__(self): return self.__list.__iter__() def __add__(self, other): l = MetadataList(self, self.metadata) l.extend(other) return l def __iadd__(self, other): self.extend(other) return self def __eq__(self, other): if isinstance(other, MetadataList): other = list(other) return self.__list == other def __getitem__(self, i): val = self.__list.__getitem__(i) if isinstance(i, slice): return MetadataList(val, self.metadata.__getitem__(i)) else: return val def __setitem__(self, i, value): self.__list.__setitem__(i, value) if isinstance(value, MetadataList): metadata = list(value.metadata) else: metadata = [None] * len(value) self.metadata.__setitem__(i, metadata) def __delitem__(self, i): self.__list.__delitem__(i) self.metadata.__delitem__(i) def append(self, other, metadata=None): self.insert(len(self), other, metadata=metadata) def extend(self, other): self[len(self) : len(self)] = other def insert(self, i, item, metadata=None): if i >= len(self): i = len(self) e = len(self) + 1 else: e = i self[i:e] = [item] self.metadata[i:e] = [metadata] def pop(self, i=-1): item = self[i] del self[i] return item def remove(self, item): del self[self.index(item)] def reverse(self): self.__list.reverse() self.metadata.reverse() def index(self, i, start=0, end=None): if end is None: return self.__list.index(i, start) else: return self.__list.index(i, start, end) def count(self, i): return self.__list.count(i) def get_meta_key(self, index, key, default=None): if not 
self.metadata[index]: return default return self.metadata[index].get(key, default) def set_meta_key(self, index, key, value): if not self.metadata[index]: self.metadata[index] = {} self.metadata[index][key] = value def del_meta_key(self, index, key): if not self.metadata[index]: raise KeyError(key) del self.metadata[index][key] if not self.metadata[index]: self.metadata[index] = None class ProgressThread(GObject.GObject, threading.Thread): """ A basic thread with progress updates. The thread should emit the progress-update signal periodically. The contents must be number between 0 and 100, or a tuple of (n, total) where n is the current step. """ __gsignals__ = { 'progress-update': ( GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_PYOBJECT,), ), # TODO: Check if 'stopped' is required 'done': (GObject.SignalFlags.RUN_FIRST, None, ()), } def __init__(self): GObject.GObject.__init__(self) threading.Thread.__init__(self) self.setDaemon(True) def stop(self): """ Stops the thread """ self.emit('done') def run(self): """ Override and make sure that the 'progress-update' signal is emitted regularly with the progress """ pass class SimpleProgressThread(ProgressThread): ''' Simpler version of ProgressThread that uses a generator to manage the thread and its progress. Instead of overriding run, just pass a callable that returns a generator to the constructor. The callable must either yield a number between 0 and 100, or yield a tuple of (n, total) where n is the current step. 
:: def long_running_thing(): l = len(stuff) try: for i in stuff: yield (i, l) finally: # if the thread is stopped, GeneratorExit will # be raised the next time yield is called pass ''' def __init__(self, target, *args, **kwargs): ProgressThread.__init__(self) self.__target = (target, args, kwargs) self.__stop = False def stop(self): ''' Causes the thread to stop at the next yield point ''' self.__stop = True def run(self): ''' Runs a generator ''' target, args, kwargs = self.__target try: for progress in target(*args, **kwargs): self.emit('progress-update', progress) if self.__stop: break except GeneratorExit: pass except Exception: logger.exception("Unhandled exception") finally: self.emit('done') class PosetItem(object): def __init__(self, name, after, priority, value=None): """ :param name: unique identifier for this item :type name: string :param after: which items this item comes after :type after: list of string :param priority: tiebreaker, higher values come later :type priority: int :param value: arbitrary data associated with the item """ self.name = name self.after = list(after) self.priority = priority self.children = [] self.value = value def order_poset(items): """ :param items: poset to order :type items: list of :class:`PosetItem` """ items = {item.name: item for item in items} for name, item in items.iteritems(): for after in item.after: k = items.get(after) if k: k.children.append(item) else: item.after.remove(after) result = [] next = [i[1] for i in items.items() if not i[1].after] while next: current = sorted((i.priority, i.name, i) for i in next) result.extend(i[2] for i in current) nextset = dict() for i in current: for c in i[2].children: nextset[c.name] = c removals = [] for name, item in nextset.iteritems(): for after in item.after: if after in nextset: removals.append(name) break for r in removals: del nextset[r] next = nextset.values() return result class LazyDict(object): __slots__ = ['_dict', '_funcs', 'args', '_locks'] def 
__init__(self, *args): self.args = args self._dict = {} self._funcs = {} self._locks = {} def __setitem__(self, item, value): if inspect.isfunction(value): self._funcs[item] = value else: self._dict[item] = value def __getitem__(self, item): lock = self._locks.get(item, threading.Lock()) with lock: try: return self._dict[item] except KeyError: self._locks[item] = lock val = self._funcs[item](item, *self.args) self._dict[item] = val return val def get(self, item, default=None): try: return self[item] except KeyError: return default class _GioFileStream(object): __slots__ = ['stream'] def __enter__(self): return self def __exit__(self, *exc_info): self.stream.close() def seek(self, offset, whence=os.SEEK_CUR): if whence == os.SEEK_CUR: self.stream.seek(offset, GLib.SeekType.CUR) elif whence == os.SEEK_SET: self.stream.seek(offset, GLib.SeekType.SET) elif whence == os.SEEK_END: self.stream.seek(offset, GLib.SeekType.END) else: raise IOError("Invalid whence") def tell(self): return self.stream.tell() class GioFileInputStream(_GioFileStream): ''' Wrap a Gio.File so it looks like a python file object for reading. 
TODO: More complete wrapper ''' __slots__ = ['stream', 'gfile'] def __init__(self, gfile): self.gfile = gfile self.stream = Gio.DataInputStream.new(gfile.read()) def __iter__(self): return self def next(self): r = self.stream.read_line()[0] if not r: raise StopIteration() return r def read(self, size=None): if size: return self.stream.read_bytes(size).get_data() else: return self.gfile.load_contents()[1] def readline(self): return self.stream.read_line()[0] class GioFileOutputStream(_GioFileStream): ''' Wrapper around Gio.File for writing like a python file object ''' __slots__ = ['stream'] def __init__(self, gfile, mode='w'): if mode != 'w': raise IOError("Not implemented") self.stream = gfile.replace('', False, Gio.FileCreateFlags.REPLACE_DESTINATION) def flush(self): self.stream.flush() def write(self, s): if isinstance(s, unicode): s = s.encode('utf-8') return self.stream.write(s) def subscribe_for_settings(section, options, self): ''' Allows you designate attributes on an object to be dynamically updated when a particular setting changes. If you want to be notified of a setting update, use a @property for the attribute. Only works for a options in a single section :param section: Settings section :param options: Dictionary of key: option name, value: attribute on 'self' to set when the setting has been updated. The attribute must already have a default value in it :param self: Object to set attribute values on :returns: A function that can be called to unsubscribe .. 
    versionadded:: 3.5.0
    '''

    from xl import event
    from xl import settings

    def _on_option_set(unused_name, unused_object, data):
        # Map the changed option key back to the attribute it feeds;
        # option keys not in the mapping are ignored.
        attrname = options.get(data)
        if attrname is not None:
            setattr(self, attrname, settings.get_option(data, getattr(self, attrname)))

    for k in options:
        if not k.startswith('%s/' % section):
            raise ValueError("Option is not part of section %s" % section)
        # Prime each attribute with the currently stored setting value
        _on_option_set(None, None, k)

    return event.add_callback(
        _on_option_set, '%s_option_set' % section.replace('/', '_')
    )


class AsyncLoader:
    """
    Async loader based on a generator

    Threaded, load it and put it in `result_list`
    """

    def __init__(self, item_generator):
        """
        Constructs and already starts processing (starts thread)

        :param item_generator: iterable
        """
        # Flag checked cooperatively by run(); set by end()
        self.__end = False
        self.__result_list = []
        self.__thread = threading.Thread(target=self.run, args=(item_generator,))
        self.__thread.start()

    def run(self, item_generator):
        """
        Process items, putting them in `result_list`

        :param item_generator: iterable
        :return: None
        """
        for i in item_generator:
            # Stop flag is checked both before and after appending so a
            # stop request is honored as soon as possible.
            if self.__end:
                break
            self.__result_list.append(i)
            if self.__end:
                break

    def end(self, timeout=None):
        """
        Requests processing to end if it doesn't finish within timeout:
        waits up to `timeout` for the thread to complete on its own, then
        flags it to stop at the next item.

        :param timeout: float representing seconds or None to wait
            infinitely (default)
        :return: None
        """
        self.__thread.join(timeout)
        self.__end = True

    def ended(self):
        """
        If it has ended

        :return: bool
        """
        return not self.__thread.is_alive()

    @property
    def result(self):
        """
        Gets the result (a shallow copy of the items gathered so far)

        :return: list
        """
        return self.__result_list[:]


# vim: et sts=4 sw=4
genodeftest/exaile
xl/common.py
Python
gpl-2.0
32,749
#!/usr/local/bin/python """ Grid graph nodes objective evolution with increasing k """ from __future__ import division import sys sys.path.insert(1, '..') import __builtin__ import os from MarkovChain import * from MarkovChain.edge_objectives import * import networkx as nx import pandas as pd PLOTS_DATA_DIR = "/home/grad3/harshal/Desktop/MCMonitor/Plots_data/" dataframe_rows = [] def get_objective_evolution(method, k, iteration): rows = get_evolution(method, k) for row in rows: row['iteration'] = iteration global dataframe_rows dataframe_rows += rows df = pd.DataFrame(dataframe_rows) df.to_csv(PLOTS_DATA_DIR + "grid_edges_k_objective_evolution.csv.gz", sep=",", header=True, index=False, compression="gzip") if __name__ == "__main__": G = nx.grid_2d_graph(100, 10, create_using=nx.DiGraph()) G = nx.convert_node_labels_to_integers(G) G = nx.stochastic_graph(G, weight='weight') num_nodes = len(G) num_items = len(G) k = 50 item_distributions = ['uniform', 'direct', 'inverse', 'ego'] for item_distribution in item_distributions: if item_distribution == 'ego': iterations = 10 else: iterations = 1 for iteration in xrange(iterations): print "Evaluating item distribution {}".format(item_distribution) __builtin__.mc = MarkovChain(num_nodes=num_nodes, num_items=num_items, item_distribution=item_distribution, G=G) print "Starting evaluation of methods" methods = [random_edges, highest_item_edges, highest_probability_edges, highest_betweenness_centrality_edges, smart_greedy_parallel, smart_greedy_heuristic] for method in methods: print "Evaluating method {}".format(method.func_name) get_objective_evolution(method, k, iteration)
chdhr-harshal/MCMonitor
src/python/experiments/grid_edges.py
Python
mit
2,102
# Generated by YCM Generator at 2019-06-16 12:38:36.545120 # This file is NOT licensed under the GPLv3, which is the license for the rest # of YouCompleteMe. # # Here's the license text for this file: # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # For more information, please refer to <http://unlicense.org/> import os import ycm_core flags = [ '-x', 'c++', ] # Set this to the absolute path to the folder (NOT the file!) containing the # compile_commands.json file to use that instead of 'flags'. See here for # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html # # You can get CMake to generate this file for you by adding: # set( CMAKE_EXPORT_COMPILE_COMMANDS 1 ) # to your CMakeLists.txt file. # # Most projects will NOT need to set this to anything; you can just change the # 'flags' list of compilation flags. 
# Notice that YCM itself uses that approach.
compilation_database_folder = ''

# Use the clang compilation database if one was configured above, otherwise
# fall back to the static 'flags' list.
if os.path.exists(compilation_database_folder):
    database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
    database = None

SOURCE_EXTENSIONS = ['.C', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm']


def DirectoryOfThisScript():
    """Return the absolute directory containing this configuration file."""
    return os.path.dirname(os.path.abspath(__file__))


def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Rewrite relative include/sysroot paths in *flags* against *working_directory*.

    Returns a new list; empty flags are dropped, all other flags are kept
    in their original order.
    """
    if not working_directory:
        return list(flags)

    path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute_flags = []
    expect_path = False  # previous flag was e.g. '-I', so this flag is a path

    for flag in flags:
        updated = flag

        if expect_path:
            expect_path = False
            if not flag.startswith('/'):
                updated = os.path.join(working_directory, flag)

        for prefix in path_flags:
            if flag == prefix:
                # The path comes as the *next* flag; absolutize it then.
                expect_path = True
                break
            if flag.startswith(prefix):
                # The path is glued onto the flag, e.g. '-Iinclude'.
                relative_path = flag[len(prefix):]
                updated = prefix + os.path.join(working_directory, relative_path)
                break

        if updated:
            absolute_flags.append(updated)

    return absolute_flags


def IsHeaderFile(filename):
    """Return True if *filename* has a C/C++/Objective-C header extension."""
    return os.path.splitext(filename)[1] in ('.H', '.h', '.hxx', '.hpp', '.hh')


def GetCompilationInfoForFile(filename):
    """Look up compilation flags for *filename* in the database.

    The compilation database generated by CMake has no entries for header
    files, so for a header we try each sibling source file with the same
    basename and use its flags instead.  Returns None when nothing usable
    is found.
    """
    if not IsHeaderFile(filename):
        return database.GetCompilationInfoForFile(filename)

    basename = os.path.splitext(filename)[0]
    for extension in SOURCE_EXTENSIONS:
        candidate = basename + extension
        if os.path.exists(candidate):
            info = database.GetCompilationInfoForFile(candidate)
            if info.compiler_flags_:
                return info
    return None


def FlagsForFile(filename, **kwargs):
    """Entry point called by YouCompleteMe to obtain flags for *filename*."""
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT return
        # a python list, but a "list-like" StringVec object.
        compilation_info = GetCompilationInfoForFile(filename)
        if not compilation_info:
            return None
        final_flags = MakeRelativePathsInFlagsAbsolute(
            compilation_info.compiler_flags_,
            compilation_info.compiler_working_dir_)
    else:
        final_flags = MakeRelativePathsInFlagsAbsolute(flags, DirectoryOfThisScript())

    return {'flags': final_flags, 'do_cache': True}
PysKa-Ratzinger/personal_project_euler_solutions
solutions/051-075/74/.ycm_extra_conf.py
Python
mit
4,675
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE


import bpy
from bpy.props import BoolProperty, FloatProperty, EnumProperty

from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, map_unzip_recursirve
from sverchok.dependencies import FreeCAD
from sverchok.utils.dummy_nodes import add_dummy

# When FreeCAD is unavailable, register a placeholder node instead of the
# real one; Part (FreeCAD's geometry kernel) only exists alongside FreeCAD.
if FreeCAD is None:
    add_dummy('SvSolidValidateNode', 'Validate & Fix Solid', 'FreeCAD')
else:
    import Part

class SvSolidValidateNode(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Validate Fix Solid
    Tooltip: Validate or fix Solid objects
    """
    bl_idname = 'SvSolidValidateNode'
    bl_label = 'Validate & Fix Solid'
    bl_icon = 'OUTLINER_OB_EMPTY'
    # NOTE(review): misspelling kept intentionally -- other code may look up
    # this exact attribute name.
    solid_catergory = "Operators"

    # Tolerance handed to FreeCAD's Shape.fix() when repairing invalid solids
    precision : FloatProperty(
            name = "Precision",
            default = 0.001,
            precision = 6,
            update=updateNode)

    def draw_buttons(self, context, layout):
        """Draw the node's UI: just the precision property."""
        layout.prop(self, 'precision')

    def sv_init(self, context):
        """Create the node's sockets."""
        self.inputs.new('SvSolidSocket', "Solid")
        self.outputs.new('SvSolidSocket', "FixedSolid")
        self.outputs.new('SvStringsSocket', "IsValid")

    def _process(self, solid):
        """Validate one solid; return (fixed_solid_or_None, is_valid).

        Raises if the solid is invalid and FreeCAD cannot fix it.
        """
        try:
            valid = solid.isValid()
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate while any failure of isValid() still means invalid.
        except Exception:
            valid = False
        if valid:
            fixed = solid
        elif self.outputs['FixedSolid'].is_linked:
            # Only pay for the (potentially expensive) fix when the fixed
            # output is actually consumed.
            fixed = solid.copy()
            ok = fixed.fix(self.precision, self.precision, self.precision)
            if not ok:
                raise Exception("The provided Solid is not valid and can not be fixed automatically")
        else:
            fixed = None
        return fixed, valid

    def process(self):
        """Main evaluation: validate/fix every input solid."""
        if not any(socket.is_linked for socket in self.outputs):
            return
        solids_in = self.inputs['Solid'].sv_get()
        solids_out, valid_out = map_unzip_recursirve(self._process, solids_in, data_types=(Part.Shape,))
        self.outputs['FixedSolid'].sv_set(solids_out)
        self.outputs['IsValid'].sv_set(valid_out)

def register():
    if FreeCAD is not None:
        bpy.utils.register_class(SvSolidValidateNode)

def unregister():
    if FreeCAD is not None:
        bpy.utils.unregister_class(SvSolidValidateNode)
nortikin/sverchok
nodes/solid/validate.py
Python
gpl-3.0
2,513
# -*- coding: utf-8 -*-
"""
    tests.regression
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests regressions.

    :copyright: (c) 2015 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

import pytest

import os
import gc
import sys
import flask
import threading
from werkzeug.exceptions import NotFound


# Serializes the GC-sensitive sections so concurrent use can't skew the
# object counts.
_gc_lock = threading.Lock()


class assert_no_leak(object):
    # Context manager that fails the surrounding test if more GC-tracked
    # objects exist after the block than existed before it.

    def __enter__(self):
        gc.disable()
        _gc_lock.acquire()
        loc = flask._request_ctx_stack._local

        # Force Python to track this dictionary at all times.
        # This is necessary since Python only starts tracking
        # dicts if they contain mutable objects.  It's a horrible,
        # horrible hack but makes this kinda testable.
        loc.__storage__['FOOO'] = [1, 2, 3]

        gc.collect()
        self.old_objects = len(gc.get_objects())

    def __exit__(self, exc_type, exc_value, tb):
        # Without reference counting (no sys.getrefcount, e.g. PyPy) objects
        # linger until a collection, so collect before counting.
        if not hasattr(sys, 'getrefcount'):
            gc.collect()
        new_objects = len(gc.get_objects())
        if new_objects > self.old_objects:
            pytest.fail('Example code leaked')
        _gc_lock.release()
        gc.enable()


def test_memory_consumption():
    # Regression test: repeatedly rendering a template must not leak objects.
    app = flask.Flask(__name__)

    @app.route('/')
    def index():
        return flask.render_template('simple_template.html', whiskey=42)

    def fire():
        with app.test_client() as c:
            rv = c.get('/')
            assert rv.status_code == 200
            assert rv.data == b'<h1>42</h1>'

    # Trigger caches
    fire()

    # This test only works on CPython 2.7.
    if sys.version_info >= (2, 7) and \
            not hasattr(sys, 'pypy_translation_info'):
        with assert_no_leak():
            for x in range(10):
                fire()


def test_safe_join_toplevel_pardir():
    # safe_join must refuse to escape the base directory via '..'.
    from flask.helpers import safe_join
    with pytest.raises(NotFound):
        safe_join('/foo', '..')


def test_aborting():
    # Regression test: abort() with a redirect response, and custom error
    # handlers for user-defined exceptions, must both work.
    class Foo(Exception):
        whatever = 42
    app = flask.Flask(__name__)
    app.testing = True

    @app.errorhandler(Foo)
    def handle_foo(e):
        return str(e.whatever)

    @app.route('/')
    def index():
        # abort() raises, so the extra `raise` is redundant but harmless.
        raise flask.abort(flask.redirect(flask.url_for('test')))

    @app.route('/test')
    def test():
        raise Foo()

    with app.test_client() as c:
        rv = c.get('/')
        assert rv.headers['Location'] == 'http://localhost/test'
        rv = c.get('/test')
        assert rv.data == b'42'
nwags/flask
tests/test_regression.py
Python
bsd-3-clause
2,467
with open("file.txt") as f: print("a<caret>bc")
asedunov/intellij-community
python/testData/breadcrumbs/withAs.py
Python
apache-2.0
51
from const import *
import widget
import pguglobals

class ProgressBar(widget.Widget):
    """A progress bar widget.

    Assigning to `value` is clamped into [min, max] and triggers a
    CHANGE event plus repaint when the value actually changes.

    Example:
        w = gui.ProgressBar(0,0,100)
        w.value = 25
    """
    def __init__(self,value,min,max,**params):
        # `min`/`max` intentionally shadow the builtins; kept for API
        # compatibility with existing pgu callers.
        params.setdefault('cls','progressbar')
        widget.Widget.__init__(self,**params)
        # Order matters: the tuple-assignment targets are bound left to
        # right, so self.min and self.max exist before self.value goes
        # through the clamping __setattr__ below.
        self.min,self.max,self.value = min,max,value

    def paint(self,s):
        # Render the filled fraction of the bar onto surface `s`.
        r = pygame.rect.Rect(0,0,self.rect.w,self.rect.h)
        # Width scaled by completion fraction.  NOTE(review): with int
        # inputs this is Python 2 floor division — confirm before porting
        # to Python 3 (would become float and break Rect width).
        r.w = r.w*(self.value-self.min)/(self.max-self.min)
        self.bar = r
        pguglobals.app.theme.render(s,self.style.bar,r)

    def __setattr__(self,k,v):
        # Clamp `value` into [min, max] before storing it.
        if k == 'value':
            v = int(v)
            v = max(v,self.min)
            v = min(v,self.max)
        _v = self.__dict__.get(k,NOATTR)
        self.__dict__[k]=v
        # Fire CHANGE/repaint only when an already-set value changed;
        # NOATTR (from const) marks "attribute not previously set".
        if k == 'value' and _v != NOATTR and _v != v:
            self.send(CHANGE)
            self.repaint()
Southpaw-TACTIC/Team
src/python/Lib/site-packages/pgu/gui/misc.py
Python
epl-1.0
952
# Import required modules
from assets.modules.game import *
from assets.modules.screens.title_screen import *
from assets.modules.screens.win_screen import *
from assets.modules.mechanics2.background_music import *

# pygame is brought into scope by the star imports above.
pygame.init()


def main():
    """Game entry point: start the music, then loop
    title screen -> gameplay, showing the win screen whenever a
    round finished with a winner."""
    background_music("On")
    while True:
        title_screen()
        gameplay()
        # `is_it_over` is a flag maintained by gameplay(); truthiness test
        # replaces the old `== True` comparison (same behavior for a bool).
        if game_over.is_it_over:
            win_screen(game_over.winner_name, game_over.winner_score)


# Guard the entry point so importing this module doesn't start the game.
if __name__ == "__main__":
    main()
Pimorez/INFPRJ02
main.py
Python
gpl-3.0
449
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that release executables only contain
certain symbols and are only linked against allowed libraries.

Example usage:

    find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import sys
from typing import List, Dict

import lief #type:ignore

# temporary constant, to be replaced with lief.ELF.ARCH.RISCV
# https://github.com/lief-project/LIEF/pull/562
LIEF_ELF_ARCH_RISCV = lief.ELF.ARCH(243)

# Debian 9 (Stretch) EOL: 2022. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 6.3.0 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.24 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2026. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1
# - libc version 2.23
#
# CentOS Stream 8 EOL: 2024. https://wiki.centos.org/About/Product
#
# - g++ version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
# - libc version 2.28 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
#
# See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info.

# Maximum allowed versioned-symbol versions, per library prefix.
# 'GLIBC' is keyed a second level deep by target architecture.
MAX_VERSIONS = {
'GCC':       (4,8,0),
'GLIBC': {
    lief.ELF.ARCH.i386:   (2,18),
    lief.ELF.ARCH.x86_64: (2,18),
    lief.ELF.ARCH.ARM:    (2,18),
    lief.ELF.ARCH.AARCH64:(2,18),
    lief.ELF.ARCH.PPC64:  (2,18),
    LIEF_ELF_ARCH_RISCV:  (2,27),
},
'LIBATOMIC': (1,0),
'V':         (0,5,0),  # xkb (bitcoin-qt only)
}

# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109

# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__',
'__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}

# Expected linker-loader names can be found here:
# https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16
ELF_INTERPRETER_NAMES: Dict[lief.ELF.ARCH, Dict[lief.ENDIANNESS, str]] = {
    lief.ELF.ARCH.i386: {
        lief.ENDIANNESS.LITTLE: "/lib/ld-linux.so.2",
    },
    lief.ELF.ARCH.x86_64: {
        lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2",
    },
    lief.ELF.ARCH.ARM: {
        lief.ENDIANNESS.LITTLE: "/lib/ld-linux-armhf.so.3",
    },
    lief.ELF.ARCH.AARCH64: {
        lief.ENDIANNESS.LITTLE: "/lib/ld-linux-aarch64.so.1",
    },
    lief.ELF.ARCH.PPC64: {
        lief.ENDIANNESS.BIG: "/lib64/ld64.so.1",
        lief.ENDIANNESS.LITTLE: "/lib64/ld64.so.2",
    },
    LIEF_ELF_ARCH_RISCV: {
        lief.ENDIANNESS.LITTLE: "/lib/ld-linux-riscv64-lp64d.so.1",
    },
}

# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld64.so.1', # POWER64 ABIv1 dynamic linker
'ld64.so.2', # POWER64 ABIv2 dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# bitcoin-qt only
'libxcb.so.1', # part of X11
'libxkbcommon.so.0', # keyboard keymapping
'libxkbcommon-x11.so.0', # keyboard keymapping
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2', # programming interface to dynamic linker
'libxcb-icccm.so.4',
'libxcb-image.so.0',
'libxcb-shm.so.0',
'libxcb-keysyms.so.1',
'libxcb-randr.so.0',
'libxcb-render-util.so.0',
'libxcb-render.so.0',
'libxcb-shape.so.0',
'libxcb-sync.so.1',
'libxcb-xfixes.so.0',
'libxcb-xinerama.so.0',
'libxcb-xkb.so.1',
}

MACHO_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# bitcoin-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'ColorSync',
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'CoreVideo', # video processing
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'IOSurface', # cross process image/drawing buffers
'libobjc.A.dylib', # Objective-C runtime library
'Metal', # 3D graphics
'Security', # access control and authentication
'QuartzCore', # animation
}

PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# bitcoin-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'NETAPI32.dll',
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'USERENV.dll',
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
'WTSAPI32.dll',
}

def check_version(max_versions, version, arch) -> bool:
    """Return True if a versioned symbol (e.g. 'GLIBC_2.17') is within the
    allowed maximum for its library prefix (and, for GLIBC, for `arch`)."""
    (lib, _, ver) = version.rpartition('_')
    ver = tuple([int(x) for x in ver.split('.')])
    if not lib in max_versions:
        return False
    if isinstance(max_versions[lib], tuple):
        return ver <= max_versions[lib]
    else:
        return ver <= max_versions[lib][arch]

def check_imported_symbols(binary) -> bool:
    """Check that every imported versioned symbol stays within MAX_VERSIONS.
    Prints each offender; returns False if any was found."""
    ok: bool = True
    for symbol in binary.imported_symbols:
        if not symbol.imported:
            continue
        version = symbol.symbol_version if symbol.has_version else None
        if version:
            aux_version = version.symbol_version_auxiliary.name if version.has_auxiliary_version else None
            if aux_version and not check_version(MAX_VERSIONS, aux_version, binary.header.machine_type):
                # NOTE(review): '(unknown)' looks like a redacted placeholder;
                # upstream prints the binary/file name here -- confirm.
                print(f'(unknown): symbol {symbol.name} from unsupported version {version}')
                ok = False
    return ok

def check_exported_symbols(binary) -> bool:
    """Check that the binary exports nothing beyond IGNORE_EXPORTS.
    RISC-V binaries are exempted entirely."""
    ok: bool = True
    for symbol in binary.dynamic_symbols:
        if not symbol.exported:
            continue
        name = symbol.name
        if binary.header.machine_type == LIEF_ELF_ARCH_RISCV or name in IGNORE_EXPORTS:
            continue
        print(f'{binary.name}: export of symbol {name} not allowed!')
        ok = False
    return ok

def check_ELF_libraries(binary) -> bool:
    """Check that every NEEDED ELF library is in ELF_ALLOWED_LIBRARIES."""
    ok: bool = True
    for library in binary.libraries:
        if library not in ELF_ALLOWED_LIBRARIES:
            print(f'(unknown): {library} is not in ALLOWED_LIBRARIES!')
            ok = False
    return ok

def check_MACHO_libraries(binary) -> bool:
    """Check that every linked dylib/framework (by basename) is allowed."""
    ok: bool = True
    for dylib in binary.libraries:
        split = dylib.name.split('/')
        if split[-1] not in MACHO_ALLOWED_LIBRARIES:
            print(f'{split[-1]} is not in ALLOWED_LIBRARIES!')
            ok = False
    return ok

def check_MACHO_min_os(binary) -> bool:
    """Check the Mach-O minimum OS version is exactly 10.15.0."""
    if binary.build_version.minos == [10,15,0]:
        return True
    return False

def check_MACHO_sdk(binary) -> bool:
    """Check the Mach-O build SDK version is exactly 10.15.6."""
    if binary.build_version.sdk == [10, 15, 6]:
        return True
    return False

def check_PE_libraries(binary) -> bool:
    """Check that every imported DLL is in PE_ALLOWED_LIBRARIES."""
    ok: bool = True
    for dylib in binary.libraries:
        if dylib not in PE_ALLOWED_LIBRARIES:
            print(f'{dylib} is not in ALLOWED_LIBRARIES!')
            ok = False
    return ok

def check_PE_subsystem_version(binary) -> bool:
    """Check the PE subsystem version is exactly 6.1 (Windows 7)."""
    major: int = binary.optional_header.major_subsystem_version
    minor: int = binary.optional_header.minor_subsystem_version
    if major == 6 and minor == 1:
        return True
    return False

def check_ELF_interpreter(binary) -> bool:
    """Check the ELF program interpreter matches the expected glibc
    loader path for the binary's architecture/endianness."""
    expected_interpreter = ELF_INTERPRETER_NAMES[binary.header.machine_type][binary.abstract.header.endianness]

    return binary.concrete.interpreter == expected_interpreter

# Per-format list of (check name, check function) pairs run by __main__.
CHECKS = {
lief.EXE_FORMATS.ELF: [
    ('IMPORTED_SYMBOLS', check_imported_symbols),
    ('EXPORTED_SYMBOLS', check_exported_symbols),
    ('LIBRARY_DEPENDENCIES', check_ELF_libraries),
    ('INTERPRETER_NAME', check_ELF_interpreter),
],
lief.EXE_FORMATS.MACHO: [
    ('DYNAMIC_LIBRARIES', check_MACHO_libraries),
    ('MIN_OS', check_MACHO_min_os),
    ('SDK', check_MACHO_sdk),
],
lief.EXE_FORMATS.PE: [
    ('DYNAMIC_LIBRARIES', check_PE_libraries),
    ('SUBSYSTEM_VERSION', check_PE_subsystem_version),
]
}

if __name__ == '__main__':
    # Run every applicable check on each file given on the command line;
    # exit status 1 if any file failed any check (or could not be parsed).
    retval: int = 0
    for filename in sys.argv[1:]:
        try:
            binary = lief.parse(filename)
            etype = binary.format
            if etype == lief.EXE_FORMATS.UNKNOWN:
                print(f'(unknown): unknown executable format')
                retval = 1
                continue

            failed: List[str] = []
            for (name, func) in CHECKS[etype]:
                if not func(binary):
                    failed.append(name)
            if failed:
                print(f'(unknown): failed {" ".join(failed)}')
                retval = 1
        except IOError:
            print(f'(unknown): cannot open')
            retval = 1
    sys.exit(retval)
jamesob/bitcoin
contrib/devtools/symbol-check.py
Python
mit
9,709
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function, absolute_import

import sys, os, logging, socket, time, struct, select
import ctypes, errno, signal, threading, multiprocessing
import cPickle as pickle

import cpylmnl.linux.netlinkh as netlink
import cpylmnl.linux.netfilter.nfnetlinkh as nfnl
import cpylmnl.linux.netfilter.nfnetlink_conntrackh as nfnlct
import cpylmnl.linux.netfilter.nfnetlink_compath as nfnlcm
import cpylmnl as mnl
import cpylmnfct as nfct


log = logging.getLogger(__name__)

CARBON_SERVER = '192.168.1.1'
CARBON_PORT = 2004
CARBON_PREFIX = "myrouter"

nstats = dict()			# {Tuple: Counter}
nstats_lock = threading.Lock()	# guards nstats between netlink loop and timer thread

# write: set True in data_cb if NLM_F_MULTI set in nlmsg_flgas and
#        set False if cb_run returns MNL_CB_STOP. e.g. nlmsg_type is NLMSG_DONE
# read: in periodic task, not send dump request if True
# Or not using dump, catch EBUSY on send_nlmsg()
dumping = False


"""
This program send data to carbon sever specified by CARBON_ prefix
above. The carbon path in the data is:

    <CARBON_PREFIX>.<src addr>.<dst addr>.<L4 proto>.<port>

Supported L4 protos are only TCP, UDP and ICMP. <port> will be type in
case of ICMP. And value is converted from bytes to bit per second.
iptables setup is done by:

    iptables -t nat -I PREROUTING -m connbytes \
        --connbytes 0 --connbytes-dir both --connbytes-mode bytes

* Working with another network equipment's monitor port # adversaria for me

ASCII art:

    +---------------------+        +------------------+
    <--- uplink ---+ <Network switch>  |        | <This box>       |
    |                     |        |                  |
    | communication port  +---+ eth0                  |
    |                     |        |                  |
    | uplink monitor port +---+ eth1                  |
    +---------------------+        +------------------+

eth0 is normal connection to login by ssh or stuff like that and has IP
address. eth1 is connected to the monitorport which mirrors both tx/rx
of network switch's uplink port. I need to collect conntrack data from
eth1 and send carbon data through eth0. Before setting iptables I need
to:

1. set route from eth1 to network /dev/null

    * enable ip forwarding
      # echo 1 > /proc/sys/net/ipv4/ip_forwarding
    * prepare dummy0
      # ip link add null0 type dummy
      # ip link set null0 up
    * create route to null0 in a new table
      # ip route add default dev null0 table 200
    * create routing rule for packets from eth1
      # ip rule add iif eth1 table 200

2. setup bridge

    * create bridg and add eth1 to it
      # brctl addbr br0
      # ip link set br0 up
      # brctl addif br0 eth1
    * tossing up L2 frame to L3
      # ebtables -t broute -I BROUTING -i eth1 \
      #     -j redirect --redirect-target DROP

3. set eth1 promisc mode

    # ip link set eth1 promisc on

At this time, I can see packets from eth1 on null0 without promisc
mode. e.g. tcpdump -npi null0. And ``can not see on eth0.''

For so you know through, packets generaged in the box and sending
through the network switch uplink will doubly count. Use connmark
target to avoid it,

    iptables -t mangle -I PREROUTING -i eth1 -j CONNMARK --set-mark 1

and uncomment CTA_MARK lines below appropriately.
"""


class Tuple(object):
    """simple flow representation"""
    def __init__(self, l3, server, client, l4, port=0):
        self.l3proto = l3
        self.server = server	# big endian byte[4 or 16]
        self.client = client	# big endian byte[4 or 16]
        self.l4proto = l4
        self.port = port	# dst port or icmp type

    def __hash__(self):
        return self.l3proto + hash(str(self.server)) + hash(str(self.client)) \
            + self.l4proto + self.port

    def __eq__(self, other):
        return self.l3proto == other.l3proto \
            and self.server == other.server \
            and self.client == other.client \
            and self.l4proto == other.l4proto \
            and self.port == other.port

    def __str__(self):
        # carbon path
        if self.l4proto == socket.IPPROTO_ICMP:
            l4 = "ICMP.%d" % self.port
        elif self.l4proto == socket.IPPROTO_TCP:
            l4 = "TCP.%d" % socket.ntohs(self.port)
        elif self.l4proto == socket.IPPROTO_UDP:
            l4 = "UDP.%d" % socket.ntohs(self.port)
        else:
            l4 = "unknown.%d" % self.l4proto
        if self.l3proto == socket.AF_INET:
            # address is not dotted quad, colon quad
            path = ".".join([CARBON_PREFIX,
                             ":".join(["%d" % i for i in self.server]),
                             "%s" % l4,
                             ":".join(["%d" % i for i in self.client])])
        elif self.l3proto == socket.AF_INET6:
            path = ".".join([CARBON_PREFIX,
                             ":".join(["%x%x" % (self.server[i], self.server[i + 1]) for i in range(0, len(self.server), 2)]),
                             "%s" % l4,
                             ":".join(["%x%x" % (self.client[i], self.client[i + 1]) for i in range(0, len(self.client), 2)])])
        return path


class Counter(object):
    """Accumulated packet/byte counters for one Tuple."""
    def __init__(self, pkts, b):
        self.pkts = pkts
        self.bytes = b
        self.deleting = False	# set when a DESTROY event was seen


def make_tuple(ct):
    """create tuple from nf_conntrack

    Returns None (after logging) when the conntrack entry lacks the
    needed attributes or uses an unsupported L3 protocol.
    """
    try:
        l3proto = ct.get_attr_u8(nfct.ATTR_L3PROTO)
    except Exception as e:
        log.error("could not get L3PROTO: %s" % e)
        return None
    try:
        l4proto = ct.get_attr_u8(nfct.ATTR_L4PROTO)
    except Exception as e:
        log.warn("could not get L4PROTO: %s, L3PROTO: %d" % (e, l3proto))
        return None

    if l3proto == socket.AF_INET:
        server = bytearray(struct.pack("I", ct.get_attr_u32(nfct.ATTR_IPV4_DST)))
        client = bytearray(struct.pack("I", ct.get_attr_u32(nfct.ATTR_IPV4_SRC)))
    elif l3proto == socket.AF_INET6:
        # I don't know why bytearray cast is needed
        server = bytearray(ct.get_attr_as(nfct.ATTR_IPV6_DST, (ctypes.c_ubyte * 16)))
        client = bytearray(ct.get_attr_as(nfct.ATTR_IPV6_SRC, (ctypes.c_ubyte * 16)))
    else:
        log.warn("unknow L3 proto: %d" % l3proto)
        return None

    if l4proto == socket.IPPROTO_ICMP:
        port = ct.get_attr_u8(nfct.ATTR_ICMP_TYPE)
    elif l4proto in (socket.IPPROTO_TCP, socket.IPPROTO_UDP):
        port = ct.get_attr_u16(nfct.ATTR_PORT_DST)
    else:
        port = 0

    return Tuple(l3proto, server, client, l4proto, port)


def mark_cmp(ct, value, mask):
    """Return True if the conntrack mark is set and matches value under mask."""
    return ct.attr_is_set(nfct.ATTR_MARK) \
        and ct.get_attr_u32(nfct.ATTR_MARK) & mask == value


@mnl.nlmsg_cb
def data_cb(nlh, data):
    """mnl callback which update tuple's counter
    """
    global nstats
    global dumping

    if nlh.nlmsg_flags & netlink.NLM_F_MULTI == netlink.NLM_F_MULTI:
        dumping = True

    with nfct.Conntrack() as ct:
        try:
            ct.nlmsg_parse(nlh)
        except Exception as e:
            log.error("nlmsg_parse: %s" % e)
            return mnl.MNL_CB_OK

        # CTA_MARK:
        # if you want to filter by mark - only want event entries whose mark is one
        # if not mark_cmp(ct, 1, 0xffffffff):
        #     return mnl.MNL_CB_OK

        t = make_tuple(ct)
        if t is None:
            return mnl.MNL_CB_OK
        counter = nstats.setdefault(t, Counter(0, 0))
        if nlh.nlmsg_type & 0xff == nfnlct.IPCTNL_MSG_CT_DELETE:
            counter.deleting = True

        try:
            orig_packets = ct.get_attr_u64(nfct.ATTR_ORIG_COUNTER_PACKETS)
        except Exception as e:
            log.error("could not get ORIG_COUNTER_PACKETS: %s" % e)
            return mnl.MNL_CB_OK
        try:
            repl_packets = ct.get_attr_u64(nfct.ATTR_REPL_COUNTER_PACKETS)
        except Exception as e:
            log.error("could not get REPL_COUNTER_PACKETS: %s" % e)
            return mnl.MNL_CB_OK
        if orig_packets + repl_packets == 0:
            return mnl.MNL_CB_OK

        try:
            orig_bytes = ct.get_attr_u64(nfct.ATTR_ORIG_COUNTER_BYTES)
        except Exception as e:
            log.error("could not get ORIG_COUNTER_BYTES: %s" % e)
            return mnl.MNL_CB_OK
        try:
            repl_bytes = ct.get_attr_u64(nfct.ATTR_REPL_COUNTER_BYTES)
        except Exception as e:
            log.error("could not get REPL_COUNTER_BYTES: %s" % e)
            return mnl.MNL_CB_OK

        counter.pkts += orig_packets + repl_packets
        counter.bytes += orig_bytes + repl_bytes

    return mnl.MNL_CB_OK


def start_periodic_task(secs, nl, q):
    """Executing periodic actions in Python
    http://stackoverflow.com/questions/8600161/executing-periodic-actions-in-python

    Unfortunately we could not acquire remainded time from Python's select
    """
    nlh = mnl.Nlmsg.put_new_header(mnl.MNL_SOCKET_BUFFER_SIZE)
    # Counters are atomically zerod in each dump
    nlh.nlmsg_type = (nfnl.NFNL_SUBSYS_CTNETLINK << 8) | nfnlct.IPCTNL_MSG_CT_GET_CTRZERO
    nlh.nlmsg_flags = netlink.NLM_F_REQUEST|netlink.NLM_F_DUMP
    nfh = nlh.put_extra_header_as(nfnl.Nfgenmsg)
    nfh.nfgen_family = socket.AF_INET
    nfh.version = nfnl.NFNETLINK_V0
    nfh.res_id = 0
    # if you want to filter by mark - only want to dump entries whose mark is one
    # nlh.put_u32(nfnlct.CTA_MARK, socket.htonl(1))
    # nlh.put_u32(nfnlct.CTA_MARK_MASK, socket.htonl(0xffffffff))

    args = (secs, nl, nlh, q)
    next_call = [time.time()]

    def _doit(secs, nl, nlh, q):
        global nstats
        global nstats_lock
        global dumping

        if dumping:
            log.warn("dump takes more than interval secs, increasing it is recommended")
        else:
            nl.send_nlmsg(nlh)	# XXX: no exception check

        with nstats_lock:
            now = int(next_call[0])
            listOfMetricTuples = []
            deleting_keys = []
            for k, v in nstats.iteritems():
                if v.deleting:
                    deleting_keys.append(k)
                if v.pkts == 0:
                    continue
                listOfMetricTuples.append((str(k), (now, v.bytes * 8 / secs)))	# bit per sec
                v.pkts = 0
                v.bytes = 0
            for k in deleting_keys:
                del nstats[k]
            log.info("deletig #: %d" % len(deleting_keys))
            log.info("current nstats #: %d" % len(nstats))
        q.put(listOfMetricTuples)

        next_call[0] += secs
        # it seems negative first param is allowed
        t = threading.Timer(next_call[0] - time.time(), _doit, args)
        t.daemon = True
        t.start()

    _doit(*args)


def send_process(sk, q):
    """sending list of metrics to carbon
    """
    while True:
        listOfMetricTuples = q.get()
        if listOfMetricTuples is None:	# None is the shutdown sentinel
            return
        if len(listOfMetricTuples) == 0:
            continue
        payload = pickle.dumps(listOfMetricTuples)
        header = struct.pack("!L", len(payload))
        message = header + payload
        sk.sendall(message)
        log.info("sent entries #: %d, size: %d" % (len(listOfMetricTuples), len(message)))


def mnl_socket_poll(nl):
    """polling for mmaped nl socket
    """
    fd = nl.get_fd()
    p = select.poll()
    while True:
        p.register(fd, select.POLLIN | select.POLLERR)
        try:
            events = p.poll(-1)
        except select.error as e:
            if e[0] == errno.EINTR:	# by SIGALRM
                continue
            raise
        for efd, event in events:
            if efd == fd:
                if event == select.POLLIN:
                    return
                if event == select.POLLERR:
                    s = socket.fromfd(nl.get_fd(), socket.AF_NETLINK, socket.SOCK_RAW)
                    sock_err = s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
                    if sock_err != 0:
                        raise OSError(sock_err, errno.errorcode[sock_err])


def main():
    """Set up the netlink mmap socket, start the carbon sender process and
    the periodic dump task, then run the receive/aggregate loop forever."""
    global nstats
    global nstats_lock
    global dumping

    if len(sys.argv) != 2:
        print("Usage: %s <poll-secs>" % sys.argv[0])
        sys.exit(1)
    secs = int(sys.argv[1])
    print("Polling every %s seconds from kernel..." % secs)

    # Set high priority for this process, less chances to overrun
    # the netlink receiver buffer since the scheduler gives this process
    # more chances to run
    os.nice(-20)

    # setup netlink socket - see examples/netfilter/nfct-daemon in libmnl
    nl = mnl.Socket(netlink.NETLINK_NETFILTER)
    nl.set_ringopt(mnl.MNL_RING_RX,
                   mnl.MNL_SOCKET_BUFFER_SIZE, 64,
                   mnl.MNL_SOCKET_BUFFER_SIZE / 4, 4 * 64)
    nl.map_ring()
    rxring = nl.get_ring(mnl.MNL_RING_RX)
    nl.bind(nfnlcm.NF_NETLINK_CONNTRACK_DESTROY, mnl.MNL_SOCKET_AUTOPID)
    sock = socket.fromfd(nl.get_fd(), socket.AF_NETLINK, socket.SOCK_RAW)
    sock.setsockopt(socket.SOL_SOCKET, 33, 1 << 22)	# SO_RCVBUFFORCE
    on = struct.pack("i", 1)
    nl.setsockopt(netlink.NETLINK_BROADCAST_ERROR, on)
    nl.setsockopt(netlink.NETLINK_NO_ENOBUFS, on)

    # start the single carbon sender process.  (This block used to appear
    # twice, leaking an extra connected socket and an idle child process
    # whose queue was never serviced.)
    q = multiprocessing.Queue(8)	# from periodic task to carbon sender
    carbon_socket = socket.socket()
    carbon_socket.connect((CARBON_SERVER, CARBON_PORT))
    p = multiprocessing.Process(target=send_process, args=(carbon_socket, q))
    p.start()

    # start periodic task
    start_periodic_task(secs, nl, q)

    # receive and aggregate loop
    cpbuf = bytearray(mnl.MNL_SOCKET_BUFFER_SIZE)
    while True:
        frame = rxring.get_frame()
        if frame.status == netlink.NL_MMAP_STATUS_VALID:
            buf = mnl.MNL_FRAME_PAYLOAD(frame)
        elif frame.status == netlink.NL_MMAP_STATUS_COPY:
            rsize = nl.recv_into(cpbuf)	# XXX: no exception check
            buf = cpbuf[:rsize]
        else:
            mnl_socket_poll(nl)		# XXX: no exception check
            continue

        with nstats_lock:
            ret = mnl.cb_run(buf, 0, 0, data_cb, None)
        if ret == mnl.MNL_CB_STOP:
            dumping = False
        elif ret < 0:
            q.put(None)			# let sending process finish
            sys.exit(-1)

        frame.status = netlink.NL_MMAP_STATUS_UNUSED
        rxring.advance()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        # format='%(asctime)s %(levelname)s %(module)s.%(funcName)s line: %(lineno)d %(message)s')
                        format='%(asctime)s %(levelname)s: %(message)s')
    main()
chamaken/cpylmnfct
examples/nfct-mnl-graphite-mmap.py
Python
gpl-2.0
14,893
# -*- coding: utf-8 -*- from collections import namedtuple SQLStatement = namedtuple('SQLStatement', 'sql_template, default_joins, default_where') overview_running_runs_count = ''' SELECT IF(stop_date is null, 'running', 'finished') AS stop_status, COUNT(*) AS total_count FROM test_runs INNER JOIN test_plans ON (test_runs.plan_id = test_plans.plan_id) WHERE test_plans.product_id = %s GROUP BY stop_status WITH ROLLUP''' overview_case_run_status_count = ''' SELECT tcrss.name, COUNT(*) AS total_count FROM test_case_run_status AS tcrss INNER JOIN test_case_runs AS tcrs ON (tcrss.case_run_status_id = tcrs.case_run_status_id) INNER JOIN test_runs AS trs ON (tcrs.run_id = trs.run_id) INNER JOIN test_plans AS tps ON (trs.plan_id = tps.plan_id) WHERE tps.product_id = %s GROUP BY tcrss.name WITH ROLLUP''' build_builds_total_runs_count = ''' SELECT test_runs.build_id, COUNT(*) AS total_count FROM test_runs INNER JOIN test_builds ON (test_runs.build_id = test_builds.build_id) WHERE test_builds.product_id = %s GROUP BY test_runs.build_id''' build_builds_finished_runs_count = ''' SELECT test_runs.build_id, COUNT(*) AS total_count FROM test_runs INNER JOIN test_builds ON (test_runs.build_id = test_builds.build_id) WHERE test_builds.product_id = %s AND test_runs.stop_date IS NOT NULL GROUP BY test_runs.build_id''' build_finished_caseruns_count = ''' SELECT tbs.build_id, COUNT(*) AS total_count FROM test_builds AS tbs INNER JOIN test_runs AS trs ON (trs.build_id = tbs.build_id) INNER JOIN test_case_runs AS tcrs ON (tcrs.run_id = trs.run_id) WHERE tbs.product_id = %s AND tcrs.case_run_status_id NOT IN (1, 4, 5, 6) GROUP BY tbs.build_id''' build_failed_caseruns_count = ''' SELECT tbs.build_id, COUNT(*) AS total_count FROM test_builds AS tbs INNER JOIN test_runs AS trs ON (trs.build_id = tbs.build_id) INNER JOIN test_case_runs AS tcrs ON (tcrs.run_id = trs.run_id) INNER JOIN test_case_run_status AS tcrss ON (tcrs.case_run_status_id = tcrss.case_run_status_id) WHERE tbs.product_id = %s 
AND tcrss.name = 'FAILED' GROUP BY tbs.build_id''' build_caseruns_count = ''' SELECT tbs.build_id, COUNT(*) AS total_count FROM test_builds AS tbs INNER JOIN test_runs AS trs ON (trs.build_id = tbs.build_id) INNER JOIN test_case_runs AS tcrs ON (tcrs.run_id = trs.run_id) WHERE tbs.product_id = %s GROUP BY tbs.build_id''' build_caserun_status_subtotal = ''' SELECT tcrss.name, COUNT(*) AS total_count FROM test_case_run_status AS tcrss INNER JOIN test_case_runs AS tcrs ON (tcrss.case_run_status_id = tcrs.case_run_status_id) INNER JOIN test_runs AS trs ON (trs.run_id = tcrs.run_id) INNER JOIN test_builds AS tbs ON (trs.build_id = tbs.build_id) WHERE tbs.product_id = %s AND tbs.build_id = %s GROUP BY tcrss.name WITH ROLLUP''' component_total_cases = ''' SELECT tccs.component_id, COUNT(*) AS total_count FROM test_case_components AS tccs INNER JOIN components ON (tccs.component_id = components.id) WHERE components.product_id = %s GROUP BY tccs.component_id''' component_failed_case_runs_count = ''' SELECT tccs.component_id, COUNT(*) AS total_count FROM test_case_components AS tccs INNER JOIN components ON (tccs.component_id = components.id) INNER JOIN test_cases AS tcs ON (tccs.case_id = tcs.case_id) INNER JOIN test_case_runs AS tcrs ON (tcs.case_id = tcrs.case_id) WHERE tcrs.case_run_status_id = 3 AND components.product_id = %s GROUP BY tccs.component_id''' component_finished_case_runs_count = ''' SELECT tccs.component_id, COUNT(*) AS total_count FROM test_case_components AS tccs INNER JOIN components ON (tccs.component_id = components.id) INNER JOIN test_cases AS tcs ON (tccs.case_id = tcs.case_id) INNER JOIN test_case_runs AS tcrs ON (tcs.case_id = tcrs.case_id) WHERE tcrs.case_run_status_id NOT IN (1, 4, 5, 6) and components.product_id = %s GROUP BY tccs.component_id''' component_total_case_runs_count = ''' SELECT tccs.component_id, COUNT(*) AS total_count FROM test_case_components AS tccs INNER JOIN components ON (tccs.component_id = components.id) INNER JOIN 
test_cases AS tcs ON (tccs.case_id = tcs.case_id) INNER JOIN test_case_runs AS tcrs ON (tcs.case_id = tcrs.case_id) WHERE components.product_id = %s GROUP BY tccs.component_id''' component_case_runs_count = ''' SELECT tcrss.name, COUNT(*) AS total_count FROM test_case_run_status AS tcrss INNER JOIN test_case_runs AS tcrs ON (tcrss.case_run_status_id = tcrs.case_run_status_id) INNER JOIN test_cases AS tcs ON (tcrs.case_id = tcs.case_id) INNER JOIN test_case_components AS tccs ON (tcs.case_id = tccs.case_id) WHERE tccs.component_id = %s GROUP BY tcrss.name WITH ROLLUP''' version_plans_subtotal = ''' SELECT test_plans.product_version_id, count(*) as total_count FROM test_plans WHERE test_plans.product_id = %s GROUP BY test_plans.product_version_id''' version_running_runs_subtotal = ''' SELECT test_plans.product_version_id, COUNT(*) as total_count FROM test_plans INNER JOIN test_runs ON (test_plans.plan_id = test_runs.plan_id) WHERE test_plans.product_id = %s AND test_runs.stop_date IS NULL GROUP BY test_plans.product_version_id''' version_finished_runs_subtotal = ''' SELECT test_plans.product_version_id, COUNT(*) as total_count FROM test_plans INNER JOIN test_runs ON (test_plans.plan_id = test_runs.plan_id) WHERE test_plans.product_id = %s AND test_runs.stop_date IS NOT NULL GROUP BY test_plans.product_version_id''' version_cases_subtotal = ''' SELECT test_plans.product_version_id, COUNT(*) as total_count FROM test_plans INNER JOIN test_case_plans ON (test_plans.plan_id = test_case_plans.plan_id) WHERE test_plans.product_id = %s GROUP BY test_plans.product_version_id''' version_case_runs_subtotal = ''' SELECT test_plans.product_version_id, COUNT(*) AS total_count FROM test_plans INNER JOIN test_runs ON (test_plans.plan_id = test_runs.plan_id) INNER JOIN test_case_runs ON (test_runs.run_id = test_case_runs.run_id) WHERE test_plans.product_id = %s GROUP BY test_plans.product_version_id''' version_finished_case_runs_subtotal = ''' SELECT test_plans.product_version_id, 
COUNT(*) AS total_count FROM test_plans INNER JOIN test_runs ON (test_plans.plan_id = test_runs.plan_id) INNER JOIN test_case_runs ON (test_runs.run_id = test_case_runs.run_id) WHERE test_plans.product_id = %s AND test_case_runs.case_run_status_id NOT IN (1, 4, 5, 6) GROUP BY test_plans.product_version_id'''

# Failed case runs (case_run_status_id = 3) per product version; %s is bound
# to a product ID by the caller.
version_failed_case_runs_subtotal = ''' SELECT test_plans.product_version_id, COUNT(*) AS total_count FROM test_plans INNER JOIN test_runs ON (test_plans.plan_id = test_runs.plan_id) INNER JOIN test_case_runs ON (test_runs.run_id = test_case_runs.run_id) WHERE test_plans.product_id = %s AND test_case_runs.case_run_status_id = 3 GROUP BY test_plans.product_version_id'''

# Case-run counts per status name for one (product, product version) pair.
# WITH ROLLUP appends a grand-total row (NULL status name).
version_case_run_status_subtotal = ''' SELECT tcrss.name, COUNT(*) AS total_count FROM test_case_runs AS tcrs INNER JOIN test_case_run_status AS tcrss ON (tcrss.case_run_status_id = tcrs.case_run_status_id) INNER JOIN test_runs AS trs ON (tcrs.run_id = trs.run_id) INNER JOIN test_plans AS tps ON (trs.plan_id = tps.plan_id) WHERE tps.product_id = %s AND tps.product_version_id = %s GROUP BY tcrss.name WITH ROLLUP'''

### SQL for custom report ###
# NOTE(review): SQLStatement is defined elsewhere in this module/project; it
# presumably substitutes default_joins/default_where (plus caller-supplied
# clauses) into the %(joins)s / %(where)s placeholders -- confirm before
# editing these templates.
custom_builds = SQLStatement( sql_template=''' SELECT DISTINCT test_builds.build_id, test_builds.name FROM test_builds %(joins)s WHERE %(where)s''', default_joins=(), default_where=())

custom_builds_runs_subtotal = SQLStatement( sql_template=''' SELECT test_builds.build_id, COUNT(DISTINCT test_runs.run_id) AS total_count FROM test_builds %(joins)s WHERE %(where)s GROUP BY test_builds.build_id WITH ROLLUP''', default_joins=( 'INNER JOIN test_runs ON (test_builds.build_id = test_runs.build_id)', ), default_where=())

custom_builds_plans_subtotal = SQLStatement( sql_template=''' SELECT test_builds.build_id, COUNT(DISTINCT test_runs.plan_id) AS total_count FROM test_builds %(joins)s WHERE %(where)s GROUP BY test_builds.build_id WITH ROLLUP''', default_joins=( 'INNER JOIN test_runs ON (test_builds.build_id = test_runs.build_id)', ), default_where=())

custom_builds_cases_isautomated_subtotal = SQLStatement( sql_template=''' SELECT test_cases.isautomated, COUNT(DISTINCT test_cases.case_id) AS total_count FROM test_builds %(joins)s WHERE %(where)s GROUP BY test_cases.isautomated WITH ROLLUP''', default_joins=( 'INNER JOIN test_runs ON (test_builds.build_id = test_runs.build_id)', 'INNER JOIN test_case_runs ON (test_runs.run_id = test_case_runs.run_id)', 'INNER JOIN test_cases ON (test_case_runs.case_id = test_cases.case_id)' ), default_where=())

# Percentage of passed and failed case runs
# (statuses restricted to case_run_status_id IN (2, 3) by default_where).
custom_builds_case_runs_subtotal_by_status = SQLStatement( sql_template=''' SELECT test_builds.build_id, test_case_runs.case_run_status_id, COUNT(DISTINCT test_case_runs.case_run_id) AS total_count FROM test_builds %(joins)s WHERE %(where)s GROUP BY test_builds.build_id, test_case_runs.case_run_status_id''', default_joins=( 'INNER JOIN test_runs ON (test_builds.build_id = test_runs.build_id)', 'INNER JOIN test_case_runs ON (test_runs.run_id = test_case_runs.run_id)', ), default_where=( 'test_case_runs.case_run_status_id IN (2, 3)',))

custom_builds_case_runs_subtotal = SQLStatement( sql_template=''' SELECT test_builds.build_id, COUNT(DISTINCT test_case_runs.case_run_id) AS total_count FROM test_builds %(joins)s WHERE %(where)s GROUP BY test_builds.build_id''', default_joins=( 'INNER JOIN test_runs ON (test_builds.build_id = test_runs.build_id)', 'INNER JOIN test_case_runs ON (test_runs.run_id = test_case_runs.run_id)', ), default_where=())

# Per-plan, per-run, per-status counts for the builds given via the IN %s
# placeholder (a sequence of build IDs).
custom_details_status_matrix = ''' SELECT test_plans.plan_id, test_plans.name, test_runs.run_id, test_runs.summary, test_case_run_status.name, count(*) as total_count FROM test_plans INNER JOIN test_runs on (test_plans.plan_id = test_runs.plan_id) INNER JOIN test_case_runs on (test_case_runs.run_id = test_runs.run_id) INNER JOIN test_case_run_status on (test_case_runs.case_run_status_id = test_case_run_status.case_run_status_id) WHERE test_runs.build_id IN %s GROUP BY test_plans.plan_id, test_runs.run_id, test_case_runs.case_run_status_id ORDER BY test_plans.plan_id, test_runs.run_id'''

# Comments attached to case runs (django_comments generic relation); the
# three placeholders are the case-run content-type ID, a sequence of build
# IDs and a sequence of case-run status IDs.
custom_details_case_runs_comments = ''' SELECT test_case_runs.case_run_id, django_comments.comment, django_comments.submit_date, auth_user.username FROM django_comments INNER JOIN auth_user ON (django_comments.user_id = auth_user.id) INNER JOIN test_case_runs ON (django_comments.object_pk = test_case_runs.case_run_id) INNER JOIN test_runs ON (test_runs.run_id = test_case_runs.run_id) WHERE django_comments.content_type_id = %s AND django_comments.site_id = 1 AND django_comments.is_removed = 0 AND test_runs.build_id IN %s AND test_case_runs.case_run_status_id IN %s'''

#### Testing report #######
# In the templates below, {0} is filled via str.format() with a WHERE
# condition string built by the caller.
testing_report_plans_total = ''' select count(distinct test_plans.plan_id) as total_count from test_runs inner join test_plans on (test_runs.plan_id = test_plans.plan_id) inner join test_builds on (test_runs.build_id = test_builds.build_id) where {0}'''

testing_report_runs_total = ''' SELECT COUNT(*) AS total_count FROM test_builds INNER JOIN test_runs ON (test_builds.build_id = test_runs.build_id) WHERE {0}'''

testing_report_case_runs_total = ''' SELECT COUNT(*) AS total_count FROM test_builds INNER JOIN test_runs ON (test_runs.build_id = test_builds.build_id) INNER JOIN test_case_runs ON (test_case_runs.run_id = test_runs.run_id) WHERE {0} '''

testing_report_runs_subtotal = ''' select test_runs.build_id, count(*) as total_count from test_runs inner join test_builds on (test_runs.build_id = test_builds.build_id) where {0} group by test_runs.build_id with rollup'''

# SQLs for report "By Case-Run Tester"
### Report data group by builds ###
by_case_run_tester_status_matrix_groupby_build = ''' select test_builds.build_id, test_case_runs.tested_by_id, test_case_run_status.name, count(*) as total_count from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_case_runs on (test_runs.run_id = test_case_runs.run_id) inner join test_case_run_status on (test_case_run_status.case_run_status_id = test_case_runs.case_run_status_id) where {0} group by test_builds.build_id, test_case_runs.tested_by_id, test_case_run_status.name'''

# Distinct runs per (build, tester): the inner query deduplicates
# (build, tester, run) triples, the outer query counts them.
by_case_run_tester_runs_subtotal_groupby_build = ''' select build_id, tested_by_id, count(*) as total_count from ( select test_builds.build_id, test_case_runs.tested_by_id, test_case_runs.run_id from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_case_runs on (test_runs.run_id = test_case_runs.run_id) where {0} group by test_builds.build_id, test_case_runs.tested_by_id, test_case_runs.run_id ) as t1 group by build_id, tested_by_id'''

### Report data WITHOUT selecting builds ###
by_case_run_tester_status_matrix = ''' select test_case_runs.tested_by_id, test_case_run_status.name, count(*) as total_count from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_case_runs on (test_runs.run_id = test_case_runs.run_id) inner join test_case_run_status on (test_case_run_status.case_run_status_id = test_case_runs.case_run_status_id) where {0} group by test_case_runs.tested_by_id, test_case_run_status.name'''

by_case_run_tester_runs_subtotal = ''' select tested_by_id, count(*) as total_count from ( select test_case_runs.tested_by_id, test_case_runs.run_id from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_case_runs on (test_runs.run_id = test_case_runs.run_id) where {0} group by test_case_runs.tested_by_id, test_case_runs.run_id ) as t1 group by tested_by_id'''

### Report data By Case Priority ###
by_case_priority_subtotal = ''' select test_builds.build_id, priority.id as priority_id, priority.value as priority_value, test_case_run_status.name, count(*) as total_count from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_case_runs on (test_runs.run_id = test_case_runs.run_id) inner join test_cases on (test_case_runs.case_id = test_cases.case_id) inner join test_case_run_status on ( test_case_runs.case_run_status_id = test_case_run_status.case_run_status_id) inner join priority on (test_cases.priority_id = priority.id) where {0} group by test_builds.build_id, priority.id, test_case_run_status.name'''

### Report data By Plan Tags ###
# left join keeps untagged plans (NULL tag_id) in the result set.
by_plan_tags_plans_subtotal = ''' select test_plan_tags.tag_id, count(distinct test_plans.plan_id) as total_count from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_plans on (test_runs.plan_id = test_plans.plan_id) left join test_plan_tags on (test_plans.plan_id = test_plan_tags.plan_id) where {0} group by test_plan_tags.tag_id'''

by_plan_tags_runs_subtotal = ''' select test_plan_tags.tag_id, count(distinct test_runs.run_id) as total_count from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_plans on (test_runs.plan_id = test_plans.plan_id) left join test_plan_tags on (test_plans.plan_id = test_plan_tags.plan_id) where {0} group by test_plan_tags.tag_id'''

by_plan_tags_passed_failed_case_runs_subtotal = ''' select test_plan_tags.tag_id, test_case_run_status.name, count(distinct test_case_runs.case_run_id) as total_count from test_plans inner join test_runs on (test_plans.plan_id = test_runs.plan_id) inner join test_case_runs on (test_runs.run_id = test_case_runs.run_id) inner join test_case_run_status on (test_case_run_status.case_run_status_id = test_case_runs.case_run_status_id) inner join test_builds on (test_builds.build_id = test_runs.build_id) left join test_plan_tags on (test_plans.plan_id = test_plan_tags.plan_id) where test_case_run_status.name in ('PASSED', 'FAILED') and {0} group by test_plan_tags.tag_id, test_case_run_status.name'''

### Report data of details of By Plan Tags ###
by_plan_tags_detail_status_matrix = ''' select test_plan_tags.tag_id, test_builds.build_id, test_builds.name as build_name, test_plans.plan_id, test_plans.name as plan_name, test_runs.run_id, test_runs.summary, test_case_run_status.name as status_name, count(*) as total_count from test_builds inner join test_runs on (test_builds.build_id = test_runs.build_id) inner join test_plans on (test_runs.plan_id = test_plans.plan_id) inner join test_case_runs on (test_runs.run_id = test_case_runs.run_id) inner join test_case_run_status on (test_case_runs.case_run_status_id = test_case_run_status.case_run_status_id) left join test_plan_tags on (test_plans.plan_id = test_plan_tags.plan_id) where {0} group by test_plan_tags.tag_id, test_builds.build_id, test_plans.plan_id, test_runs.run_id, test_case_run_status.name'''

### Report data of By Plan Build ###
by_plan_build_builds_subtotal = ''' select test_plans.plan_id, test_plans.name, count(distinct test_builds.build_id) as total_count from test_builds inner join test_runs on (test_runs.build_id = test_builds.build_id) inner join test_plans on (test_runs.plan_id = test_plans.plan_id) where {0} group by test_runs.plan_id'''

by_plan_build_runs_subtotal = ''' select test_plans.plan_id, test_plans.name, count(distinct test_runs.run_id) as total_count from test_builds inner join test_runs on (test_runs.build_id = test_builds.build_id) inner join test_plans on (test_runs.plan_id = test_plans.plan_id) where {0} group by test_runs.plan_id'''

by_plan_build_status_matrix = ''' select test_plans.plan_id, test_plans.name, test_case_run_status.name, count(distinct test_runs.run_id) as total_count from test_builds inner join test_runs on (test_runs.build_id = test_builds.build_id) inner join test_plans on (test_runs.plan_id = test_plans.plan_id) inner join test_case_runs on (test_case_runs.run_id = test_runs.run_id) inner join test_case_run_status on ( test_case_runs.case_run_status_id = test_case_run_status.case_run_status_id) where test_case_run_status.name in ('PASSED', 'FAILED') AND {0} group by test_runs.plan_id, test_case_run_status.name'''

### Report data of By Plan Build detail ###
by_plan_build_detail_status_matrix = ''' select test_runs.plan_id, test_plans.name as plan_name, test_runs.build_id, test_builds.name as build_name, test_runs.run_id, test_runs.summary as run_summary, test_case_run_status.name as status_name, count(*) as total_count from test_builds inner join test_runs on (test_runs.build_id = test_builds.build_id) inner join test_plans on (test_runs.plan_id = test_plans.plan_id) inner join test_case_runs on (test_case_runs.run_id = test_runs.run_id) inner join test_case_run_status on ( test_case_runs.case_run_status_id = test_case_run_status.case_run_status_id) where {0} group by test_runs.plan_id, test_runs.build_id, test_runs.run_id, test_case_run_status.name'''
ShaolongHu/Nitrate
tcms/report/sqls.py
Python
gpl-2.0
19,100
from __future__ import absolute_import, unicode_literals import email import logging from email.utils import formataddr from collections import defaultdict from django.conf import settings from django.contrib.auth.models import User from django.contrib.sites.models import Site from django.core.urlresolvers import reverse from django.db.models import Q from django.template.loader import render_to_string from django.utils import six, timezone from django.utils.datastructures import MultiValueDict from django.utils.six.moves.urllib.parse import urljoin from djblets.mail.message import EmailMessage as DjbletsEmailMessage from djblets.siteconfig.models import SiteConfiguration from djblets.auth.signals import user_registered from reviewboard.accounts.models import ReviewRequestVisit from reviewboard.admin.server import get_server_url from reviewboard.reviews.models import Group, ReviewRequest, Review from reviewboard.reviews.signals import (review_request_published, review_published, reply_published, review_request_closed) from reviewboard.reviews.views import build_diff_comment_fragments # A mapping of signals to EmailHooks. _hooks = defaultdict(set) def _ensure_unicode(text): """Return a unicode object for the given text. Args: text (bytes or unicode): The text to decode. Returns: unicode: The decoded text. """ if isinstance(text, bytes): text = text.decode('utf-8') return text def register_email_hook(signal, handler): """Register an e-mail hook. Args: signal (django.dispatch.Signal): The signal that will trigger the e-mail to be sent. This is one of :py:data:`~reviewboard.reviews.signals.review_request_published`, :py:data:`~reviewboard.reviews.signals.review_request_closed`, :py:data:`~reviewboard.reviews.signals.review_published`, or :py:data:`~reviewboard.reviews.signals.reply_published`. handler (reviewboard.extensions.hooks.EmailHook): The ``EmailHook`` that will be triggered when an e-mail of the chosen type is about to be sent. 
""" assert signal in (review_request_published, review_request_closed, review_published, reply_published), ( 'Invalid signal %r' % signal) _hooks[signal].add(handler) def unregister_email_hook(signal, handler): """Unregister an e-mail hook. Args: signal (django.dispatch.Signal): The signal that will trigger the e-mail to be sent. This is one of :py:data:`~reviewboard.reviews.signals.review_request_published`, :py:data:`~reviewboard.reviews.signals.review_request_closed`, :py:data:`~reviewboard.reviews.signals.review_published`, or :py:data:`~reviewboard.reviews.signals.reply_published`. handler (reviewboard.extensions.hooks.EmailHook): The ``EmailHook`` that will be triggered when an e-mail of the chosen type is about to be sent. """ assert signal in (review_request_published, review_request_closed, review_published, reply_published), ( 'Invalid signal %r' % signal) _hooks[signal].discard(handler) def review_request_closed_cb(sender, user, review_request, type, **kwargs): """Send e-mail when a review request is closed. Listens to the :py:data:`~reviewboard.reviews.signals.review_request_closed` signal and sends an e-mail if this type of notification is enabled (through the ``mail_send_review_close_mail`` site configuration setting). """ siteconfig = SiteConfiguration.objects.get_current() if siteconfig.get('mail_send_review_close_mail'): mail_review_request(review_request, user, close_type=type) def review_request_published_cb(sender, user, review_request, trivial, changedesc, **kwargs): """Send e-mail when a review request is published. Listens to the :py:data:`~reviewboard.reviews.signals.review_request_published` signal and sends an e-mail if this type of notification is enabled through the ``mail_send_review_mail`` site configuration setting). 
""" siteconfig = SiteConfiguration.objects.get_current() if siteconfig.get('mail_send_review_mail') and not trivial: mail_review_request(review_request, user, changedesc) def review_published_cb(sender, user, review, to_submitter_only, **kwargs): """Send e-mail when a review is published. Listens to the :py:data:`~reviewboard.reviews.signals.review_published` signal and sends e-mail if this type of notification is enabled through the ``mail_send_review_mail`` site configuration setting). """ siteconfig = SiteConfiguration.objects.get_current() if siteconfig.get('mail_send_review_mail'): mail_review(review, user, to_submitter_only) def reply_published_cb(sender, user, reply, trivial, **kwargs): """Send e-mail when a review reply is published. Listens to the :py:data:`~reviewboard.reviews.signals.reply_published` signal and sends an e-mail if this type of notification is enabled (through ``mail_send_review_mail`` site configuration). """ siteconfig = SiteConfiguration.objects.get_current() if siteconfig.get('mail_send_review_mail') and not trivial: mail_reply(reply, user) def user_registered_cb(user, **kwargs): """Send e-mail when a user is registered. Listens for new user registrations and sends a new user registration e-mail to administrators, if this type of notification is enabled (through ``mail_send_new_user_mail`` site configuration). """ siteconfig = SiteConfiguration.objects.get_current() if siteconfig.get('mail_send_new_user_mail'): mail_new_user(user) def connect_signals(): """Connect e-mail callbacks to signals.""" review_request_published.connect(review_request_published_cb, sender=ReviewRequest) review_published.connect(review_published_cb, sender=Review) reply_published.connect(reply_published_cb, sender=Review) review_request_closed.connect(review_request_closed_cb, sender=ReviewRequest) user_registered.connect(user_registered_cb) def build_email_address(fullname, email): """Build an e-mail address for the name and e-mail address. 
Args: fullname (unicode): The full name associated with the e-mail address (or ``None``). email (unicode): The e-mail address. Returns: unicode: A properly formatted e-mail address. """ return formataddr((fullname, email)) def get_email_address_for_user(user): """Build an e-mail address for the given user. Args: user (django.contrib.auth.models.User): The user. Returns: unicode: A properly formatted e-mail address for the user. """ return build_email_address(user.get_full_name(), user.email) def get_email_addresses_for_group(group, review_request_id=None): """Build a list of e-mail addresses for the group. Args: group (reviewboard.reviews.models.Group): The review group to build the e-mail addresses for. Returns: list: A list of properly formatted e-mail addresses for all users in the review group. """ addresses = [] if group.mailing_list: if ',' not in group.mailing_list: # The mailing list field has only one e-mail address in it, # so we can just use that and the group's display name. addresses = [build_email_address(group.display_name, group.mailing_list)] else: # The mailing list field has multiple e-mail addresses in it. # We don't know which one should have the group's display name # attached to it, so just return their custom list as-is. 
addresses = group.mailing_list.split(',') if not (group.mailing_list and group.email_list_only): users_q = Q(is_active=True) local_site = group.local_site if local_site: users_q = users_q & (Q(local_site=local_site) | Q(local_site_admins=local_site)) users = group.users.filter(users_q).select_related('profile') if review_request_id: users = users.extra(select={ 'visibility': """ SELECT accounts_reviewrequestvisit.visibility FROM accounts_reviewrequestvisit WHERE accounts_reviewrequestvisit.review_request_id = %s AND accounts_reviewrequestvisit.user_id = reviews_group_users.user_id """ % review_request_id }) addresses.extend([ get_email_address_for_user(u) for u in users if (u.should_send_email() and (not review_request_id or u.visibility != ReviewRequestVisit.MUTED)) ]) return addresses class EmailMessage(DjbletsEmailMessage): """The Review Board EmailMessage subclass. This class only differs from Djblets' :py:class:`EmailMessage <djblets.email.message.EmailMessage>` by using the site configuration to generate some e-mail settings. """ def __init__(self, subject, text_body, html_body, from_email, sender, to, cc=None, in_reply_to=None, headers=None): siteconfig = SiteConfiguration.objects.get_current() auto_generated = siteconfig.get('mail_enable_autogenerated_header') super(EmailMessage, self).__init__( subject=subject, text_body=text_body, html_body=html_body, from_email=from_email, to=to, cc=cc, sender=sender, in_reply_to=in_reply_to, headers=headers, auto_generated=auto_generated, prevent_auto_responses=True) def build_recipients(user, review_request, extra_recipients=None, limit_recipients_to=None): """Build the recipient sets for an e-mail. By default, the user sending the e-mail, the review request submitter (if they are active), all active reviewers, and all active members of review groups will be recipients of the e-mail. 
If the ``limit_recipients_to`` parameter is provided, the given ``user`` and the review request submitter (if active) will still be recipients of the e-mail, but all reviewers and members of review groups will not. Instead, the recipients given in ``limit_recipients_to`` will be used. Args: user (django.contrib.auth.models.User): The user sending the e-mail. review_request (reviewboard.reviews.models.ReviewRequest): The review request the e-mail corresponds to. extra_recipients (list): An optional list of extra recipients as :py:class:`Users <django.contrib.auth.models.User>` and :py:class:`Groups <reviewboard.reviews.models.Group>` that will receive the e-mail. limit_recipients_to (list): An optional list of recipients as :py:class:`Users <django.contrib.auth.models.User>` and :py:class:`Groups <reviewboard.reviews.models.Group>` who will receive the e-mail in place of the normal recipients. Returns: tuple: A 2-tuple of the To field and the CC field, as sets of :py:class:`Users <django.contrib.auth.models.User>` and :py:class:`Groups <reviewboard.reviews.models.Group>`. """ recipients = set() to_field = set() local_site = review_request.local_site_id submitter = review_request.submitter target_people = review_request.target_people.filter(is_active=True).extra( select={ 'visibility': """ SELECT accounts_reviewrequestvisit.visibility FROM accounts_reviewrequestvisit WHERE accounts_reviewrequestvisit.review_request_id = reviews_reviewrequest_target_people.reviewrequest_id AND accounts_reviewrequestvisit.user_id = reviews_reviewrequest_target_people.user_id """ }) starred_users = User.objects.filter( is_active=True, profile__starred_review_requests=review_request, profile__should_send_email=True) local_site_q = Q() if local_site: # Filter out users who are on the reviewer list in some form or have # starred the review request but are no longer part of the LocalSite. 
local_site_q = (Q(local_site=local_site) | Q(local_site_admins=local_site)) target_people = target_people.filter(local_site_q) starred_users = starred_users.filter(local_site_q) if not extra_recipients: extra_recipients = User.objects.none() if user.should_send_email(): recipients.add(user) if submitter.is_active and submitter.should_send_email(): recipients.add(submitter) recipients.update(starred_users) def _filter_recipients(to_filter): """Filter the given recipients. All groups will be added to the resulting recipients. Only users with a matching local site will be added to the resulting recipients. Args: to_filter (list): A list of recipients as :py:class:`Users <django.contrib.auth.models.User>` and :py:class:`Groups <reviewboard.reviews.models.Group>`. """ pks = set() for recipient in to_filter: if isinstance(recipient, User): pks.add(recipient.pk) elif isinstance(recipient, Group): recipients.add(recipient) else: logging.error( 'Unexpected e-mail recipient %r; expected ' 'django.contrib.auth.models.User or ' 'reviewboard.reviews.models.Group.', recipient) if pks: filtered_users = User.objects.filter( Q(is_active=True, pk__in=pks), local_site_q) recipients.update( recipient for recipient in filtered_users.select_related('Profile') if recipient.should_send_email() ) if limit_recipients_to is not None: _filter_recipients(limit_recipients_to) else: _filter_recipients(extra_recipients) target_people = target_people.filter(is_active=True) to_field.update( recipient for recipient in target_people.select_related('Profile') if (recipient.should_send_email() and recipient.visibility != ReviewRequestVisit.MUTED) ) recipients.update(to_field) recipients.update(review_request.target_groups.all()) if not user.should_send_own_updates(): recipients.discard(user) to_field.discard(user) if to_field: cc_field = recipients.symmetric_difference(to_field) else: to_field = recipients cc_field = set() return to_field, cc_field def recipients_to_addresses(recipients, 
                            review_request_id=None):
    """Return the set of e-mail addresses for the recipients.

    Args:
        recipients (list):
            A list of :py:class:`Users <django.contrib.auth.models.User>` and
            :py:class:`Groups <reviewboard.reviews.models.Group>`.

        review_request_id (int):
            An optional review request ID; when given, group members who have
            muted that review request are excluded by
            :py:func:`get_email_addresses_for_group`.

    Returns:
        set: The e-mail addresses for all recipients.
    """
    addresses = set()

    for recipient in recipients:
        # Only Users and Groups are valid recipient types.
        assert isinstance(recipient, User) or isinstance(recipient, Group)

        if isinstance(recipient, User):
            addresses.add(get_email_address_for_user(recipient))
        else:
            addresses.update(get_email_addresses_for_group(recipient,
                                                           review_request_id))

    return addresses


def send_review_mail(user, review_request, subject, in_reply_to,
                     to_field, cc_field, text_template_name,
                     html_template_name, context=None, extra_headers=None):
    """Format and send an e-mail out.

    Args:
        user (django.contrib.auth.models.User):
            The user who is sending the e-mail.

        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request that the e-mail is about.

        subject (unicode):
            The subject of the e-mail address.

        in_reply_to (unicode):
            The e-mail message ID for threading.

        to_field (list):
            The recipients to send the e-mail to. This should be a list of
            :py:class:`Users <django.contrib.auth.models.User>` and
            :py:class:`Groups <reviewboard.reviews.models.Group>`.

        cc_field (list):
            The addresses to be CC'ed on the e-mail. This should be a list of
            :py:class:`Users <django.contrib.auth.models.User>` and
            :py:class:`Groups <reviewboard.reviews.models.Group>`.

        text_template_name (unicode):
            The name for the text e-mail template.

        html_template_name (unicode):
            The name for the HTML e-mail template.

        context (dict):
            Optional extra context to provide to the template.

        extra_headers (dict):
            Either a dict or
            :py:class:`~django.utils.datastructures.MultiValueDict` providing
            additional headers to send with the e-mail.

    Returns:
        unicode: The resulting e-mail message ID, or ``None`` if there was
        nothing to send.
    """
    current_site = Site.objects.get_current()
    local_site = review_request.local_site
    from_email = get_email_address_for_user(user)

    # Expand Users/Groups into concrete addresses; anything already in To
    # must not also appear in CC.
    to_field = recipients_to_addresses(to_field, review_request.id)
    cc_field = recipients_to_addresses(cc_field, review_request.id) - to_field

    if not user.should_send_own_updates():
        to_field.discard(get_email_address_for_user(user))

    if not to_field and not cc_field:
        # Nothing to send.
        return

    siteconfig = current_site.config.get()
    domain_method = siteconfig.get("site_domain_method")

    if not context:
        context = {}

    context['user'] = user
    context['domain'] = current_site.domain
    context['domain_method'] = domain_method
    context['review_request'] = review_request

    if review_request.local_site:
        context['local_site_name'] = review_request.local_site.name

    text_body = render_to_string(text_template_name, context)
    html_body = render_to_string(html_template_name, context)

    base_url = get_server_url(local_site=local_site)

    headers = MultiValueDict({
        'X-ReviewBoard-URL': [base_url],
        'X-ReviewRequest-URL': [urljoin(base_url,
                                        review_request.get_absolute_url())],
        'X-ReviewGroup': [', '.join(group.name for group in
                                    review_request.target_groups.all())],
    })

    if extra_headers:
        # Normalize plain dicts so headers.update() merges value lists
        # instead of replacing them.
        if not isinstance(extra_headers, MultiValueDict):
            extra_headers = MultiValueDict(
                (key, [value])
                for (key, value) in six.iteritems(extra_headers)
            )

        headers.update(extra_headers)

    if review_request.repository:
        headers['X-ReviewRequest-Repository'] = review_request.repository.name

    latest_diffset = review_request.get_latest_diffset()

    if latest_diffset:
        # Advertise every file touched by the latest diff, one
        # X-ReviewBoard-Diff-For header per file, for client-side filtering.
        modified_files = set()

        for filediff in latest_diffset.files.all():
            if filediff.deleted or filediff.copied or filediff.moved:
                modified_files.add(filediff.source_file)

            if filediff.is_new or filediff.copied or filediff.moved:
                modified_files.add(filediff.dest_file)

        for filename in modified_files:
            headers.appendlist('X-ReviewBoard-Diff-For', filename)

    sender = None

    if settings.DEFAULT_FROM_EMAIL:
        sender = build_email_address(user.get_full_name(),
                                     settings.DEFAULT_FROM_EMAIL)

        if sender == from_email:
            # RFC 2822 states that we should only include Sender if the
            # two are not equal.
            sender = None

    message = EmailMessage(subject.strip(), text_body.encode('utf-8'),
                           html_body.encode('utf-8'), from_email, sender,
                           list(to_field), list(cc_field), in_reply_to,
                           headers)

    try:
        message.send()
    except Exception:
        # Best-effort delivery: log and fall through so the caller can still
        # record the message ID.
        logging.exception("Error sending e-mail notification with subject "
                          "'%s' on behalf of '%s' to '%s'",
                          subject.strip(), from_email,
                          ','.join(list(to_field) + list(cc_field)))

    return message.message_id


def mail_review_request(review_request, user, changedesc=None,
                        close_type=None):
    """Send an e-mail representing the supplied review request.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request to send an e-mail about.

        user (django.contrib.auth.models.User):
            The user who triggered the e-mail (i.e., they published or closed
            the review request).

        changedesc (reviewboard.changedescs.models.ChangeDescription):
            An optional change description showing what has changed in the
            review request, possibly with explanatory text from the submitter.
            This is created when saving a draft on a public review request and
            will be ``None`` when publishing initially. This is used by the
            template to add contextual (updated) flags to inform people what
            has changed.

        close_type (unicode):
            How the review request was closed or ``None`` if it was published.
            If this is not ``None`` it must be one of
            :py:attr:`~reviewboard.reviews.models.ReviewRequest.SUBMITTED` or
            :py:attr:`~reviewboard.reviews.models.ReviewRequest.DISCARDED`.
    """
    # If the review request is not yet public or has been discarded, don't
    # send any mail. Relax the "discarded" rule when e-mails are sent on
    # closing review requests
    if (not review_request.public or
            (not close_type and review_request.status == 'D')):
        return

    summary = _ensure_unicode(review_request.summary)
    subject = "Review Request %d: %s" % (review_request.display_id,
                                         summary)
    reply_message_id = None

    if review_request.email_message_id:
        # Fancy quoted "replies"
        subject = "Re: " + subject
        reply_message_id = review_request.email_message_id
        extra_recipients = review_request.participants
    else:
        extra_recipients = None

    extra_context = {}

    if close_type:
        # On close, report against the latest public change description.
        changedesc = review_request.changedescs.filter(public=True).latest()

    limit_recipients_to = None

    if changedesc:
        extra_context['change_text'] = changedesc.text
        extra_context['changes'] = changedesc.fields_changed

        fields_changed = changedesc.fields_changed
        changed_field_names = set(fields_changed.keys())

        if (changed_field_names and
            changed_field_names.issubset(['target_people',
                                          'target_groups'])):
            # If the only changes are to the target reviewers, try to send a
            # much more targeted e-mail (rather than sending it out to
            # everyone, only send it to new people).
            limit_recipients_to = set()

            if 'target_people' in changed_field_names:
                user_pks = [
                    item[2]
                    for item in fields_changed['target_people']['added']
                ]

                limit_recipients_to.update(User.objects.filter(
                    pk__in=user_pks))

            if 'target_groups' in changed_field_names:
                group_pks = [
                    item[2]
                    for item in fields_changed['target_groups']['added']
                ]

                limit_recipients_to.update(Group.objects.filter(
                    pk__in=group_pks))

    submitter = review_request.submitter

    to_field, cc_field = build_recipients(submitter, review_request,
                                          extra_recipients,
                                          limit_recipients_to)

    extra_filter_kwargs = {}

    if close_type:
        signal = review_request_closed
        extra_filter_kwargs['close_type'] = close_type
    else:
        signal = review_request_published

    # Give extension-provided EmailHooks a chance to adjust the recipients.
    to_field, cc_field = filter_email_recipients_from_hooks(
        to_field, cc_field, signal, review_request=review_request, user=user,
        **extra_filter_kwargs)

    review_request.time_emailed = timezone.now()
    review_request.email_message_id = \
        send_review_mail(review_request.submitter, review_request, subject,
                         reply_message_id, to_field, cc_field,
                         'notifications/review_request_email.txt',
                         'notifications/review_request_email.html',
                         extra_context)
    review_request.save()


def mail_review(review, user, to_submitter_only):
    """Send an e-mail representing the supplied review.

    Args:
        review (reviewboard.reviews.models.Review):
            The review to send an e-mail about.

        user (django.contrib.auth.models.User):
            The user who published the review (passed through to the e-mail
            hooks).

        to_submitter_only (bool):
            Determines if the review is to the submitter only or not.
    """
    review_request = review.review_request

    if not review_request.public:
        return

    review.ordered_comments = \
        review.comments.order_by('filediff', 'first_line')

    extra_context = {
        'user': review.user,
        'review': review,
    }

    extra_headers = {}

    if review.ship_it:
        extra_headers['X-ReviewBoard-ShipIt'] = '1'

        if review.ship_it_only:
            extra_headers['X-ReviewBoard-ShipIt-Only'] = '1'

    has_error, extra_context['comment_entries'] = \
        build_diff_comment_fragments(
            review.ordered_comments, extra_context,
            "notifications/email_diff_comment_fragment.html")

    reviewer = review.user

    limit_to = None

    if to_submitter_only:
        limit_to = set([review_request.submitter, review.user])

    to_field, cc_field = build_recipients(reviewer, review_request,
                                          limit_recipients_to=limit_to)

    to_field, cc_field = filter_email_recipients_from_hooks(
        to_field, cc_field, review_published, review=review, user=user,
        review_request=review_request)

    summary = _ensure_unicode(review_request.summary)
    review.email_message_id = send_review_mail(
        reviewer,
        review_request,
        ('Re: Review Request %d: %s'
         % (review_request.display_id, summary)),
        review_request.email_message_id,
        to_field,
        cc_field,
        'notifications/review_email.txt',
        'notifications/review_email.html',
        extra_context,
        extra_headers=extra_headers)
    review.time_emailed = timezone.now()
    review.save()


def mail_reply(reply, user):
    """Send an e-mail representing the supplied reply to a review.

    Args:
        reply (reviewboard.reviews.models.Review):
            The review reply to send an e-mail about.

        user (django.contrib.auth.models.User):
            The user who published the reply (passed through to the e-mail
            hooks and used as the sender).
    """
    review = reply.base_reply_to
    review_request = review.review_request

    if not review_request.public:
        return

    extra_context = {
        'user': reply.user,
        'review': review,
        'reply': reply,
    }

    has_error, extra_context['comment_entries'] = \
        build_diff_comment_fragments(
            reply.comments.order_by('filediff', 'first_line'),
            extra_context,
            "notifications/email_diff_comment_fragment.html")

    reviewer = reply.user

    # Replies always go out to everyone who has participated in the thread.
    to_field, cc_field = build_recipients(reviewer, review_request,
                                          review_request.participants)

    to_field, cc_field = filter_email_recipients_from_hooks(
        to_field, cc_field, reply_published, reply=reply, user=user,
        review=review, review_request=review_request)

    summary = _ensure_unicode(review_request.summary)
    reply.email_message_id = send_review_mail(
        user,
        review_request,
        ('Re: Review Request %d: %s'
         % (review_request.display_id, summary)),
        review.email_message_id,
        to_field,
        cc_field,
        'notifications/reply_email.txt',
        'notifications/reply_email.html',
        extra_context)
    reply.time_emailed = timezone.now()
    reply.save()


def mail_new_user(user):
    """Send an e-mail to administrators for newly registered users.

    Args:
        user (django.contrib.auth.models.User):
            The user to send an e-mail about.
    """
    current_site = Site.objects.get_current()
    # NOTE(review): send_review_mail uses current_site.config.get() here,
    # while this path calls get_current() on the related manager -- confirm
    # the inconsistency is intentional.
    siteconfig = current_site.config.get_current()
    domain_method = siteconfig.get("site_domain_method")

    subject = "New Review Board user registration for %s" % user.username
    from_email = get_email_address_for_user(user)

    context = {
        'domain': current_site.domain,
        'domain_method': domain_method,
        'user': user,
        'user_url': reverse('admin:auth_user_change', args=(user.id,))
    }

    text_message = render_to_string('notifications/new_user_email.txt',
                                    context)
    html_message = render_to_string('notifications/new_user_email.html',
                                    context)

    message = EmailMessage(subject.strip(), text_message, html_message,
                           settings.SERVER_EMAIL, settings.SERVER_EMAIL,
                           [build_email_address(*a)
                            for a in settings.ADMINS],
                           None, None)

    try:
        message.send()
    except Exception as e:
        logging.error("Error sending e-mail notification with subject '%s' "
                      "on behalf of '%s' to admin: %s",
                      subject.strip(), from_email, e, exc_info=1)


def filter_email_recipients_from_hooks(to_field, cc_field, signal, **kwargs):
    """Filter the e-mail recipients through configured e-mail hooks.

    Args:
        to_field (set):
            The original To field of the e-mail, as a set of
            :py:class:`Users <django.contrib.auth.models.User>` and
            :py:class:`Groups <reviewboard.reviews.models.Group>`.

        cc_field (set):
            The original CC field of the e-mail, as a set of
            :py:class:`Users <django.contrib.auth.models.User>` and
            :py:class:`Groups <reviewboard.reviews.models.Group>`.

        signal (django.dispatch.Signal):
            The signal that triggered the e-mail.

        **kwargs (dict):
            Extra keyword arguments to pass to the e-mail hook.

    Returns:
        tuple:
        A 2-tuple of the To field and the CC field, as sets of
        :py:class:`Users <django.contrib.auth.models.User>` and
        :py:class:`Groups <reviewboard.reviews.models.Group>`.
    """
    if signal in _hooks:
        for hook in _hooks[signal]:
            to_field = hook.get_to_field(to_field, **kwargs)
            cc_field = hook.get_cc_field(cc_field, **kwargs)

    return to_field, cc_field


# Fixes bug #3613: force a plain-space continuation whitespace for folded
# headers by monkey-patching email.header.Header globally.
# NOTE(review): b' ' is a py2-era value; on Python 3 Header expects a str
# continuation_ws -- confirm against the supported Python versions.
_old_header_init = email.header.Header.__init__


def _unified_header_init(self, *args, **kwargs):
    kwargs['continuation_ws'] = b' '

    _old_header_init(self, *args, **kwargs)


email.header.Header.__init__ = _unified_header_init
beol/reviewboard
reviewboard/notifications/email.py
Python
mit
32,206
### / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / ### ### SCRIPT TO ANALYZE A TWEET CORPUS ### ### THIS SCRIPT READS DATA FROM A JSON FILE AND OFFERS THE USER ### THE FOLLOWING FOUR FUNCTIONS: ### - baseFreq() RETURNS UNIQUE WORDS IN THE CORPUS AS A ### DICTIONARY WITH THE WORDS' RESPECTIVE FREQUENCIES. ### ### - relFreq() RETURNS A DICTIONARY OF UNIQUE WORDS AND ### THEIR FREQUENCIES RELATIVE TO THE TOTAL # OF WORDS. ### ### - lexDiver() RETURNS THE LEXICAL DIVERSITY OF THE CORPUS, ### A RATIO OF UNIQUE WORDS TO TOTAL WORDS IN THE CORPUS. ### ### - topFreq(n) RETURNS THE TOP n WORDS IN THE CORPUS AND ### THE PROPORTION OF THE CORPUS THEY ACCOUNT FOR. ### n MUST BE SPECIFIED BY THE USER. ### ### ### WRITTEN BY Alberto M.H. FOR LINGUIST492A, FEBRUARY 2016 ### / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / ### import ast import json import operator from collections import Counter from collections import OrderedDict ### LOAD SCRUBBED CORPUS FROM JSON FILE. with open('output50k.json') as json_corpus: l_corpus = json.load(json_corpus) n = range(len(l_corpus)) ### n IS THE NUMBER OF TWEETS IN THE CORPUS. ### CREATE THE LIST l_lex AND POPULATE IT USING THE CONTENTS OF ### THE FIELD 'parsed_tweet' FROM THE JSON GENERATED BY THE SCRUBBER. l_lex = [] for twt in n: l_lex.extend(ast.literal_eval(l_corpus[twt]['parsed_tweet'])) ### CREATE THE SET OF UNIQUE WORDS IN CORPUS. unique_lex = set(l_lex) ### USE SET TO CREATE LIST OF UNIQUE WORDS IN ALPHABETICAL ORDER. unique_lex = list(unique_lex) unique_lex.sort() ### c_lex IS A DICTIONARY OF THE FORM {'unique_word': word_count} count = Counter(l_lex) c_lex = dict(count) ### LIST OF ('unique_word': word_count) TUPLES SORTED BY DECREASING FREQUENCY. 
### //ATTRIBUTION:// DICTIONARY SORTING BASED ON CODE FOUND AT: http://bit.ly/1KvaLod count_sort = sorted(c_lex.items(), key=operator.itemgetter(1), reverse=True) count_dict = dict(count_sort) ### // // // // // // // // // // // // // // // ### THE FUNCTION baseFreq PRINTS A DICTIONARY OF THE WORDS USED IN THE CORPUS ### AND THE NUMBER OF TIMES THEY ARE USED. def baseFreq(): count_sort = sorted(c_lex.items(), key=operator.itemgetter(1), reverse=True) count_dict = dict(count_sort) ### OUTPUT TO THE USER. Code to remove unicode formatting adapted from: bit.ly/1Py5OYT print "The following is a dictionary of the form {word: frequency} for individual words in the corpus:\n\n", ast.literal_eval(json.dumps(count_dict)) ### SOME COMPUTERS MAY STRAIN TO PRINT THE ENTIRE DICTIONARY. FOR CONVENIENCE ### THE BELOW LINES OF CODE MAY BE UN-COMMENTED AND USED TO WRITE THE ### DICTIONARY TO A FILE INSTEAD. # path_out = 'cA_baseFreq.json' # strout_bf = ast.literal_eval(json.dumps(count_dict)) # # with open(path_out, 'w') as outfile: # json.dump(strout_bf, outfile) # outfile.close() ### // // // // // // // // // // // // // // // ### THE FUNCTION relFreq COMPUTES THE RELATIVE FREQUENCY OF ALL WORDS PRESENT ### IN THE CORPUS AND PRINTS A DICTIONARY OF THE FORM {word: ratio} WHERE ### RATIO IS THE NUMBER OF TIMES A GIVEN WORD IS PRESENT IN THE CORPUS DIVIDED ### BY THE TOTAL NUMBER OF WORDS IN THE CORPUS. def relFreq(): freq_dict = dict() for key, value in count_dict.items(): freq_dict[key] = round(float(value) / len(count_dict), 5) freq_list = sorted(freq_dict.iteritems(), key=operator.itemgetter(1), reverse=True) ### OUTPUT TO THE USER. print freq_dict ### SOME COMPUTERS MAY STRAIN TO PRINT THE ENTIRE DICTIONARY. FOR CONVENIENCE ### THE BELOW LINES OF CODE MAY BE UN-COMMENTED AND USED TO WRITE THE ### DICTIONARY TO A FILE INSTEAD. 
# path_out = 'cA_relFreq.json' # strout_rf = ast.literal_eval(json.dumps(freq_dict)) # # with open(path_out, 'w') as outfile: # json.dump(strout_rf, outfile) # outfile.close() ### // // // // // // // // // // // // // // // ### THE FUNCTION lexDiver COMPUTES A RATIO OF THE UNIQUE WORDS IN THE CORPUS ### DIVIDED BY THE TOTAL NUMBER OF WORDS IN THE CORPUS. def lexDiver(): lex_diver = float(len(unique_lex)) / float(len(l_lex)) ### OUTPUT TO THE USER. print "The lexical diversity of the corpus is:", round(lex_diver, 3) ### // // // // // // // // // // // // // // // ### THE FUNCTION topFreq(n) COMPUTES AND PRINTS THE n MOST FREQUENT WORDS ### IN THE CORPUS. n MUST BE SPECIFIED BY THE USER. def topFreq(n): freq_dict = dict() ### freq_list LISTS WORDS IN ORDER OF DECREASING FREQUENCY. for key, value in count_dict.items(): freq_dict[key] = round(float(value) / len(count_dict), 3) freq_list = sorted(freq_dict.iteritems(), key=operator.itemgetter(1), reverse=True) ### top_ratio CALLS count_sort AND FINDS THE SUM OF THE FREQUENCIES OF THE ### TOP n WORDS AND DIVIDES IT BY THE TOTAL NUMBER OF WORDS IN THE CORPUS. top_ratio = float(sum(n for _, n in count_sort[:n])) / float(len(l_lex)) ### OUTPUT TO THE USER. if n == 0: print "Please enter a value greater that zero." elif n == 1: print "The most frequent word in the corpus is:\n\n", ast.literal_eval(json.dumps([x for x,_ in freq_list[:n]])) print "\nIt accounts for " + str(round(top_ratio * 100, 1)) + "% of words in the corpus." elif n > 1: print "The following are the top", n, "words in the corpus:", "\n\n", ast.literal_eval(json.dumps([x for x,_ in freq_list[:n]])) print "\nThey account for " + str(round(top_ratio * 100, 1)) + "% of words in the corpus." ### // // // // // // // // // // // // // // // ### FUNCTION TO INTRODUCE THE USER TO THE SCRIPT AND ITS FUNCTIONS. 
def welcomeUser(): print "This script calculates a variety of metrics for a given corpus.\n" print "Functions available include:\n - baseFreq() | Prints a dictionary of all individual words in the corpus and their respective frequencies.\n" print " - relFreq() | Prints a dictionary of individual words and their frequency relative to the total number of words in the corpus.\n" print " - lexDiver() | Returns the lexical diversity of the corpus, a ratio of unique words to total words.\n" print " - topFreq(n) | Returns the n most frequent words in the corpus and what proportion of the text they account for. n must be specified by the user.\n" ### // // // // // // // // // // // // // // // welcomeUser();
albertomh/py-tokentweet
corpusAnalyzer.py
Python
mit
6,613
# -*- coding: utf-8 -*-
# Asymmetric Base Framework - A collection of utilities for django frameworks
# Copyright (C) 2013 Asymmetric Ventures Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from __future__ import absolute_import, division, print_function, unicode_literals


class FieldPosition(object):
    """Marker base class for field-placement directives."""
    pass


class Before(FieldPosition):
    """Place a field of ``field_type`` immediately before ``target``."""

    def __init__(self, target, field_type):
        self.target, self.field_type = target, field_type


class After(FieldPosition):
    """Place a field of ``field_type`` immediately after ``target``."""

    def __init__(self, target, field_type):
        self.target, self.field_type = target, field_type


class Between(FieldPosition):
    """Place a field of ``field_type`` between ``before`` and ``after``."""

    def __init__(self, before, after, field_type):
        self.before, self.after, self.field_type = before, after, field_type
AsymmetricVentures/asym-displaymanager
asymmetricbase/displaymanager/field_position.py
Python
gpl-2.0
1,365
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defined log handler to be used to log to RPC connection.
"""
import logging

from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER
from ryu.services.protocols.bgp.net_ctrl import NOTIFICATION_LOG


class RpcLogHandler(logging.Handler):
    """Log handler that forwards formatted records over `NET_CONTROLLER`."""

    def emit(self, record):
        # Build the notification payload from the formatted record and
        # push it out as an RPC log notification.
        payload = {
            'level': record.levelname,
            'msg': self.format(record),
        }
        NET_CONTROLLER.send_rpc_notification(NOTIFICATION_LOG, payload)
pichuang/ryu
ryu/services/protocols/bgp/api/rpc_log_handler.py
Python
apache-2.0
1,161
#!/usr/bin/python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: t; c-basic-offset: 4; tab-width: 4 -*-
#
# main.py
# Copyright (C) 2014 Rigo Macario <rigomacario@localhost.localdomain>
#
# GradienteDescendente is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GradienteDescendente is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program.  If not, see <http://www.gnu.org/licenses/>.

# GTK front-end for a simple batch gradient-descent linear-regression demo.
# X and Y training data are read from two CSV files chosen by the user.

from gi.repository import Gtk, GdkPixbuf, Gdk
import os, sys, csv,math,random

#Comment the first line and uncomment the second before installing
#or making the tarball (alternatively, use project variables)
UI_FILE = "src/gradientedescendente.ui"
#UI_FILE = "/usr/local/share/gradientedescendente/ui/gradientedescendente.ui"


class GUI:
    """Main window: wires Glade widgets to the gradient-descent routines."""

    def __init__(self):
        # Load the Glade UI and connect its signals to this object's handlers.
        self.builder = Gtk.Builder()
        self.builder.add_from_file(UI_FILE)
        self.builder.connect_signals(self)
        window = self.builder.get_object('window')
        # Entry widgets: CSV paths, hyper-parameters and result display.
        self.eArchivoX = self.builder.get_object('eArchivoX')
        self.eArchivoY = self.builder.get_object('eArchivoY')
        self.entry_m = self.builder.get_object('entry_m')
        self.entry_n = self.builder.get_object('entry_n')
        self.entry_alfa = self.builder.get_object('entry_alfa')
        self.entry_tolerancia = self.builder.get_object('entry_tolerancia')
        self.entry_iteraciones = self.builder.get_object('entry_iteraciones')
        self.bLeerArchivos = self.builder.get_object('bLeerArchivos')
        self.bCalcular = self.builder.get_object('bCalcular')
        self.entry_results = self.builder.get_object('entry_results')
        # Working list of theta parameters; reset on every calculation run.
        self.tmpTethas=[];
        # ListStore backing the cost-per-evaluation tree view:
        # (evaluation number, cost value).
        self.store = Gtk.ListStore(int,float)
        self.treeCostos = self.builder.get_object('treeCostos')
        self.cntCosto=0
        # Column showing the running evaluation number.
        colNum = Gtk.TreeViewColumn("NUMERO")
        title2 = Gtk.CellRendererText()
        colNum.pack_start(title2, True)
        colNum.add_attribute(title2, "text", 0)
        self.treeCostos.append_column(colNum)
        # Column showing the cost value at that evaluation.
        colCosto = Gtk.TreeViewColumn("COSTO")
        title = Gtk.CellRendererText()
        colCosto.pack_start(title, True)
        colCosto.add_attribute(title, "text", 1)
        self.treeCostos.append_column(colCosto)
        self.treeCostos.set_model(self.store)
        window.show_all()

    # NOTE(review): the parameter names look swapped — as a bound method the
    # instance arrives first, so it is bound to `window` and the Gtk widget
    # to `self`.  Harmless here because neither argument is used, but confirm.
    def destroy(window, self):
        Gtk.main_quit()

    #*************************************************************************** METHOD ***************
    def getVarsXFromFile(self,fileX):
        # Read every X row from the CSV file into a list of rows (lists of
        # strings).  Returns an empty list when the path does not exist.
        xList=[];
        if(os.path.exists(fileX)):
            reader = csv.reader(open(fileX,'rb'))
            for index, row in enumerate(reader):
                xList.append(row);
        return xList;

    def getVarsYFromFile(self,fileY):
        # Read every Y row (single-column targets) from the CSV file.
        yList=[];
        if(os.path.exists(fileY)):
            reader = csv.reader(open(fileY,'rb'))
            for index, row in enumerate(reader):
                yList.append(row);
        return yList;

    def n(self,rutaX):
        # Parameter "n": number of feature columns in the X file minus one
        # (i.e. the highest feature index, column 0 presumably being the
        # bias term — TODO confirm against the data files).
        numVarsX=0;
        if(os.path.exists(rutaX)):
            reader =csv.reader(open(rutaX,'rb'))
            for index,row in enumerate(reader):
                numVarsX=len(row);
        return numVarsX-1;

    def m(self,rutaX):
        # Parameter "m": number of training rows in the X file minus one
        # (used as the highest row index by the loops below).
        lista=[];
        if(os.path.exists(rutaX)):
            reader =csv.reader(open(rutaX,'rb'))
            for index,row in enumerate(reader):
                lista.append(row)
        return len(lista)-1;

    def derivate(self,m,n,varx,vary,tethas):
        # Partial derivative of the cost with respect to theta_n:
        # sum over all rows of (h(x_i) - y_i) * x_i[n].
        # Here `n` is the feature/column index (passed as `k` by GradDesc).
        r=0.0
        for i in range (0,m+1):   # all rows 0..m
            r=r+(self.hxi(i,n,varx,vary,tethas)-self.yi(vary,i))*self.xi(varx,i,n);
        return r;

    def hxi(self,m,n,varsX,varsY,tethas):
        # Hypothesis h(x) for row `m`: dot product of thetas with that
        # row's feature values (columns 0..n).
        x=varsX[m];
        y=varsY[m];
        h=0.0;
        for i in range(0,n+1):
            h=h+float(tethas[i])*float(x[i]);
        return float(h);

    def costFunction(self,m,n,varx,vary,tethas):
        # Cost function J(theta).  Side effect: appends the computed cost
        # to self.store so the tree view shows cost per evaluation.
        # NOTE(review): `m*0.5` is m/2 but the original comment says 1/2m —
        # the standard constant is 1/(2m), i.e. `0.5/m`; confirm intent.
        cte=m*0.5;
        sumat=0.0;
        for i in range (0,m+1):
            sumat=sumat+(self.hxi(i,n,varx,vary,tethas)-self.yi(vary,i));
        # NOTE(review): this squares the SUM of residuals; the usual cost
        # sums the SQUARED residuals (square inside the loop).  Confirm.
        sumat=sumat**2
        self.store.append([self.cntCosto,float(sumat)])
        self.cntCosto=self.cntCosto+1
        costo=float(cte*sumat)
        return costo;

    def yi(self,varsY,m):
        # Target value y for row `m` (the single column of the Y file).
        varY=varsY[m];
        return float(varY[0]);

    def xi(self,varsX,m,n):
        # Feature value X_n^(m): column `n` of row `m`.
        varX=varsX[m];
        return float(varX[n])

    def GradDesc(self,iteraciones,m,n,varx,vary,tethas,alfa,tolerancia):
        # Batch gradient descent: seed self.tmpTethas with random integers
        # in [1, 10], then update all thetas simultaneously each iteration
        # until the iteration budget is spent or the cost drops to the
        # tolerance.  Returns the final theta list.
        # NOTE(review): this appends to self.tmpTethas rather than to the
        # `tethas` argument; callers must reset self.tmpTethas first
        # (calcularClicked does).
        for j in range(0,n+1):
            self.tmpTethas.append(float(random.randint(1,10)));
        i=0;
        while (i<iteraciones and self.costFunction(m,n,varx,vary,self.tmpTethas)>tolerancia):
            for k in range (0,n+1):
                self.tmpTethas[k]=self.tmpTethas[k]-(alfa/m)*self.derivate(m,k,varx,vary,self.tmpTethas);
            i=i+1;
        return self.tmpTethas;

    #*************************************************************************** HANDLERS ***************
    def leerClicked(self,widget):
        # "Read files" button: display the m and n dimensions of the X file.
        rutaX=self.eArchivoX.get_text()
        rutaY=self.eArchivoY.get_text()
        self.entry_m.set_text(str(self.m(rutaX)))
        self.entry_n.set_text(str(self.n(rutaX)))

    def calcularClicked(self,widget):
        # "Calculate" button: reset state, read hyper-parameters from the
        # entries, run gradient descent and display the resulting thetas.
        self.tmpTethas=[];
        self.cntCosto=0
        self.store.clear()
        self.treeCostos.set_model(self.store)
        rutaX=self.eArchivoX.get_text()
        rutaY=self.eArchivoY.get_text()
        iteraciones=int(self.entry_iteraciones.get_text())
        alfa=float(self.entry_alfa.get_text())
        tolerancia=float(self.entry_tolerancia.get_text())
        self.entry_results.set_text(str(self.GradDesc(iteraciones,self.m(rutaX),self.n(rutaX),self.getVarsXFromFile(rutaX),self.getVarsYFromFile(rutaY),self.tmpTethas,alfa,tolerancia)))
        self.treeCostos.set_model(self.store)


def main():
    # Build the window and hand control to the GTK main loop.
    app = GUI()
    Gtk.main()

if __name__ == "__main__":
    sys.exit(main())
rigomac/P1IA1
src/gradientedescendente.py
Python
gpl-3.0
5,997
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax.training.logging."""

import os

from absl.testing import absltest

from fedjax.training import logging


class LoggingTest(absltest.TestCase):

  def test_log_no_root_dir(self):
    # A logger built without a root directory must not allocate any
    # summary writers when a metric is logged.
    metrics_logger = logging.Logger()
    metrics_logger.log(
        writer_name='train', metric_name='loss', metric_value=4., round_num=0)
    self.assertEmpty(metrics_logger._summary_writers)

  def test_log_root_dir(self):
    # Logging through two distinct writer names must create exactly one
    # subdirectory per writer under the root directory.
    root_dir = self.create_tempdir()
    metrics_logger = logging.Logger(root_dir)
    for writer_name, loss in (('train', 4.1), ('eval', 5.3)):
      metrics_logger.log(
          writer_name=writer_name,
          metric_name='loss',
          metric_value=loss,
          round_num=0)
    self.assertCountEqual(os.listdir(root_dir), ['train', 'eval'])


if __name__ == '__main__':
  absltest.main()
google/fedjax
fedjax/training/logging_test.py
Python
apache-2.0
1,360
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import telebot
import sys

reload(sys)
sys.setdefaultencoding("utf-8")

# Token looks like 123456789:ABCd1eFg2HijklmnOpqrstUvWXyz12A345
bot = telebot.TeleBot("YOUR TOKEN HERE")


# FIX: the /start, /about and /help handlers were all named `send_welcome`,
# so each definition shadowed the previous one at module level.  They are
# registered via decorators so dispatch still worked, but the duplicate
# names were confusing and hid the earlier functions; each handler now has
# a distinct, descriptive name.  Registration behaviour is unchanged.

@bot.message_handler(commands=['start'])
def send_welcome(message):
    """Greet the user by first name in reply to /start."""
    una = message.from_user.first_name
    bot.reply_to(message, "Hello " + str(una))


@bot.message_handler(commands=['about'])
def send_about(message):
    """Reply to /about with information about this bot."""
    bot.reply_to(message,
                 "Hi there\nl This bot was based on the: [pyTelegramBotAPI](https://github.com/eternnoir/pyTelegramBotAPI/)",
                 disable_web_page_preview="True",
                 parse_mode="Markdown")


@bot.message_handler(commands=['help'])
def send_help(message):
    """Reply to /help."""
    bot.reply_to(message, "Why do you need help?")


@bot.message_handler(func=lambda message: message.text in ['Who are you?'])
def example(m):
    """Answer the literal question 'Who are you?'."""
    cid = m.chat.id
    # FIX: removed the unused local `uid`.
    bot.send_message(cid,
                     "I'm Daniel, a telegram-bot created by [Daniel](http://telegram.me/useless/)",
                     disable_web_page_preview="True",
                     parse_mode="Markdown")


@bot.message_handler(func=lambda message: True)
def echo_all(message):
    """Fallback: reply to any message or command the bot doesn't support."""
    # Show the 'typing...' chat action so the user sees the bot responding.
    bot.send_chat_action(message.chat.id, 'typing')
    bot.reply_to(message, "Sorry, I currently don't support that message.",
                 parse_mode="Markdown")


bot.polling()
danjol/telegram-chatbot
launch.py
Python
gpl-3.0
1,910
# coding=utf-8 # Copyright 2021 The vMF Embeddings Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Compute performance metrics given vMF embeddings. First argument is the path to the directory containing some number of .npz embedding files. The code will recurse to find them all. """ import os import random import sys import faiss import numpy as np from scipy.special import softmax from scipy.stats import mode import torch from vmf_embeddings import eval_utils from vmf_embeddings import utils from vmf_embeddings.third_party.s_vae_pytorch import distributions def softmax_accuracy(logits, ids): """Computes accuracy of class logits and ids marginalizing over samples.""" softmax_probs = np.mean(softmax(logits, axis=2), axis=1) correct = np.argmax(softmax_probs, axis=1) == ids acc = np.sum(correct) / float(len(ids)) return (acc, softmax_probs) def recall_at_1(embs, embs_norms, ids, n_samples=10): """Computes recall@1 for embeddings and ground-truth ids maringalizing samples. Args: embs: An ndarray of embeddings. embs_norms: ndarray of norms of the embeddings. ids: An ndarray of ground-truth class ids for each embedding. n_samples: Number of samples for marginalization. Returns: recall@1 metric value. 
""" with torch.no_grad(): z_dist = distributions.VonMisesFisher( torch.from_numpy(embs), torch.from_numpy(embs_norms)) z_samples = z_dist.sample(torch.Size([n_samples])).permute(1, 0, 2).numpy() res = faiss.StandardGpuResources() corrects = [] for i in range(n_samples): z = z_samples[:, i, :] index = faiss.GpuIndexFlatIP(res, z.shape[1]) index.add(z) _, idxs = index.search(z, 2) preds = ids[idxs[:, 1]] correct = ids == preds corrects.append(correct) corrects = np.array(corrects) correct_mode = mode(corrects, axis=0)[0] return np.mean(correct_mode) def map_at_r(embs, embs_norms, ids, n_samples=10): """Computes mAP@R for embeddings and ground-truth ids maringalizing samples. mAP@r code adapted from https://github.com/KevinMusgrave/pytorch-metric-learning/blob/master/src/pytorch_metric_learning/utils/accuracy_calculator.py Args: embs: An ndarray of embeddings. embs_norms: ndarray of norms of the embeddings. ids: An ndarray of ground-truth class ids for each embedding. n_samples: Number of samples for marginalization. Returns: mAP@R metric value. 
""" with torch.no_grad(): z_dist = distributions.VonMisesFisher( torch.from_numpy(embs), torch.from_numpy(embs_norms)) z_samples = z_dist.sample(torch.Size([n_samples])).permute(1, 0, 2).numpy() _, counts = np.unique(ids, return_counts=True) r_mask = np.zeros((embs.shape[0], np.max(counts) - 1), dtype=np.bool) for i, count in enumerate(counts): r_mask[np.where(ids == i), :count - 1] = True res = faiss.StandardGpuResources() maps = [] for i in range(n_samples): z = z_samples[:, i, :] index = faiss.GpuIndexFlatIP(res, z.shape[1]) index.add(z) try: # If search uses too much memory on GPU, switch to CPU _, all_idxs = index.search(z, int(np.max(counts))) except: index = faiss.index_gpu_to_cpu(index) _, all_idxs = index.search(z, int(np.max(counts))) all_idxs = all_idxs[:, 1:] ids_matrix = ids[all_idxs] correct = (ids_matrix == ids[:, np.newaxis]) * r_mask cumulative_correct = np.cumsum(correct, axis=1) k_idx = np.tile(np.arange(1, r_mask.shape[1] + 1), (r_mask.shape[0], 1)) precision_at_ks = (cumulative_correct * correct) / k_idx summed_precision_per_row = np.sum(precision_at_ks * r_mask, axis=1) max_possible_matches_per_row = np.sum(r_mask, axis=1) aps = summed_precision_per_row / max_possible_matches_per_row maps.append(np.mean(aps)) return np.mean(maps) def main(): path = sys.argv[1] n_samples = 10 n_bins = 15 torch.manual_seed(1234) random.seed(1234) np.random.seed(1234) norm_method = utils.get_norm_method_by_name("l2") split_by_dataset = { "mnist": "softmax", "fashionmnist": "softmax", "cifar10": "softmax", "cifar100": "softmax", "cars196": "retrieval", "stanfordonlineproducts": "retrieval", "synthetic": "softmax", "cub200": "retrieval" } if "fashion_mnist" in path: dataset = "fashionmnist" elif "mnist" in path: dataset = "mnist" elif "cifar100" in path: dataset = "cifar100" elif "cifar10" in path: dataset = "cifar10" elif "cars196" in path: dataset = "cars196" elif "synthetic" in path: dataset = "synthetic" elif "cub200" in path: dataset = "cub200" else: dataset = 
"stanfordonlineproducts" results = {} split = split_by_dataset[dataset] # Softmax computes different metrics compared to retrieval. if split == "softmax": results[split] = {"acc": [], "ece": []} else: results[split] = { "map@r": [], "r@1": [], } for root, _, files in os.walk(path): for f in files: if not f.endswith(".npz"): continue print("\nPath {}".format(root)) data_files = [ os.path.join(root, f) for f in os.listdir(root) if f == dataset + "_test.npz" ] for df in data_files: print("Split: {}".format(df.split("/")[-1])) data = np.load(df) ids = data["ids"] if split_by_dataset[dataset] == "softmax": logits = data["logits"] softmax_acc, softmax_probs = softmax_accuracy(logits, ids) ece = eval_utils.calc_ece_em_quant( np.max(softmax_probs, axis=1), np.argmax(softmax_probs, axis=1) == ids, n_bins, lambda_exp=2, ) results["softmax"]["acc"].append(softmax_acc) results["softmax"]["ece"].append(ece) else: embs = data["embeddings"] embs, embs_norms = norm_method( embs, use_torch=False, return_norms=True) r1_acc = recall_at_1(embs, embs_norms, ids, n_samples=n_samples) results[split]["r@1"].append(r1_acc) map_at_r_val = map_at_r(embs, embs_norms, ids, n_samples=n_samples) results[split]["map@r"].append(map_at_r_val) break for k, v in results.items(): print("\n=== {} ===".format(k)) if k != "softmax": print("Mean Recall@1: acc = {:.4f} +/- {:.4f}".format( 100.0 * np.mean(v["r@1"]), 100.0 * np.std(v["r@1"]) / np.sqrt(len(v["r@1"])), )) print("Mean mAP@R: val = {:.4f} +/- {:.4f}".format( np.mean(v["map@r"]), np.std(v["map@r"]) / np.sqrt(len(v["map@r"])), )) else: print( ("Mean {}: acc = {:.4f} +/- {:.4f}, ece = {:.4f} +/- {:.4f}").format( k, 100.0 * np.mean(v["acc"]), 100.0 * np.std(v["acc"]) / np.sqrt(len(v["acc"])), np.mean(v["ece"]), np.std(v["ece"]) / np.sqrt(len(v["ece"])), )) if __name__ == "__main__": main()
google-research/vmf_embeddings
vmf_embeddings/third_party/pytorch_metric_learning/compute_vmf_performance_metrics.py
Python
apache-2.0
7,548
# -*- coding: utf-8 -*-

# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Celery task-chain builders for stack lifecycle operations (launch,
# destroy, pause/resume, provisioning).  Each workflow turns a Stack plus
# options into an ordered list of task signatures executed as one chain.

from __future__ import unicode_literals

import logging

import actstream
from celery import chain

from stackdio.api.stacks import tasks
from stackdio.core.constants import Action, Activity

logger = logging.getLogger(__name__)


class WorkflowOptions(object):
    """Attribute-style access to workflow options.

    Lookup order: user-supplied dict first, then the class DEFAULTS;
    unknown names raise AttributeError.
    """

    # Fallback values used when the caller did not supply an option.
    DEFAULTS = {
        'max_attempts': 3,
    }

    def __init__(self, opts):
        # Raw dict of user-provided overrides; consulted lazily below.
        self.user_opts = opts

    def __getattr__(self, item):
        # Only invoked for attributes not found through normal lookup,
        # i.e. for option names.
        if item in self.user_opts:
            return self.user_opts[item]
        elif item in self.DEFAULTS:
            return self.DEFAULTS[item]
        else:
            raise AttributeError(item)


class LaunchWorkflowOptions(WorkflowOptions):
    """Options recognised by LaunchWorkflow."""

    DEFAULTS = {
        'max_attempts': 3,

        # Skips launching if set to False
        'launch': True,
        'provision': True,

        # Launches in parallel mode if set to True
        'parallel': True,

        # See stacks.tasks::launch_hosts for information on these params
        'simulate_launch_failures': False,
        'simulate_ssh_failures': False,
        'failure_percent': 0.3,
    }


class DestroyWorkflowOptions(WorkflowOptions):
    """Options recognised by the destroy workflows."""

    DEFAULTS = {
        'parallel': True,
    }


class BaseWorkflow(object):
    """Common scaffolding: hold the stack/hosts/options and run the chain."""

    # Subclasses override this to pick which options class wraps `opts`.
    _options_class = WorkflowOptions

    def __init__(self, stack, host_ids=None, opts=None):
        if opts is None:
            opts = {}
        self.stack = stack
        # host_ids of None means "the whole stack" for the tasks below.
        self.host_ids = host_ids
        self.opts = self._options_class(opts)

    def task_list(self):
        """Return the ordered list of task signatures; subclasses override."""
        return []

    def execute(self):
        # Chain the signatures so each task runs only after the previous
        # one succeeds, and submit asynchronously to the broker.
        task_chain = chain(*self.task_list())
        task_chain.apply_async()


class LaunchWorkflow(BaseWorkflow):
    """
    Encapsulates all tasks required to launch a new stack or new hosts into
    a stack.
    """
    _options_class = LaunchWorkflowOptions

    def task_list(self):
        stack_id = self.stack.id
        host_ids = self.host_ids
        opts = self.opts

        # launch=False short-circuits the whole workflow.
        if not opts.launch:
            return []

        task_list = [
            tasks.launch_hosts.si(
                stack_id,
                parallel=opts.parallel,
                max_attempts=opts.max_attempts,
                simulate_launch_failures=opts.simulate_launch_failures,
                simulate_ssh_failures=opts.simulate_ssh_failures,
                failure_percent=opts.failure_percent
            ),
            tasks.update_metadata.si(stack_id, Activity.LAUNCHING, host_ids=host_ids),
            tasks.tag_infrastructure.si(stack_id, activity=Activity.LAUNCHING,
                                        host_ids=host_ids),
            tasks.register_dns.si(stack_id, Activity.LAUNCHING, host_ids=host_ids),
            tasks.ping.si(stack_id, Activity.LAUNCHING),
            tasks.sync_all.si(stack_id),
            tasks.highstate.si(stack_id, max_attempts=opts.max_attempts),
            tasks.global_orchestrate.si(stack_id, max_attempts=opts.max_attempts),
        ]

        # Orchestration is optional; finishing the stack always runs last.
        if opts.provision:
            task_list.append(tasks.orchestrate.si(stack_id, max_attempts=opts.max_attempts))

        task_list.append(tasks.finish_stack.si(stack_id))

        # Side effects: mark the stack queued and record an activity-stream
        # event *now*, before the chain is actually executed.
        self.stack.set_activity(Activity.QUEUED)
        actstream.action.send(self.stack, verb='was submitted to launch queue')

        return task_list


class DestroyHostsWorkflow(BaseWorkflow):
    """
    Encapsulates all tasks required to destroy a set of hosts on a stack.
    """
    _options_class = DestroyWorkflowOptions

    def task_list(self):
        stack_id = self.stack.pk
        host_ids = self.host_ids

        # Hosts only: security groups are preserved and the stack itself
        # survives, returning to IDLE at the end.
        return [
            tasks.update_metadata.si(stack_id, Activity.TERMINATING, host_ids=host_ids),
            tasks.register_volume_delete.si(stack_id, host_ids=host_ids),
            tasks.unregister_dns.si(stack_id, Activity.TERMINATING, host_ids=host_ids),
            tasks.destroy_hosts.si(stack_id, host_ids=host_ids, delete_security_groups=False),
            tasks.finish_stack.si(stack_id, Activity.IDLE),
        ]


class DestroyStackWorkflow(BaseWorkflow):
    """
    Encapsulates all tasks required to destroy an entire stack.
    """
    _options_class = DestroyWorkflowOptions

    def __init__(self, stack, opts=None):
        super(DestroyStackWorkflow, self).__init__(stack, opts=opts)

        # Force host_ids to None since we're destroying the entire stack
        self.host_ids = None

    def task_list(self):
        stack_id = self.stack.pk
        return [
            tasks.update_metadata.si(stack_id, Activity.TERMINATING),
            tasks.register_volume_delete.si(stack_id),
            tasks.unregister_dns.si(stack_id, Activity.TERMINATING),
            tasks.destroy_hosts.si(stack_id, parallel=self.opts.parallel),
            tasks.destroy_stack.si(stack_id),
        ]


class ActionWorkflow(BaseWorkflow):
    """
    Runs an action
    """

    def __init__(self, stack, action, args):
        super(ActionWorkflow, self).__init__(stack)
        self.action = action
        self.args = args

    def task_list(self):
        # TODO: not generic enough

        # Action-specific lead-in tasks; actions missing from this map
        # (e.g. PROVISION, ORCHESTRATE) start with an empty list and rely
        # entirely on the shared tasks appended below.
        base_tasks = {
            Action.LAUNCH: [
                tasks.launch_hosts.si(self.stack.id),
            ],
            Action.TERMINATE: [
                tasks.update_metadata.si(self.stack.id, Activity.TERMINATING),
                tasks.register_volume_delete.si(self.stack.id),
                tasks.unregister_dns.si(self.stack.id, Activity.TERMINATING),
                tasks.destroy_hosts.si(self.stack.id, delete_hosts=False,
                                       delete_security_groups=False),
            ],
            Action.PAUSE: [
                tasks.execute_action.si(self.stack.id, self.action,
                                        Activity.PAUSING, *self.args),
            ],
            Action.RESUME: [
                tasks.execute_action.si(self.stack.id, self.action,
                                        Activity.RESUMING, *self.args),
            ],
            Action.PROPAGATE_SSH: [
                tasks.propagate_ssh.si(self.stack.id),
            ],
            Action.SINGLE_SLS: [
                tasks.single_sls.si(self.stack.id, arg['component'], arg.get('host_target'))
                for arg in self.args
            ],
        }

        # Activity the stack is in while the action runs ...
        action_to_activity = {
            Action.LAUNCH: Activity.LAUNCHING,
            Action.TERMINATE: Activity.TERMINATING,
            Action.PAUSE: Activity.PAUSING,
            Action.RESUME: Activity.RESUMING,
            Action.PROVISION: Activity.PROVISIONING,
            Action.ORCHESTRATE: Activity.ORCHESTRATING,
            Action.PROPAGATE_SSH: Activity.PROVISIONING,
            Action.SINGLE_SLS: Activity.ORCHESTRATING,
        }

        # ... and the activity it ends in once the chain completes.
        action_to_end_activity = {
            Action.LAUNCH: Activity.IDLE,
            Action.TERMINATE: Activity.TERMINATED,
            Action.PAUSE: Activity.PAUSED,
            Action.RESUME: Activity.IDLE,
            Action.PROVISION: Activity.IDLE,
            Action.ORCHESTRATE: Activity.IDLE,
            Action.PROPAGATE_SSH: Activity.IDLE,
            Action.SINGLE_SLS: Activity.IDLE,
        }

        # Start off with the base
        task_list = base_tasks.get(self.action, [])

        # Update the metadata after the main action has been executed
        if self.action not in (Action.SINGLE_SLS, Action.TERMINATE):
            task_list.append(tasks.update_metadata.si(self.stack.id,
                                                      action_to_activity[self.action]))

        # Resuming and launching requires DNS updates
        if self.action in (Action.RESUME, Action.LAUNCH):
            task_list.append(tasks.tag_infrastructure.si(
                self.stack.id,
                activity=action_to_activity[self.action],
            ))
            task_list.append(tasks.register_dns.si(self.stack.id,
                                                   action_to_activity[self.action]))

        # resuming, launching, or reprovisioning requires us to execute the
        # provisioning tasks
        if self.action in (Action.RESUME, Action.LAUNCH, Action.PROVISION, Action.ORCHESTRATE):
            task_list.append(tasks.ping.si(self.stack.id, action_to_activity[self.action]))
            task_list.append(tasks.sync_all.si(self.stack.id))

        if self.action in (Action.LAUNCH, Action.PROVISION):
            task_list.append(tasks.highstate.si(self.stack.id))

        if self.action in (Action.LAUNCH, Action.PROVISION, Action.ORCHESTRATE):
            task_list.append(tasks.global_orchestrate.si(self.stack.id))
            task_list.append(tasks.orchestrate.si(self.stack.id, self.opts.max_attempts))

        # Always finish the stack
        task_list.append(tasks.finish_stack.si(self.stack.id,
                                               action_to_end_activity[self.action]))

        return task_list
clarkperkins/stackdio
stackdio/api/stacks/workflows.py
Python
apache-2.0
9,270
from vsg import parser


class assignment(parser.item):
    """Token for the assignment symbol of an association element.

    unique_id = association_element : assignment
    """

    def __init__(self, sString):
        super().__init__(sString)


class formal_part(parser.item):
    """Token for the formal part of an association element.

    unique_id = association_element : formal_part
    """

    def __init__(self, sString):
        super().__init__(sString)


class actual_part(parser.item):
    """Token for the actual part of an association element.

    unique_id = association_element : actual_part
    """

    def __init__(self, sString):
        super().__init__(sString)
jeremiah-c-leary/vhdl-style-guide
vsg/token/association_element.py
Python
gpl-3.0
556
class IdentifiableElement():
    """Base class for generated model elements that can be identified.

    Instances expose two attributes:
      * ``id``    -- unique identifier, ``None`` until assigned
      * ``label`` -- human-readable name, ``None`` until assigned
    """

    def __init__(self):
        """Create an element with no id and no label yet."""
        self.id = self.label = None
Somae/mdsd-factory-project
resources/de.mdelab.languages.resources/code-gen/factory/IdentifiableElement.py
Python
gpl-3.0
88
# NOTE(review): this is a NodeBox script -- ximport, size, random and choice
# are presumably injected by the NodeBox runtime, not regular imports; it
# will not run as plain Python.
try:
    graph = ximport("graph")
except ImportError:
    # Fall back to the package itself when run from inside the library dir.
    graph = ximport("__init__")
    reload(graph)

size(600, 600)

# A graph object.
g = graph.create(iterations=500, distance=1.0)

# Add nodes with a random id,
# connected to other random nodes.
for i in range(50):
    node1 = g.add_node(random(500))
    if random() > 0.5:
        # Attach 2 or 3 edges with random weights to random peers.
        for i in range(choice((2, 3))):
            node2 = choice(g.nodes)
            g.add_edge(node1.id, node2.id, weight=random())

# We leave out any orphaned nodes.
g.prune()

# Colorize nodes.
# Nodes with higher importance are blue.
g.styles.apply()

# Update the graph layout until it's done.
g.solve()

# Show the shortest path between two random nodes.
path = []
id1 = choice(g.keys())
id2 = choice(g.keys())
path = g.shortest_path(id1, id2)

# Draw the graph and display the shortest path.
g.draw(highlight=path, weighted=True, directed=True)
gt-ros-pkg/rcommander-core
nodebox_qt/src/graph/graph_example2.py
Python
bsd-3-clause
887
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (c) 2010-2014 Elico Corp. All Rights Reserved. # Augustin Cisterne-Kaas <augustin.cisterne-kaas@elico-corp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import website from . import models
Elico-Corp/odoo-addons
website_recaptcha/__init__.py
Python
agpl-3.0
1,093
import unittest
import uuid
from unittest import mock

from tinydb import TinyDB, Query

from motey.repositories import service_repository


class TestServiceRepository(unittest.TestCase):
    """Unit tests for ``service_repository.ServiceRepository``.

    TinyDB, Query and BaseRepository are replaced with mocks so no real
    database file is ever touched.
    """

    def setUp(self):
        # Fixed: this used to be decorated with @classmethod, which bound
        # ``self`` to the class and stored every fixture (including the
        # mocked repository) on the class itself, sharing state between
        # tests. A plain instance method rebuilds the fixtures per test.
        self.test_service_id = uuid.uuid4().hex
        self.test_service = {'id': self.test_service_id,
                             'service_name': 'test service name',
                             'images': ['test image']}
        service_repository.config = {'DATABASE': {'path': '/tmp/testpath'}}
        service_repository.BaseRepository = mock.Mock(service_repository.BaseRepository)
        service_repository.TinyDB = mock.Mock(TinyDB)
        service_repository.Query = mock.Mock(Query)
        self.test_service_repository = service_repository.ServiceRepository()

    def test_construction(self):
        """The repository creates its TinyDB handle on construction."""
        self.assertIsNotNone(self.test_service_repository.db)

    def test_add_service_does_not_exist(self):
        """add() inserts the service when it is not present yet."""
        self.test_service_repository.has = mock.MagicMock(return_value=False)
        self.test_service_repository.db.insert = mock.MagicMock(return_value='123')
        self.test_service_repository.add(service=self.test_service)
        self.assertTrue(self.test_service_repository.db.insert.called)

    def test_add_service_exists(self):
        """add() must not insert a duplicate of an existing service."""
        # Renamed from the misspelled ``test_add_servie_exist``.
        self.test_service_repository.has = mock.MagicMock(return_value=True)
        self.test_service_repository.db.insert = mock.MagicMock(return_value='123')
        self.test_service_repository.add(service=self.test_service)
        self.assertFalse(self.test_service_repository.db.insert.called)

    def test_update(self):
        """update() delegates to the underlying TinyDB update."""
        # Renamed from the misspelled ``test_udpate``.
        self.test_service_repository.update(service=self.test_service)
        self.assertTrue(self.test_service_repository.db.update.called)

    def test_remove(self):
        """remove() delegates to the underlying TinyDB remove."""
        self.test_service_repository.remove(service_id=self.test_service['id'])
        self.assertTrue(self.test_service_repository.db.remove.called)

    def test_has_entry(self):
        """has() is True when the search returns at least one record."""
        self.test_service_repository.db.search = mock.MagicMock(return_value=[1, 2])
        result = self.test_service_repository.has(service_id=self.test_service['id'])
        self.assertTrue(self.test_service_repository.db.search.called)
        self.assertTrue(result)

    def test_has_no_entry(self):
        """has() is False when the search comes back empty."""
        self.test_service_repository.db.search = mock.MagicMock(return_value=[])
        result = self.test_service_repository.has(service_id=self.test_service['id'])
        self.assertTrue(self.test_service_repository.db.search.called)
        self.assertFalse(result)


if __name__ == '__main__':
    unittest.main()
Neoklosch/Motey
tests/repositories/test_service_repository.py
Python
apache-2.0
2,546
# generated from catkin/cmake/template/pkg.context.pc.in CATKIN_PACKAGE_PREFIX = "" PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else [] PROJECT_CATKIN_DEPENDS = "".replace(';', ' ') PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else [] PROJECT_NAME = "shotcrete_selector" PROJECT_SPACE_DIR = "/home/mike/catkin_ws/devel" PROJECT_VERSION = "1.0.0"
mikewrock/phd_backup_full
build/shotcrete_points/catkin_generated/pkg.develspace.context.pc.py
Python
apache-2.0
378
# -*- coding: utf-8 -*- """ Created on Tue Sep 26 14:42:10 2017 @author: ksagilop Use requests modules to retrieve info from website and download files A nice tutorial: https://gist.github.com/phillipsm/0ed98b2585f0ada5a769 TODO: List of books available TODO: Merge this function with Gender Detection script TODO: Analyze all books in url and provide statistics """ # Modules import requests, bs4 # Assignments url = r'http://english-e-reader.net/findbook' book_example = r'http://english-e-reader.net/download?link=macbeth-william-shakespeare&format=txt' # Functions def library(url): '''Get a list of available books to download from an specified <url>''' lib = requests.get(url) lib.raise_for_status() print(url) print('Text is', type(lib), 'of length', len(lib.text)) libSoup = bs4.BeautifulSoup(lib.text, 'lxml') print(libSoup) print('Soup is', type(libSoup), 'of length', len(libSoup)) elems = libSoup.select('table.table') print('Select is', type(elems), 'of length', len(elems)) print(elems) for table_row in libSoup.select('table.table tr'): cells = table_row.findAll('td') if len(cells) > 0: books = cells[1] #print(books) return lib.text def lease(book): '''Construct a list of available books to download''' res = requests.get(book) # Requests a response from the url 'book' res.raise_for_status() # Check call success print(type(res)) print(len(res.text)) print(res.text[:300]) return res.text # Execute #lease(book_example) #library(url) lib = requests.get(url) lib.raise_for_status()
pandastrail/InfoEng
scripting/exercises/p06_requests_web_files.py
Python
gpl-3.0
1,645
import sys
from unittest.mock import patch

import pytest

from importlib import reload

import pynamodb.settings


@pytest.mark.parametrize('settings_str', [
    "session_cls = object()",
    "request_timeout_seconds = 5",
])
def test_override_old_attributes(settings_str, tmpdir):
    """Reloading pynamodb.settings with a legacy override file must warn.

    Each parametrized ``settings_str`` writes a setting that pynamodb no
    longer supports; pointing PYNAMODB_CONFIG at it and reloading the
    settings module should emit exactly one UserWarning about it.
    """
    settings_file = tmpdir.join("pynamodb_settings.py")
    settings_file.write(settings_str)

    env_override = {'PYNAMODB_CONFIG': str(settings_file)}
    with patch.dict('os.environ', env_override), pytest.warns(UserWarning) as warns:
        reload(pynamodb.settings)

    assert len(warns) == 1
    assert 'options are no longer supported' in str(warns[0].message)
jlafon/PynamoDB
tests/test_settings.py
Python
mit
643
# -*- coding: utf-8 -*- # Authors: See README.RST for Contributors # Copyright 2015-2016 See __openerp__.py for Authors # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). { "name": "Report qweb auto generation", "version": "9.0.1.0.0", "depends": [ "report", ], "external_dependencies": { "python": [ "unidecode", ], }, "author": "OdooMRP team, " "AvanzOSC, " "Serv. Tecnol. Avanzados - Pedro M. Baeza, " "Odoo Community Association (OCA), ", "website": "http://www.odoomrp.com", "license": "AGPL-3", "contributors": [ "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>", "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>", "Ana Juaristi <anajuaristi@avanzosc.es>", ], "category": "Tools", "data": [ "wizard/report_duplicate_view.xml", "views/report_xml_view.xml", ], 'installable': True, }
be-cloud-be/horizon-addons
server-tools/base_report_auto_create_qweb/__openerp__.py
Python
agpl-3.0
987
# # colors.py -- color definitions # # Eric Jeschke (eric@naoj.org) # # Copyright (c) Eric R. Jeschke. All rights reserved. # This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. # import re color_dict = { 'aliceblue': (0.9411764705882353, 0.9725490196078431, 1.0), 'antiquewhite': (0.9803921568627451, 0.9215686274509803, 0.8431372549019608), 'antiquewhite1': (1.0, 0.9372549019607843, 0.8588235294117647), 'antiquewhite2': (0.9333333333333333, 0.8745098039215686, 0.8), 'antiquewhite3': (0.803921568627451, 0.7529411764705882, 0.6901960784313725), 'antiquewhite4': (0.5450980392156862, 0.5137254901960784, 0.47058823529411764), 'aquamarine': (0.4980392156862745, 1.0, 0.8313725490196079), 'aquamarine1': (0.4980392156862745, 1.0, 0.8313725490196079), 'aquamarine2': (0.4627450980392157, 0.9333333333333333, 0.7764705882352941), 'aquamarine3': (0.4, 0.803921568627451, 0.6666666666666666), 'aquamarine4': (0.27058823529411763, 0.5450980392156862, 0.4549019607843137), 'azure': (0.9411764705882353, 1.0, 1.0), 'azure1': (0.9411764705882353, 1.0, 1.0), 'azure2': (0.8784313725490196, 0.9333333333333333, 0.9333333333333333), 'azure3': (0.7568627450980392, 0.803921568627451, 0.803921568627451), 'azure4': (0.5137254901960784, 0.5450980392156862, 0.5450980392156862), 'beige': (0.9607843137254902, 0.9607843137254902, 0.8627450980392157), 'bisque': (1.0, 0.8941176470588236, 0.7686274509803922), 'bisque1': (1.0, 0.8941176470588236, 0.7686274509803922), 'bisque2': (0.9333333333333333, 0.8352941176470589, 0.7176470588235294), 'bisque3': (0.803921568627451, 0.7176470588235294, 0.6196078431372549), 'bisque4': (0.5450980392156862, 0.49019607843137253, 0.4196078431372549), 'black': (0.0, 0.0, 0.0), 'blanchedalmond': (1.0, 0.9215686274509803, 0.803921568627451), 'blue': (0.0, 0.0, 1.0), 'blue1': (0.0, 0.0, 1.0), 'blue2': (0.0, 0.0, 0.9333333333333333), 'blue3': (0.0, 0.0, 0.803921568627451), 'blue4': (0.0, 0.0, 0.5450980392156862), 
'blueviolet': (0.5411764705882353, 0.16862745098039217, 0.8862745098039215), 'brown': (0.6470588235294118, 0.16470588235294117, 0.16470588235294117), 'brown1': (1.0, 0.25098039215686274, 0.25098039215686274), 'brown2': (0.9333333333333333, 0.23137254901960785, 0.23137254901960785), 'brown3': (0.803921568627451, 0.2, 0.2), 'brown4': (0.5450980392156862, 0.13725490196078433, 0.13725490196078433), 'burlywood': (0.8705882352941177, 0.7215686274509804, 0.5294117647058824), 'burlywood1': (1.0, 0.8274509803921568, 0.6078431372549019), 'burlywood2': (0.9333333333333333, 0.7725490196078432, 0.5686274509803921), 'burlywood3': (0.803921568627451, 0.6666666666666666, 0.49019607843137253), 'burlywood4': (0.5450980392156862, 0.45098039215686275, 0.3333333333333333), 'cadetblue': (0.37254901960784315, 0.6196078431372549, 0.6274509803921569), 'cadetblue1': (0.596078431372549, 0.9607843137254902, 1.0), 'cadetblue2': (0.5568627450980392, 0.8980392156862745, 0.9333333333333333), 'cadetblue3': (0.47843137254901963, 0.7725490196078432, 0.803921568627451), 'cadetblue4': (0.3254901960784314, 0.5254901960784314, 0.5450980392156862), 'chartreuse': (0.4980392156862745, 1.0, 0.0), 'chartreuse1': (0.4980392156862745, 1.0, 0.0), 'chartreuse2': (0.4627450980392157, 0.9333333333333333, 0.0), 'chartreuse3': (0.4, 0.803921568627451, 0.0), 'chartreuse4': (0.27058823529411763, 0.5450980392156862, 0.0), 'chocolate': (0.8235294117647058, 0.4117647058823529, 0.11764705882352941), 'chocolate1': (1.0, 0.4980392156862745, 0.1411764705882353), 'chocolate2': (0.9333333333333333, 0.4627450980392157, 0.12941176470588237), 'chocolate3': (0.803921568627451, 0.4, 0.11372549019607843), 'chocolate4': (0.5450980392156862, 0.27058823529411763, 0.07450980392156863), 'coral': (1.0, 0.4980392156862745, 0.3137254901960784), 'coral1': (1.0, 0.4470588235294118, 0.33725490196078434), 'coral2': (0.9333333333333333, 0.41568627450980394, 0.3137254901960784), 'coral3': (0.803921568627451, 0.3568627450980392, 
0.27058823529411763), 'coral4': (0.5450980392156862, 0.24313725490196078, 0.1843137254901961), 'cornflowerblue': (0.39215686274509803, 0.5843137254901961, 0.9294117647058824), 'cornsilk': (1.0, 0.9725490196078431, 0.8627450980392157), 'cornsilk1': (1.0, 0.9725490196078431, 0.8627450980392157), 'cornsilk2': (0.9333333333333333, 0.9098039215686274, 0.803921568627451), 'cornsilk3': (0.803921568627451, 0.7843137254901961, 0.6941176470588235), 'cornsilk4': (0.5450980392156862, 0.5333333333333333, 0.47058823529411764), 'cyan': (0.0, 1.0, 1.0), 'cyan1': (0.0, 1.0, 1.0), 'cyan2': (0.0, 0.9333333333333333, 0.9333333333333333), 'cyan3': (0.0, 0.803921568627451, 0.803921568627451), 'cyan4': (0.0, 0.5450980392156862, 0.5450980392156862), 'darkblue': (0.0, 0.0, 0.5450980392156862), 'darkcyan': (0.0, 0.5450980392156862, 0.5450980392156862), 'darkgoldenrod': (0.7215686274509804, 0.5254901960784314, 0.043137254901960784), 'darkgoldenrod1': (1.0, 0.7254901960784313, 0.058823529411764705), 'darkgoldenrod2': (0.9333333333333333, 0.6784313725490196, 0.054901960784313725), 'darkgoldenrod3': (0.803921568627451, 0.5843137254901961, 0.047058823529411764), 'darkgoldenrod4': (0.5450980392156862, 0.396078431372549, 0.03137254901960784), 'darkgray': (0.6627450980392157, 0.6627450980392157, 0.6627450980392157), 'darkgreen': (0.0, 0.39215686274509803, 0.0), 'darkgrey': (0.6627450980392157, 0.6627450980392157, 0.6627450980392157), 'darkkhaki': (0.7411764705882353, 0.7176470588235294, 0.4196078431372549), 'darkmagenta': (0.5450980392156862, 0.0, 0.5450980392156862), 'darkolivegreen': (0.3333333333333333, 0.4196078431372549, 0.1843137254901961), 'darkolivegreen1': (0.792156862745098, 1.0, 0.4392156862745098), 'darkolivegreen2': (0.7372549019607844, 0.9333333333333333, 0.40784313725490196), 'darkolivegreen3': (0.6352941176470588, 0.803921568627451, 0.35294117647058826), 'darkolivegreen4': (0.43137254901960786, 0.5450980392156862, 0.23921568627450981), 'darkorange': (1.0, 0.5490196078431373, 0.0), 
'darkorange1': (1.0, 0.4980392156862745, 0.0), 'darkorange2': (0.9333333333333333, 0.4627450980392157, 0.0), 'darkorange3': (0.803921568627451, 0.4, 0.0), 'darkorange4': (0.5450980392156862, 0.27058823529411763, 0.0), 'darkorchid': (0.6, 0.19607843137254902, 0.8), 'darkorchid1': (0.7490196078431373, 0.24313725490196078, 1.0), 'darkorchid2': (0.6980392156862745, 0.22745098039215686, 0.9333333333333333), 'darkorchid3': (0.6039215686274509, 0.19607843137254902, 0.803921568627451), 'darkorchid4': (0.40784313725490196, 0.13333333333333333, 0.5450980392156862), 'darkred': (0.5450980392156862, 0.0, 0.0), 'darksalmon': (0.9137254901960784, 0.5882352941176471, 0.47843137254901963), 'darkseagreen': (0.5607843137254902, 0.7372549019607844, 0.5607843137254902), 'darkseagreen1': (0.7568627450980392, 1.0, 0.7568627450980392), 'darkseagreen2': (0.7058823529411765, 0.9333333333333333, 0.7058823529411765), 'darkseagreen3': (0.6078431372549019, 0.803921568627451, 0.6078431372549019), 'darkseagreen4': (0.4117647058823529, 0.5450980392156862, 0.4117647058823529), 'darkslateblue': (0.2823529411764706, 0.23921568627450981, 0.5450980392156862), 'darkslategray': (0.1843137254901961, 0.30980392156862746, 0.30980392156862746), 'darkslategray1': (0.592156862745098, 1.0, 1.0), 'darkslategray2': (0.5529411764705883, 0.9333333333333333, 0.9333333333333333), 'darkslategray3': (0.4745098039215686, 0.803921568627451, 0.803921568627451), 'darkslategray4': (0.3215686274509804, 0.5450980392156862, 0.5450980392156862), 'darkslategrey': (0.1843137254901961, 0.30980392156862746, 0.30980392156862746), 'darkturquoise': (0.0, 0.807843137254902, 0.8196078431372549), 'darkviolet': (0.5803921568627451, 0.0, 0.8274509803921568), 'debianred': (0.8431372549019608, 0.027450980392156862, 0.3176470588235294), 'deeppink': (1.0, 0.0784313725490196, 0.5764705882352941), 'deeppink1': (1.0, 0.0784313725490196, 0.5764705882352941), 'deeppink2': (0.9333333333333333, 0.07058823529411765, 0.5372549019607843), 'deeppink3': 
(0.803921568627451, 0.06274509803921569, 0.4627450980392157), 'deeppink4': (0.5450980392156862, 0.0392156862745098, 0.3137254901960784), 'deepskyblue': (0.0, 0.7490196078431373, 1.0), 'deepskyblue1': (0.0, 0.7490196078431373, 1.0), 'deepskyblue2': (0.0, 0.6980392156862745, 0.9333333333333333), 'deepskyblue3': (0.0, 0.6039215686274509, 0.803921568627451), 'deepskyblue4': (0.0, 0.40784313725490196, 0.5450980392156862), 'dimgray': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529), 'dimgrey': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529), 'dodgerblue': (0.11764705882352941, 0.5647058823529412, 1.0), 'dodgerblue1': (0.11764705882352941, 0.5647058823529412, 1.0), 'dodgerblue2': (0.10980392156862745, 0.5254901960784314, 0.9333333333333333), 'dodgerblue3': (0.09411764705882353, 0.4549019607843137, 0.803921568627451), 'dodgerblue4': (0.06274509803921569, 0.3058823529411765, 0.5450980392156862), 'firebrick': (0.6980392156862745, 0.13333333333333333, 0.13333333333333333), 'firebrick1': (1.0, 0.18823529411764706, 0.18823529411764706), 'firebrick2': (0.9333333333333333, 0.17254901960784313, 0.17254901960784313), 'firebrick3': (0.803921568627451, 0.14901960784313725, 0.14901960784313725), 'firebrick4': (0.5450980392156862, 0.10196078431372549, 0.10196078431372549), 'floralwhite': (1.0, 0.9803921568627451, 0.9411764705882353), 'forestgreen': (0.13333333333333333, 0.5450980392156862, 0.13333333333333333), 'gainsboro': (0.8627450980392157, 0.8627450980392157, 0.8627450980392157), 'ghostwhite': (0.9725490196078431, 0.9725490196078431, 1.0), 'gold': (1.0, 0.8431372549019608, 0.0), 'gold1': (1.0, 0.8431372549019608, 0.0), 'gold2': (0.9333333333333333, 0.788235294117647, 0.0), 'gold3': (0.803921568627451, 0.6784313725490196, 0.0), 'gold4': (0.5450980392156862, 0.4588235294117647, 0.0), 'goldenrod': (0.8549019607843137, 0.6470588235294118, 0.12549019607843137), 'goldenrod1': (1.0, 0.7568627450980392, 0.1450980392156863), 'goldenrod2': (0.9333333333333333, 
0.7058823529411765, 0.13333333333333333), 'goldenrod3': (0.803921568627451, 0.6078431372549019, 0.11372549019607843), 'goldenrod4': (0.5450980392156862, 0.4117647058823529, 0.0784313725490196), 'gray': (0.7450980392156863, 0.7450980392156863, 0.7450980392156863), 'gray0': (0.0, 0.0, 0.0), 'gray1': (0.011764705882352941, 0.011764705882352941, 0.011764705882352941), 'gray10': (0.10196078431372549, 0.10196078431372549, 0.10196078431372549), 'gray100': (1.0, 1.0, 1.0), 'gray11': (0.10980392156862745, 0.10980392156862745, 0.10980392156862745), 'gray12': (0.12156862745098039, 0.12156862745098039, 0.12156862745098039), 'gray13': (0.12941176470588237, 0.12941176470588237, 0.12941176470588237), 'gray14': (0.1411764705882353, 0.1411764705882353, 0.1411764705882353), 'gray15': (0.14901960784313725, 0.14901960784313725, 0.14901960784313725), 'gray16': (0.1607843137254902, 0.1607843137254902, 0.1607843137254902), 'gray17': (0.16862745098039217, 0.16862745098039217, 0.16862745098039217), 'gray18': (0.1803921568627451, 0.1803921568627451, 0.1803921568627451), 'gray19': (0.18823529411764706, 0.18823529411764706, 0.18823529411764706), 'gray2': (0.0196078431372549, 0.0196078431372549, 0.0196078431372549), 'gray20': (0.2, 0.2, 0.2), 'gray21': (0.21176470588235294, 0.21176470588235294, 0.21176470588235294), 'gray22': (0.2196078431372549, 0.2196078431372549, 0.2196078431372549), 'gray23': (0.23137254901960785, 0.23137254901960785, 0.23137254901960785), 'gray24': (0.23921568627450981, 0.23921568627450981, 0.23921568627450981), 'gray25': (0.25098039215686274, 0.25098039215686274, 0.25098039215686274), 'gray26': (0.25882352941176473, 0.25882352941176473, 0.25882352941176473), 'gray27': (0.27058823529411763, 0.27058823529411763, 0.27058823529411763), 'gray28': (0.2784313725490196, 0.2784313725490196, 0.2784313725490196), 'gray29': (0.2901960784313726, 0.2901960784313726, 0.2901960784313726), 'gray3': (0.03137254901960784, 0.03137254901960784, 0.03137254901960784), 'gray30': 
(0.30196078431372547, 0.30196078431372547, 0.30196078431372547), 'gray31': (0.30980392156862746, 0.30980392156862746, 0.30980392156862746), 'gray32': (0.3215686274509804, 0.3215686274509804, 0.3215686274509804), 'gray33': (0.32941176470588235, 0.32941176470588235, 0.32941176470588235), 'gray34': (0.3411764705882353, 0.3411764705882353, 0.3411764705882353), 'gray35': (0.34901960784313724, 0.34901960784313724, 0.34901960784313724), 'gray36': (0.3607843137254902, 0.3607843137254902, 0.3607843137254902), 'gray37': (0.3686274509803922, 0.3686274509803922, 0.3686274509803922), 'gray38': (0.3803921568627451, 0.3803921568627451, 0.3803921568627451), 'gray39': (0.38823529411764707, 0.38823529411764707, 0.38823529411764707), 'gray4': (0.0392156862745098, 0.0392156862745098, 0.0392156862745098), 'gray40': (0.4, 0.4, 0.4), 'gray41': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529), 'gray42': (0.4196078431372549, 0.4196078431372549, 0.4196078431372549), 'gray43': (0.43137254901960786, 0.43137254901960786, 0.43137254901960786), 'gray44': (0.4392156862745098, 0.4392156862745098, 0.4392156862745098), 'gray45': (0.45098039215686275, 0.45098039215686275, 0.45098039215686275), 'gray46': (0.4588235294117647, 0.4588235294117647, 0.4588235294117647), 'gray47': (0.47058823529411764, 0.47058823529411764, 0.47058823529411764), 'gray48': (0.47843137254901963, 0.47843137254901963, 0.47843137254901963), 'gray49': (0.49019607843137253, 0.49019607843137253, 0.49019607843137253), 'gray5': (0.050980392156862744, 0.050980392156862744, 0.050980392156862744), 'gray50': (0.4980392156862745, 0.4980392156862745, 0.4980392156862745), 'gray51': (0.5098039215686274, 0.5098039215686274, 0.5098039215686274), 'gray52': (0.5215686274509804, 0.5215686274509804, 0.5215686274509804), 'gray53': (0.5294117647058824, 0.5294117647058824, 0.5294117647058824), 'gray54': (0.5411764705882353, 0.5411764705882353, 0.5411764705882353), 'gray55': (0.5490196078431373, 0.5490196078431373, 0.5490196078431373), 
'gray56': (0.5607843137254902, 0.5607843137254902, 0.5607843137254902), 'gray57': (0.5686274509803921, 0.5686274509803921, 0.5686274509803921), 'gray58': (0.5803921568627451, 0.5803921568627451, 0.5803921568627451), 'gray59': (0.5882352941176471, 0.5882352941176471, 0.5882352941176471), 'gray6': (0.058823529411764705, 0.058823529411764705, 0.058823529411764705), 'gray60': (0.6, 0.6, 0.6), 'gray61': (0.611764705882353, 0.611764705882353, 0.611764705882353), 'gray62': (0.6196078431372549, 0.6196078431372549, 0.6196078431372549), 'gray63': (0.6313725490196078, 0.6313725490196078, 0.6313725490196078), 'gray64': (0.6392156862745098, 0.6392156862745098, 0.6392156862745098), 'gray65': (0.6509803921568628, 0.6509803921568628, 0.6509803921568628), 'gray66': (0.6588235294117647, 0.6588235294117647, 0.6588235294117647), 'gray67': (0.6705882352941176, 0.6705882352941176, 0.6705882352941176), 'gray68': (0.6784313725490196, 0.6784313725490196, 0.6784313725490196), 'gray69': (0.6901960784313725, 0.6901960784313725, 0.6901960784313725), 'gray7': (0.07058823529411765, 0.07058823529411765, 0.07058823529411765), 'gray70': (0.7019607843137254, 0.7019607843137254, 0.7019607843137254), 'gray71': (0.7098039215686275, 0.7098039215686275, 0.7098039215686275), 'gray72': (0.7215686274509804, 0.7215686274509804, 0.7215686274509804), 'gray73': (0.7294117647058823, 0.7294117647058823, 0.7294117647058823), 'gray74': (0.7411764705882353, 0.7411764705882353, 0.7411764705882353), 'gray75': (0.7490196078431373, 0.7490196078431373, 0.7490196078431373), 'gray76': (0.7607843137254902, 0.7607843137254902, 0.7607843137254902), 'gray77': (0.7686274509803922, 0.7686274509803922, 0.7686274509803922), 'gray78': (0.7803921568627451, 0.7803921568627451, 0.7803921568627451), 'gray79': (0.788235294117647, 0.788235294117647, 0.788235294117647), 'gray8': (0.0784313725490196, 0.0784313725490196, 0.0784313725490196), 'gray80': (0.8, 0.8, 0.8), 'gray81': (0.8117647058823529, 0.8117647058823529, 0.8117647058823529), 
'gray82': (0.8196078431372549, 0.8196078431372549, 0.8196078431372549), 'gray83': (0.8313725490196079, 0.8313725490196079, 0.8313725490196079), 'gray84': (0.8392156862745098, 0.8392156862745098, 0.8392156862745098), 'gray85': (0.8509803921568627, 0.8509803921568627, 0.8509803921568627), 'gray86': (0.8588235294117647, 0.8588235294117647, 0.8588235294117647), 'gray87': (0.8705882352941177, 0.8705882352941177, 0.8705882352941177), 'gray88': (0.8784313725490196, 0.8784313725490196, 0.8784313725490196), 'gray89': (0.8901960784313725, 0.8901960784313725, 0.8901960784313725), 'gray9': (0.09019607843137255, 0.09019607843137255, 0.09019607843137255), 'gray90': (0.8980392156862745, 0.8980392156862745, 0.8980392156862745), 'gray91': (0.9098039215686274, 0.9098039215686274, 0.9098039215686274), 'gray92': (0.9215686274509803, 0.9215686274509803, 0.9215686274509803), 'gray93': (0.9294117647058824, 0.9294117647058824, 0.9294117647058824), 'gray94': (0.9411764705882353, 0.9411764705882353, 0.9411764705882353), 'gray95': (0.9490196078431372, 0.9490196078431372, 0.9490196078431372), 'gray96': (0.9607843137254902, 0.9607843137254902, 0.9607843137254902), 'gray97': (0.9686274509803922, 0.9686274509803922, 0.9686274509803922), 'gray98': (0.9803921568627451, 0.9803921568627451, 0.9803921568627451), 'gray99': (0.9882352941176471, 0.9882352941176471, 0.9882352941176471), 'green': (0.0, 1.0, 0.0), 'green1': (0.0, 1.0, 0.0), 'green2': (0.0, 0.9333333333333333, 0.0), 'green3': (0.0, 0.803921568627451, 0.0), 'green4': (0.0, 0.5450980392156862, 0.0), 'greenyellow': (0.6784313725490196, 1.0, 0.1843137254901961), 'grey': (0.7450980392156863, 0.7450980392156863, 0.7450980392156863), 'grey0': (0.0, 0.0, 0.0), 'grey1': (0.011764705882352941, 0.011764705882352941, 0.011764705882352941), 'grey10': (0.10196078431372549, 0.10196078431372549, 0.10196078431372549), 'grey100': (1.0, 1.0, 1.0), 'grey11': (0.10980392156862745, 0.10980392156862745, 0.10980392156862745), 'grey12': (0.12156862745098039, 
0.12156862745098039, 0.12156862745098039), 'grey13': (0.12941176470588237, 0.12941176470588237, 0.12941176470588237), 'grey14': (0.1411764705882353, 0.1411764705882353, 0.1411764705882353), 'grey15': (0.14901960784313725, 0.14901960784313725, 0.14901960784313725), 'grey16': (0.1607843137254902, 0.1607843137254902, 0.1607843137254902), 'grey17': (0.16862745098039217, 0.16862745098039217, 0.16862745098039217), 'grey18': (0.1803921568627451, 0.1803921568627451, 0.1803921568627451), 'grey19': (0.18823529411764706, 0.18823529411764706, 0.18823529411764706), 'grey2': (0.0196078431372549, 0.0196078431372549, 0.0196078431372549), 'grey20': (0.2, 0.2, 0.2), 'grey21': (0.21176470588235294, 0.21176470588235294, 0.21176470588235294), 'grey22': (0.2196078431372549, 0.2196078431372549, 0.2196078431372549), 'grey23': (0.23137254901960785, 0.23137254901960785, 0.23137254901960785), 'grey24': (0.23921568627450981, 0.23921568627450981, 0.23921568627450981), 'grey25': (0.25098039215686274, 0.25098039215686274, 0.25098039215686274), 'grey26': (0.25882352941176473, 0.25882352941176473, 0.25882352941176473), 'grey27': (0.27058823529411763, 0.27058823529411763, 0.27058823529411763), 'grey28': (0.2784313725490196, 0.2784313725490196, 0.2784313725490196), 'grey29': (0.2901960784313726, 0.2901960784313726, 0.2901960784313726), 'grey3': (0.03137254901960784, 0.03137254901960784, 0.03137254901960784), 'grey30': (0.30196078431372547, 0.30196078431372547, 0.30196078431372547), 'grey31': (0.30980392156862746, 0.30980392156862746, 0.30980392156862746), 'grey32': (0.3215686274509804, 0.3215686274509804, 0.3215686274509804), 'grey33': (0.32941176470588235, 0.32941176470588235, 0.32941176470588235), 'grey34': (0.3411764705882353, 0.3411764705882353, 0.3411764705882353), 'grey35': (0.34901960784313724, 0.34901960784313724, 0.34901960784313724), 'grey36': (0.3607843137254902, 0.3607843137254902, 0.3607843137254902), 'grey37': (0.3686274509803922, 0.3686274509803922, 0.3686274509803922), 'grey38': 
(0.3803921568627451, 0.3803921568627451, 0.3803921568627451), 'grey39': (0.38823529411764707, 0.38823529411764707, 0.38823529411764707), 'grey4': (0.0392156862745098, 0.0392156862745098, 0.0392156862745098), 'grey40': (0.4, 0.4, 0.4), 'grey41': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529), 'grey42': (0.4196078431372549, 0.4196078431372549, 0.4196078431372549), 'grey43': (0.43137254901960786, 0.43137254901960786, 0.43137254901960786), 'grey44': (0.4392156862745098, 0.4392156862745098, 0.4392156862745098), 'grey45': (0.45098039215686275, 0.45098039215686275, 0.45098039215686275), 'grey46': (0.4588235294117647, 0.4588235294117647, 0.4588235294117647), 'grey47': (0.47058823529411764, 0.47058823529411764, 0.47058823529411764), 'grey48': (0.47843137254901963, 0.47843137254901963, 0.47843137254901963), 'grey49': (0.49019607843137253, 0.49019607843137253, 0.49019607843137253), 'grey5': (0.050980392156862744, 0.050980392156862744, 0.050980392156862744), 'grey50': (0.4980392156862745, 0.4980392156862745, 0.4980392156862745), 'grey51': (0.5098039215686274, 0.5098039215686274, 0.5098039215686274), 'grey52': (0.5215686274509804, 0.5215686274509804, 0.5215686274509804), 'grey53': (0.5294117647058824, 0.5294117647058824, 0.5294117647058824), 'grey54': (0.5411764705882353, 0.5411764705882353, 0.5411764705882353), 'grey55': (0.5490196078431373, 0.5490196078431373, 0.5490196078431373), 'grey56': (0.5607843137254902, 0.5607843137254902, 0.5607843137254902), 'grey57': (0.5686274509803921, 0.5686274509803921, 0.5686274509803921), 'grey58': (0.5803921568627451, 0.5803921568627451, 0.5803921568627451), 'grey59': (0.5882352941176471, 0.5882352941176471, 0.5882352941176471), 'grey6': (0.058823529411764705, 0.058823529411764705, 0.058823529411764705), 'grey60': (0.6, 0.6, 0.6), 'grey61': (0.611764705882353, 0.611764705882353, 0.611764705882353), 'grey62': (0.6196078431372549, 0.6196078431372549, 0.6196078431372549), 'grey63': (0.6313725490196078, 0.6313725490196078, 
0.6313725490196078), 'grey64': (0.6392156862745098, 0.6392156862745098, 0.6392156862745098), 'grey65': (0.6509803921568628, 0.6509803921568628, 0.6509803921568628), 'grey66': (0.6588235294117647, 0.6588235294117647, 0.6588235294117647), 'grey67': (0.6705882352941176, 0.6705882352941176, 0.6705882352941176), 'grey68': (0.6784313725490196, 0.6784313725490196, 0.6784313725490196), 'grey69': (0.6901960784313725, 0.6901960784313725, 0.6901960784313725), 'grey7': (0.07058823529411765, 0.07058823529411765, 0.07058823529411765), 'grey70': (0.7019607843137254, 0.7019607843137254, 0.7019607843137254), 'grey71': (0.7098039215686275, 0.7098039215686275, 0.7098039215686275), 'grey72': (0.7215686274509804, 0.7215686274509804, 0.7215686274509804), 'grey73': (0.7294117647058823, 0.7294117647058823, 0.7294117647058823), 'grey74': (0.7411764705882353, 0.7411764705882353, 0.7411764705882353), 'grey75': (0.7490196078431373, 0.7490196078431373, 0.7490196078431373), 'grey76': (0.7607843137254902, 0.7607843137254902, 0.7607843137254902), 'grey77': (0.7686274509803922, 0.7686274509803922, 0.7686274509803922), 'grey78': (0.7803921568627451, 0.7803921568627451, 0.7803921568627451), 'grey79': (0.788235294117647, 0.788235294117647, 0.788235294117647), 'grey8': (0.0784313725490196, 0.0784313725490196, 0.0784313725490196), 'grey80': (0.8, 0.8, 0.8), 'grey81': (0.8117647058823529, 0.8117647058823529, 0.8117647058823529), 'grey82': (0.8196078431372549, 0.8196078431372549, 0.8196078431372549), 'grey83': (0.8313725490196079, 0.8313725490196079, 0.8313725490196079), 'grey84': (0.8392156862745098, 0.8392156862745098, 0.8392156862745098), 'grey85': (0.8509803921568627, 0.8509803921568627, 0.8509803921568627), 'grey86': (0.8588235294117647, 0.8588235294117647, 0.8588235294117647), 'grey87': (0.8705882352941177, 0.8705882352941177, 0.8705882352941177), 'grey88': (0.8784313725490196, 0.8784313725490196, 0.8784313725490196), 'grey89': (0.8901960784313725, 0.8901960784313725, 0.8901960784313725), 'grey9': 
(0.09019607843137255, 0.09019607843137255, 0.09019607843137255), 'grey90': (0.8980392156862745, 0.8980392156862745, 0.8980392156862745), 'grey91': (0.9098039215686274, 0.9098039215686274, 0.9098039215686274), 'grey92': (0.9215686274509803, 0.9215686274509803, 0.9215686274509803), 'grey93': (0.9294117647058824, 0.9294117647058824, 0.9294117647058824), 'grey94': (0.9411764705882353, 0.9411764705882353, 0.9411764705882353), 'grey95': (0.9490196078431372, 0.9490196078431372, 0.9490196078431372), 'grey96': (0.9607843137254902, 0.9607843137254902, 0.9607843137254902), 'grey97': (0.9686274509803922, 0.9686274509803922, 0.9686274509803922), 'grey98': (0.9803921568627451, 0.9803921568627451, 0.9803921568627451), 'grey99': (0.9882352941176471, 0.9882352941176471, 0.9882352941176471), 'honeydew': (0.9411764705882353, 1.0, 0.9411764705882353), 'honeydew1': (0.9411764705882353, 1.0, 0.9411764705882353), 'honeydew2': (0.8784313725490196, 0.9333333333333333, 0.8784313725490196), 'honeydew3': (0.7568627450980392, 0.803921568627451, 0.7568627450980392), 'honeydew4': (0.5137254901960784, 0.5450980392156862, 0.5137254901960784), 'hotpink': (1.0, 0.4117647058823529, 0.7058823529411765), 'hotpink1': (1.0, 0.43137254901960786, 0.7058823529411765), 'hotpink2': (0.9333333333333333, 0.41568627450980394, 0.6549019607843137), 'hotpink3': (0.803921568627451, 0.3764705882352941, 0.5647058823529412), 'hotpink4': (0.5450980392156862, 0.22745098039215686, 0.3843137254901961), 'indianred': (0.803921568627451, 0.3607843137254902, 0.3607843137254902), 'indianred1': (1.0, 0.41568627450980394, 0.41568627450980394), 'indianred2': (0.9333333333333333, 0.38823529411764707, 0.38823529411764707), 'indianred3': (0.803921568627451, 0.3333333333333333, 0.3333333333333333), 'indianred4': (0.5450980392156862, 0.22745098039215686, 0.22745098039215686), 'ivory': (1.0, 1.0, 0.9411764705882353), 'ivory1': (1.0, 1.0, 0.9411764705882353), 'ivory2': (0.9333333333333333, 0.9333333333333333, 0.8784313725490196), 
'ivory3': (0.803921568627451, 0.803921568627451, 0.7568627450980392), 'ivory4': (0.5450980392156862, 0.5450980392156862, 0.5137254901960784), 'khaki': (0.9411764705882353, 0.9019607843137255, 0.5490196078431373), 'khaki1': (1.0, 0.9647058823529412, 0.5607843137254902), 'khaki2': (0.9333333333333333, 0.9019607843137255, 0.5215686274509804), 'khaki3': (0.803921568627451, 0.7764705882352941, 0.45098039215686275), 'khaki4': (0.5450980392156862, 0.5254901960784314, 0.3058823529411765), 'lavender': (0.9019607843137255, 0.9019607843137255, 0.9803921568627451), 'lavenderblush': (1.0, 0.9411764705882353, 0.9607843137254902), 'lavenderblush1': (1.0, 0.9411764705882353, 0.9607843137254902), 'lavenderblush2': (0.9333333333333333, 0.8784313725490196, 0.8980392156862745), 'lavenderblush3': (0.803921568627451, 0.7568627450980392, 0.7725490196078432), 'lavenderblush4': (0.5450980392156862, 0.5137254901960784, 0.5254901960784314), 'lawngreen': (0.48627450980392156, 0.9882352941176471, 0.0), 'lemonchiffon': (1.0, 0.9803921568627451, 0.803921568627451), 'lemonchiffon1': (1.0, 0.9803921568627451, 0.803921568627451), 'lemonchiffon2': (0.9333333333333333, 0.9137254901960784, 0.7490196078431373), 'lemonchiffon3': (0.803921568627451, 0.788235294117647, 0.6470588235294118), 'lemonchiffon4': (0.5450980392156862, 0.5372549019607843, 0.4392156862745098), 'lightblue': (0.6784313725490196, 0.8470588235294118, 0.9019607843137255), 'lightblue1': (0.7490196078431373, 0.9372549019607843, 1.0), 'lightblue2': (0.6980392156862745, 0.8745098039215686, 0.9333333333333333), 'lightblue3': (0.6039215686274509, 0.7529411764705882, 0.803921568627451), 'lightblue4': (0.40784313725490196, 0.5137254901960784, 0.5450980392156862), 'lightcoral': (0.9411764705882353, 0.5019607843137255, 0.5019607843137255), 'lightcyan': (0.8784313725490196, 1.0, 1.0), 'lightcyan1': (0.8784313725490196, 1.0, 1.0), 'lightcyan2': (0.8196078431372549, 0.9333333333333333, 0.9333333333333333), 'lightcyan3': (0.7058823529411765, 
0.803921568627451, 0.803921568627451), 'lightcyan4': (0.47843137254901963, 0.5450980392156862, 0.5450980392156862), 'lightgoldenrod': (0.9333333333333333, 0.8666666666666667, 0.5098039215686274), 'lightgoldenrod1': (1.0, 0.9254901960784314, 0.5450980392156862), 'lightgoldenrod2': (0.9333333333333333, 0.8627450980392157, 0.5098039215686274), 'lightgoldenrod3': (0.803921568627451, 0.7450980392156863, 0.4392156862745098), 'lightgoldenrod4': (0.5450980392156862, 0.5058823529411764, 0.2980392156862745), 'lightgoldenrodyellow': (0.9803921568627451, 0.9803921568627451, 0.8235294117647058), 'lightgray': (0.8274509803921568, 0.8274509803921568, 0.8274509803921568), 'lightgreen': (0.5647058823529412, 0.9333333333333333, 0.5647058823529412), 'lightgrey': (0.8274509803921568, 0.8274509803921568, 0.8274509803921568), 'lightpink': (1.0, 0.7137254901960784, 0.7568627450980392), 'lightpink1': (1.0, 0.6823529411764706, 0.7254901960784313), 'lightpink2': (0.9333333333333333, 0.6352941176470588, 0.6784313725490196), 'lightpink3': (0.803921568627451, 0.5490196078431373, 0.5843137254901961), 'lightpink4': (0.5450980392156862, 0.37254901960784315, 0.396078431372549), 'lightsalmon': (1.0, 0.6274509803921569, 0.47843137254901963), 'lightsalmon1': (1.0, 0.6274509803921569, 0.47843137254901963), 'lightsalmon2': (0.9333333333333333, 0.5843137254901961, 0.4470588235294118), 'lightsalmon3': (0.803921568627451, 0.5058823529411764, 0.3843137254901961), 'lightsalmon4': (0.5450980392156862, 0.3411764705882353, 0.25882352941176473), 'lightseagreen': (0.12549019607843137, 0.6980392156862745, 0.6666666666666666), 'lightskyblue': (0.5294117647058824, 0.807843137254902, 0.9803921568627451), 'lightskyblue1': (0.6901960784313725, 0.8862745098039215, 1.0), 'lightskyblue2': (0.6431372549019608, 0.8274509803921568, 0.9333333333333333), 'lightskyblue3': (0.5529411764705883, 0.7137254901960784, 0.803921568627451), 'lightskyblue4': (0.3764705882352941, 0.4823529411764706, 0.5450980392156862), 'lightslateblue': 
(0.5176470588235295, 0.4392156862745098, 1.0), 'lightslategray': (0.4666666666666667, 0.5333333333333333, 0.6), 'lightslategrey': (0.4666666666666667, 0.5333333333333333, 0.6), 'lightsteelblue': (0.6901960784313725, 0.7686274509803922, 0.8705882352941177), 'lightsteelblue1': (0.792156862745098, 0.8823529411764706, 1.0), 'lightsteelblue2': (0.7372549019607844, 0.8235294117647058, 0.9333333333333333), 'lightsteelblue3': (0.6352941176470588, 0.7098039215686275, 0.803921568627451), 'lightsteelblue4': (0.43137254901960786, 0.4823529411764706, 0.5450980392156862), 'lightyellow': (1.0, 1.0, 0.8784313725490196), 'lightyellow1': (1.0, 1.0, 0.8784313725490196), 'lightyellow2': (0.9333333333333333, 0.9333333333333333, 0.8196078431372549), 'lightyellow3': (0.803921568627451, 0.803921568627451, 0.7058823529411765), 'lightyellow4': (0.5450980392156862, 0.5450980392156862, 0.47843137254901963), 'limegreen': (0.19607843137254902, 0.803921568627451, 0.19607843137254902), 'linen': (0.9803921568627451, 0.9411764705882353, 0.9019607843137255), 'magenta': (1.0, 0.0, 1.0), 'magenta1': (1.0, 0.0, 1.0), 'magenta2': (0.9333333333333333, 0.0, 0.9333333333333333), 'magenta3': (0.803921568627451, 0.0, 0.803921568627451), 'magenta4': (0.5450980392156862, 0.0, 0.5450980392156862), 'maroon': (0.6901960784313725, 0.18823529411764706, 0.3764705882352941), 'maroon1': (1.0, 0.20392156862745098, 0.7019607843137254), 'maroon2': (0.9333333333333333, 0.18823529411764706, 0.6549019607843137), 'maroon3': (0.803921568627451, 0.1607843137254902, 0.5647058823529412), 'maroon4': (0.5450980392156862, 0.10980392156862745, 0.3843137254901961), 'mediumaquamarine': (0.4, 0.803921568627451, 0.6666666666666666), 'mediumblue': (0.0, 0.0, 0.803921568627451), 'mediumorchid': (0.7294117647058823, 0.3333333333333333, 0.8274509803921568), 'mediumorchid1': (0.8784313725490196, 0.4, 1.0), 'mediumorchid2': (0.8196078431372549, 0.37254901960784315, 0.9333333333333333), 'mediumorchid3': (0.7058823529411765, 0.3215686274509804, 
0.803921568627451), 'mediumorchid4': (0.47843137254901963, 0.21568627450980393, 0.5450980392156862), 'mediumpurple': (0.5764705882352941, 0.4392156862745098, 0.8588235294117647), 'mediumpurple1': (0.6705882352941176, 0.5098039215686274, 1.0), 'mediumpurple2': (0.6235294117647059, 0.4745098039215686, 0.9333333333333333), 'mediumpurple3': (0.5372549019607843, 0.40784313725490196, 0.803921568627451), 'mediumpurple4': (0.36470588235294116, 0.2784313725490196, 0.5450980392156862), 'mediumseagreen': (0.23529411764705882, 0.7019607843137254, 0.44313725490196076), 'mediumslateblue': (0.4823529411764706, 0.40784313725490196, 0.9333333333333333), 'mediumspringgreen': (0.0, 0.9803921568627451, 0.6039215686274509), 'mediumturquoise': (0.2823529411764706, 0.8196078431372549, 0.8), 'mediumvioletred': (0.7803921568627451, 0.08235294117647059, 0.5215686274509804), 'midnightblue': (0.09803921568627451, 0.09803921568627451, 0.4392156862745098), 'mintcream': (0.9607843137254902, 1.0, 0.9803921568627451), 'mistyrose': (1.0, 0.8941176470588236, 0.8823529411764706), 'mistyrose1': (1.0, 0.8941176470588236, 0.8823529411764706), 'mistyrose2': (0.9333333333333333, 0.8352941176470589, 0.8235294117647058), 'mistyrose3': (0.803921568627451, 0.7176470588235294, 0.7098039215686275), 'mistyrose4': (0.5450980392156862, 0.49019607843137253, 0.4823529411764706), 'moccasin': (1.0, 0.8941176470588236, 0.7098039215686275), 'navajowhite': (1.0, 0.8705882352941177, 0.6784313725490196), 'navajowhite1': (1.0, 0.8705882352941177, 0.6784313725490196), 'navajowhite2': (0.9333333333333333, 0.8117647058823529, 0.6313725490196078), 'navajowhite3': (0.803921568627451, 0.7019607843137254, 0.5450980392156862), 'navajowhite4': (0.5450980392156862, 0.4745098039215686, 0.3686274509803922), 'navy': (0.0, 0.0, 0.5019607843137255), 'navyblue': (0.0, 0.0, 0.5019607843137255), 'oldlace': (0.9921568627450981, 0.9607843137254902, 0.9019607843137255), 'olivedrab': (0.4196078431372549, 0.5568627450980392, 0.13725490196078433), 
'olivedrab1': (0.7529411764705882, 1.0, 0.24313725490196078), 'olivedrab2': (0.7019607843137254, 0.9333333333333333, 0.22745098039215686), 'olivedrab3': (0.6039215686274509, 0.803921568627451, 0.19607843137254902), 'olivedrab4': (0.4117647058823529, 0.5450980392156862, 0.13333333333333333), 'orange': (1.0, 0.6470588235294118, 0.0), 'orange1': (1.0, 0.6470588235294118, 0.0), 'orange2': (0.9333333333333333, 0.6039215686274509, 0.0), 'orange3': (0.803921568627451, 0.5215686274509804, 0.0), 'orange4': (0.5450980392156862, 0.35294117647058826, 0.0), 'orangered': (1.0, 0.27058823529411763, 0.0), 'orangered1': (1.0, 0.27058823529411763, 0.0), 'orangered2': (0.9333333333333333, 0.25098039215686274, 0.0), 'orangered3': (0.803921568627451, 0.21568627450980393, 0.0), 'orangered4': (0.5450980392156862, 0.1450980392156863, 0.0), 'orchid': (0.8549019607843137, 0.4392156862745098, 0.8392156862745098), 'orchid1': (1.0, 0.5137254901960784, 0.9803921568627451), 'orchid2': (0.9333333333333333, 0.47843137254901963, 0.9137254901960784), 'orchid3': (0.803921568627451, 0.4117647058823529, 0.788235294117647), 'orchid4': (0.5450980392156862, 0.2784313725490196, 0.5372549019607843), 'palegoldenrod': (0.9333333333333333, 0.9098039215686274, 0.6666666666666666), 'palegreen': (0.596078431372549, 0.984313725490196, 0.596078431372549), 'palegreen1': (0.6039215686274509, 1.0, 0.6039215686274509), 'palegreen2': (0.5647058823529412, 0.9333333333333333, 0.5647058823529412), 'palegreen3': (0.48627450980392156, 0.803921568627451, 0.48627450980392156), 'palegreen4': (0.32941176470588235, 0.5450980392156862, 0.32941176470588235), 'paleturquoise': (0.6862745098039216, 0.9333333333333333, 0.9333333333333333), 'paleturquoise1': (0.7333333333333333, 1.0, 1.0), 'paleturquoise2': (0.6823529411764706, 0.9333333333333333, 0.9333333333333333), 'paleturquoise3': (0.5882352941176471, 0.803921568627451, 0.803921568627451), 'paleturquoise4': (0.4, 0.5450980392156862, 0.5450980392156862), 'palevioletred': 
(0.8588235294117647, 0.4392156862745098, 0.5764705882352941), 'palevioletred1': (1.0, 0.5098039215686274, 0.6705882352941176), 'palevioletred2': (0.9333333333333333, 0.4745098039215686, 0.6235294117647059), 'palevioletred3': (0.803921568627451, 0.40784313725490196, 0.5372549019607843), 'palevioletred4': (0.5450980392156862, 0.2784313725490196, 0.36470588235294116), 'papayawhip': (1.0, 0.9372549019607843, 0.8352941176470589), 'peachpuff': (1.0, 0.8549019607843137, 0.7254901960784313), 'peachpuff1': (1.0, 0.8549019607843137, 0.7254901960784313), 'peachpuff2': (0.9333333333333333, 0.796078431372549, 0.6784313725490196), 'peachpuff3': (0.803921568627451, 0.6862745098039216, 0.5843137254901961), 'peachpuff4': (0.5450980392156862, 0.4666666666666667, 0.396078431372549), 'peru': (0.803921568627451, 0.5215686274509804, 0.24705882352941178), 'pink': (1.0, 0.7529411764705882, 0.796078431372549), 'pink1': (1.0, 0.7098039215686275, 0.7725490196078432), 'pink2': (0.9333333333333333, 0.6627450980392157, 0.7215686274509804), 'pink3': (0.803921568627451, 0.5686274509803921, 0.6196078431372549), 'pink4': (0.5450980392156862, 0.38823529411764707, 0.4235294117647059), 'plum': (0.8666666666666667, 0.6274509803921569, 0.8666666666666667), 'plum1': (1.0, 0.7333333333333333, 1.0), 'plum2': (0.9333333333333333, 0.6823529411764706, 0.9333333333333333), 'plum3': (0.803921568627451, 0.5882352941176471, 0.803921568627451), 'plum4': (0.5450980392156862, 0.4, 0.5450980392156862), 'powderblue': (0.6901960784313725, 0.8784313725490196, 0.9019607843137255), 'purple': (0.6274509803921569, 0.12549019607843137, 0.9411764705882353), 'purple1': (0.6078431372549019, 0.18823529411764706, 1.0), 'purple2': (0.5686274509803921, 0.17254901960784313, 0.9333333333333333), 'purple3': (0.49019607843137253, 0.14901960784313725, 0.803921568627451), 'purple4': (0.3333333333333333, 0.10196078431372549, 0.5450980392156862), 'red': (1.0, 0.0, 0.0), 'red1': (1.0, 0.0, 0.0), 'red2': (0.9333333333333333, 0.0, 0.0), 
'red3': (0.803921568627451, 0.0, 0.0), 'red4': (0.5450980392156862, 0.0, 0.0), 'rosybrown': (0.7372549019607844, 0.5607843137254902, 0.5607843137254902), 'rosybrown1': (1.0, 0.7568627450980392, 0.7568627450980392), 'rosybrown2': (0.9333333333333333, 0.7058823529411765, 0.7058823529411765), 'rosybrown3': (0.803921568627451, 0.6078431372549019, 0.6078431372549019), 'rosybrown4': (0.5450980392156862, 0.4117647058823529, 0.4117647058823529), 'royalblue': (0.2549019607843137, 0.4117647058823529, 0.8823529411764706), 'royalblue1': (0.2823529411764706, 0.4627450980392157, 1.0), 'royalblue2': (0.2627450980392157, 0.43137254901960786, 0.9333333333333333), 'royalblue3': (0.22745098039215686, 0.37254901960784315, 0.803921568627451), 'royalblue4': (0.15294117647058825, 0.25098039215686274, 0.5450980392156862), 'saddlebrown': (0.5450980392156862, 0.27058823529411763, 0.07450980392156863), 'salmon': (0.9803921568627451, 0.5019607843137255, 0.4470588235294118), 'salmon1': (1.0, 0.5490196078431373, 0.4117647058823529), 'salmon2': (0.9333333333333333, 0.5098039215686274, 0.3843137254901961), 'salmon3': (0.803921568627451, 0.4392156862745098, 0.32941176470588235), 'salmon4': (0.5450980392156862, 0.2980392156862745, 0.2235294117647059), 'sandybrown': (0.9568627450980393, 0.6431372549019608, 0.3764705882352941), 'seagreen': (0.1803921568627451, 0.5450980392156862, 0.3411764705882353), 'seagreen1': (0.32941176470588235, 1.0, 0.6235294117647059), 'seagreen2': (0.3058823529411765, 0.9333333333333333, 0.5803921568627451), 'seagreen3': (0.2627450980392157, 0.803921568627451, 0.5019607843137255), 'seagreen4': (0.1803921568627451, 0.5450980392156862, 0.3411764705882353), 'seashell': (1.0, 0.9607843137254902, 0.9333333333333333), 'seashell1': (1.0, 0.9607843137254902, 0.9333333333333333), 'seashell2': (0.9333333333333333, 0.8980392156862745, 0.8705882352941177), 'seashell3': (0.803921568627451, 0.7725490196078432, 0.7490196078431373), 'seashell4': (0.5450980392156862, 0.5254901960784314, 
0.5098039215686274), 'sienna': (0.6274509803921569, 0.3215686274509804, 0.17647058823529413), 'sienna1': (1.0, 0.5098039215686274, 0.2784313725490196), 'sienna2': (0.9333333333333333, 0.4745098039215686, 0.25882352941176473), 'sienna3': (0.803921568627451, 0.40784313725490196, 0.2235294117647059), 'sienna4': (0.5450980392156862, 0.2784313725490196, 0.14901960784313725), 'skyblue': (0.5294117647058824, 0.807843137254902, 0.9215686274509803), 'skyblue1': (0.5294117647058824, 0.807843137254902, 1.0), 'skyblue2': (0.49411764705882355, 0.7529411764705882, 0.9333333333333333), 'skyblue3': (0.4235294117647059, 0.6509803921568628, 0.803921568627451), 'skyblue4': (0.2901960784313726, 0.4392156862745098, 0.5450980392156862), 'slateblue': (0.41568627450980394, 0.35294117647058826, 0.803921568627451), 'slateblue1': (0.5137254901960784, 0.43529411764705883, 1.0), 'slateblue2': (0.47843137254901963, 0.403921568627451, 0.9333333333333333), 'slateblue3': (0.4117647058823529, 0.34901960784313724, 0.803921568627451), 'slateblue4': (0.2784313725490196, 0.23529411764705882, 0.5450980392156862), 'slategray': (0.4392156862745098, 0.5019607843137255, 0.5647058823529412), 'slategray1': (0.7764705882352941, 0.8862745098039215, 1.0), 'slategray2': (0.7254901960784313, 0.8274509803921568, 0.9333333333333333), 'slategray3': (0.6235294117647059, 0.7137254901960784, 0.803921568627451), 'slategray4': (0.4235294117647059, 0.4823529411764706, 0.5450980392156862), 'slategrey': (0.4392156862745098, 0.5019607843137255, 0.5647058823529412), 'snow': (1.0, 0.9803921568627451, 0.9803921568627451), 'snow1': (1.0, 0.9803921568627451, 0.9803921568627451), 'snow2': (0.9333333333333333, 0.9137254901960784, 0.9137254901960784), 'snow3': (0.803921568627451, 0.788235294117647, 0.788235294117647), 'snow4': (0.5450980392156862, 0.5372549019607843, 0.5372549019607843), 'springgreen': (0.0, 1.0, 0.4980392156862745), 'springgreen1': (0.0, 1.0, 0.4980392156862745), 'springgreen2': (0.0, 0.9333333333333333, 
0.4627450980392157), 'springgreen3': (0.0, 0.803921568627451, 0.4), 'springgreen4': (0.0, 0.5450980392156862, 0.27058823529411763), 'steelblue': (0.27450980392156865, 0.5098039215686274, 0.7058823529411765), 'steelblue1': (0.38823529411764707, 0.7215686274509804, 1.0), 'steelblue2': (0.3607843137254902, 0.6745098039215687, 0.9333333333333333), 'steelblue3': (0.30980392156862746, 0.5803921568627451, 0.803921568627451), 'steelblue4': (0.21176470588235294, 0.39215686274509803, 0.5450980392156862), 'tan': (0.8235294117647058, 0.7058823529411765, 0.5490196078431373), 'tan1': (1.0, 0.6470588235294118, 0.30980392156862746), 'tan2': (0.9333333333333333, 0.6039215686274509, 0.28627450980392155), 'tan3': (0.803921568627451, 0.5215686274509804, 0.24705882352941178), 'tan4': (0.5450980392156862, 0.35294117647058826, 0.16862745098039217), 'thistle': (0.8470588235294118, 0.7490196078431373, 0.8470588235294118), 'thistle1': (1.0, 0.8823529411764706, 1.0), 'thistle2': (0.9333333333333333, 0.8235294117647058, 0.9333333333333333), 'thistle3': (0.803921568627451, 0.7098039215686275, 0.803921568627451), 'thistle4': (0.5450980392156862, 0.4823529411764706, 0.5450980392156862), 'tomato': (1.0, 0.38823529411764707, 0.2784313725490196), 'tomato1': (1.0, 0.38823529411764707, 0.2784313725490196), 'tomato2': (0.9333333333333333, 0.3607843137254902, 0.25882352941176473), 'tomato3': (0.803921568627451, 0.30980392156862746, 0.2235294117647059), 'tomato4': (0.5450980392156862, 0.21176470588235294, 0.14901960784313725), 'turquoise': (0.25098039215686274, 0.8784313725490196, 0.8156862745098039), 'turquoise1': (0.0, 0.9607843137254902, 1.0), 'turquoise2': (0.0, 0.8980392156862745, 0.9333333333333333), 'turquoise3': (0.0, 0.7725490196078432, 0.803921568627451), 'turquoise4': (0.0, 0.5254901960784314, 0.5450980392156862), 'violet': (0.9333333333333333, 0.5098039215686274, 0.9333333333333333), 'violetred': (0.8156862745098039, 0.12549019607843137, 0.5647058823529412), 'violetred1': (1.0, 
0.24313725490196078, 0.5882352941176471),
'violetred2': (0.9333333333333333, 0.22745098039215686, 0.5490196078431373),
'violetred3': (0.803921568627451, 0.19607843137254902, 0.47058823529411764),
'violetred4': (0.5450980392156862, 0.13333333333333333, 0.3215686274509804),
'wheat': (0.9607843137254902, 0.8705882352941177, 0.7019607843137254),
'wheat1': (1.0, 0.9058823529411765, 0.7294117647058823),
'wheat2': (0.9333333333333333, 0.8470588235294118, 0.6823529411764706),
'wheat3': (0.803921568627451, 0.7294117647058823, 0.5882352941176471),
'wheat4': (0.5450980392156862, 0.49411764705882355, 0.4),
'white': (1.0, 1.0, 1.0),
'whitesmoke': (0.9607843137254902, 0.9607843137254902, 0.9607843137254902),
'yellow': (1.0, 1.0, 0.0),
'yellow1': (1.0, 1.0, 0.0),
'yellow2': (0.9333333333333333, 0.9333333333333333, 0.0),
'yellow3': (0.803921568627451, 0.803921568627451, 0.0),
'yellow4': (0.5450980392156862, 0.5450980392156862, 0.0),
'yellowgreen': (0.6039215686274509, 0.803921568627451, 0.19607843137254902),
}

# Sorted list of the color names in color_dict; rebuilt by
# recalc_color_list() whenever the dict is mutated.
color_list = []


def recalc_color_list():
    """Rebuild the module-level sorted list of color names from color_dict."""
    global color_list
    color_list = list(color_dict.keys())
    color_list.sort()


def lookup_color(name, format='tuple'):
    """Look up color `name` in the registry.

    With format='tuple' (default) return an (r, g, b) tuple of floats in
    [0, 1]; with format='hash' return an HTML-style '#rrggbb' string.
    Raises KeyError for an unknown name and ValueError for any other format.
    """
    color = color_dict[name]
    if format == 'tuple':
        return color
    elif format == 'hash':
        # Scale each 0..1 channel back to an 8-bit value for the hex form.
        return "#%02x%02x%02x" % (
            int(color[0]*255), int(color[1]*255), int(color[2]*255))
    else:
        raise ValueError("format needs to be 'tuple' or 'hash'")


def add_color(name, tup):
    """Register color `name` as (r, g, b) tuple `tup` (floats in [0, 1])."""
    global color_dict
    color_dict[name] = tup
    recalc_color_list()


def remove_color(name):
    """Remove color `name` from the registry; raises KeyError if absent."""
    global color_dict
    del color_dict[name]
    recalc_color_list()


def get_colors():
    """Return the (sorted) list of known color names."""
    return color_list


def scan_rgbtxt(filepath):
    """Parse an X11-style rgb.txt file at `filepath`.

    Returns a dict mapping lower-cased color name -> (r, g, b) tuple of
    floats in [0, 1].  Lines are expected to look like "255 250 250 snow";
    note the regex only matches single-word names, so multi-word entries
    (e.g. "alice blue") are skipped.
    """
    with open(filepath, 'r') as in_f:
        buf = in_f.read()

    res = {}
    for line in buf.split('\n'):
        match = re.match(r"^\s*(\d+)\s+(\d+)\s+(\d+)\s+([\w_]+)\s*$", line)
        if match:
            r, g, b, name = match.groups()
            # Normalize 8-bit channels to floats in [0, 1].
            r = float(r) / 255.0
            g = float(g) / 255.0
            b = float(b) / 255.0
            name = name.lower()
            res[name] = (r, g, b)
    return res


# create initial color list
recalc_color_list()

if __name__ == "__main__":
    # Quick manual check: parse an rgb.txt given on the command line.
    import sys, pprint
    res = scan_rgbtxt(sys.argv[1])
    pprint.pprint(res)

#END
eteq/ginga
ginga/colors.py
Python
bsd-3-clause
48,361
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""IPublicationRequest base test

$Id: basetestipublicationrequest.py 65511 2006-02-27 05:24:24Z philikon $
"""
import sys

from zope.interface import Interface, directlyProvides, implements
from zope.interface.verify import verifyObject
from zope.publisher.interfaces import IPublicationRequest, IHeld
from zope.publisher.interfaces.browser import IBrowserSkinType


class Held:
    """Minimal IHeld resource: records whether release() was called."""
    implements(IHeld)

    # Flipped to True by release(); lets tests verify the request
    # released held resources when it was closed.
    released = False

    def release(self):
        self.released = True


class BaseTestIPublicationRequest(object):
    """Mixin of tests shared by all IPublicationRequest implementations.

    Concrete test cases mix this in and provide a ``_Test__new()`` factory
    that returns a fresh request instance (the name-mangled spelling is
    deliberate so subclasses define it as ``__new``).
    """

    def testVerifyIPublicationRequest(self):
        # The request must fully provide the IPublicationRequest interface.
        verifyObject(IPublicationRequest, self._Test__new())

    def testHaveCustomTestsForIPublicationRequest(self):
        # Make sure that tests are defined for things we can't test here
        self.test_IPublicationRequest_getPositionalArguments

    def testTraversalStack(self):
        # setTraversalStack/getTraversalStack must round-trip the stack.
        request = self._Test__new()
        stack = ['Engineering', 'ZopeCorp']
        request.setTraversalStack(stack)
        self.assertEqual(list(request.getTraversalStack()), stack)

    def testHoldCloseAndGetResponse(self):
        # hold() must keep a reference to a resource, close() must drop it
        # (and call release() on IHeld resources).  Verified via CPython
        # reference counts, so the ordering of these statements matters.
        request = self._Test__new()
        response = request.response
        rcresponse = sys.getrefcount(response)

        resource = object()
        rcresource = sys.getrefcount(resource)
        request.hold(resource)

        resource2 = Held()
        rcresource2 = sys.getrefcount(resource2)
        request.hold(resource2)

        # While held, the request keeps extra references alive.
        self.failUnless(sys.getrefcount(resource) > rcresource)
        self.failUnless(sys.getrefcount(resource2) > rcresource2)
        self.failIf(resource2.released)

        request.close()

        # close() released the IHeld resource and dropped both references.
        self.failUnless(resource2.released)
        # Responses are not unreferenced during close()
        self.failUnless(sys.getrefcount(response) >= rcresponse)
        self.assertEqual(sys.getrefcount(resource), rcresource)
        self.assertEqual(sys.getrefcount(resource2), rcresource2)

    def testSkinManagement(self):
        # A skin interface can be applied to a request with directlyProvides.
        request = self._Test__new()

        class IMoreFoo(Interface):
            pass
        directlyProvides(IMoreFoo, IBrowserSkinType)

        self.assertEqual(IMoreFoo.providedBy(request), False)
        directlyProvides(request, IMoreFoo)
        self.assertEqual(IMoreFoo.providedBy(request), True)
Donkyhotay/MoonPy
zope/publisher/tests/basetestipublicationrequest.py
Python
gpl-3.0
2,904
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2009-2019 atrain_match developers
#
# This file is part of atrain_match.
#
# atrain_match is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# atrain_match is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with atrain_match. If not, see <http://www.gnu.org/licenses/>.
"""
This module is the main entry point for matching imager data with Cloudsat
and Calipso.

"""

from atrain_match.utils.runutils import parse_scenesfile_v2014
from atrain_match.utils.runutils import parse_scenesfile_cci
from atrain_match.utils.runutils import parse_scene
from atrain_match.utils.runutils import parse_scenesfile_maia
from atrain_match.utils.runutils import parse_scenesfile_reshaped
from atrain_match.utils.common import Cross
from atrain_match.libs import truth_imager_match
import atrain_match.config as config
from atrain_match.utils.common import MatchupError
import logging
logging.basicConfig(
    format='%(levelname)s |%(asctime)s|: %(message)s',
    level=logging.INFO,
    # datefmt='%Y-%m-%d %H:%M:%S')
    datefmt='%H:%M:%S')
logger = logging.getLogger(__name__)


def _read_crosses(product_file, parse_line):
    """Build a list of Cross objects from a product listing file.

    :param product_file: path to a text file with one scene per line.
    :param parse_line: callable returning (satname, time) for one line.
    :return: list of :class:`common.Cross` instances (blank lines skipped).

    FIX: the file is now opened with a context manager so the handle is
    always closed; the original opened four files and never closed them.
    """
    crosses = []
    with open(product_file, 'r') as read_from_file:
        for line in read_from_file:
            # Skip blank lines (the original spelled this as
            # ``line.rstrip() in ""``, which only matches the empty string).
            if line.rstrip():
                satname, time = parse_line(line)
                crosses.append(Cross(satname, time))
    return crosses


def process_matchups(matchups, reprocess=False, debug=False):
    """
    Run the given *matchups* through the validation system.

    *matchups* should be a list of :class:`common.Cross` instances.

    If *reprocess* is True, disregard any previously generated matchup files.
    If *debug* is True, re-raise unexpected errors instead of recording them.

    :return: 0 on success, 5 if any case failed or had no matchups.
    """
    from atrain_match.utils.runutils import read_config_info
    AM_PATHS, SETTINGS = read_config_info()

    problematic = set()
    no_matchup_files = []
    outstatus = 0
    for match in matchups:
        try:
            truth_imager_match.run(match, AM_PATHS, SETTINGS, reprocess)
        except MatchupError as err:
            # Expected failure mode: no truth data in the time window/region.
            logger.warning("Matchup problem: %s", str(err))
            import traceback
            traceback.print_exc()
            no_matchup_files.append(match)
            outstatus = 5
        # FIX: catch Exception instead of a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            import traceback
            outstatus = 5
            traceback.print_exc()
            problematic.add(match)
            logger.warning("Couldn't run truth_imager_match.")
            if debug is True:
                raise

    if len(no_matchup_files) > 0:
        logger.warning(
            "%d of %d cases had no matchups in region, within the time window:\n%s",
            len(no_matchup_files), len(matchups),
            '\n'.join([str(m) for m in no_matchup_files]))
    if len(problematic) > 0:
        logger.warning("%d of %d cases had unknown problems:\n%s",
                       len(problematic), len(matchups),
                       '\n'.join([str(m) for m in problematic]))
    return outstatus


def main():
    """
    Process command line options and run matchup and validation.

    For a complete usage description, run 'python process_master -h'.

    :return: 0 (process_matchups logs failures rather than raising).
    """
    import argparse
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    parser.add_argument('--reprocess', '-r', const=True, nargs='?',
                        required=False,
                        help="Disregard any previously generated Cloudsat- and "
                        "Calipso-IMAGER matchup files.")
    parser.add_argument('-d', '--debug', const=True, nargs='?',
                        required=False,
                        help="Get debug logging")
    group.add_argument('--pps_okay_scene', '-os',
                       help="Interpret arguments noaa19_20101201_1345_27891")
    group.add_argument('--pps_product_file', '-pf',
                       help="Interpret arguments as inputfile with "
                       "list of pps files")
    group.add_argument('--cci_product_file', '-cf',
                       help="Interpret arguments as inputfile with "
                       "list of cci files")
    group.add_argument('--maia_product_file', '-mf',
                       help="Interpret arguments as inputfile with "
                       "list of maia files")
    group.add_argument('--reshaped_product_file', '-rf',
                       help="Interpret arguments as reshaped_output_file")
    options = parser.parse_args()

    reprocess = False
    if options.reprocess is not None:
        reprocess = options.reprocess

    config.DEBUG = options.debug
    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    matchups = []
    if options.pps_okay_scene:
        # Simulate a cross directly from a single PPS scene id.
        satname, time, _orbit = parse_scene(options.pps_okay_scene)
        matchups.append(Cross(satname, time))
    elif options.pps_product_file is not None:
        matchups = _read_crosses(options.pps_product_file,
                                 parse_scenesfile_v2014)
    elif options.cci_product_file is not None:
        matchups = _read_crosses(options.cci_product_file,
                                 parse_scenesfile_cci)
    elif options.maia_product_file is not None:
        matchups = _read_crosses(options.maia_product_file,
                                 parse_scenesfile_maia)
    elif options.reshaped_product_file is not None:
        matchups = _read_crosses(options.reshaped_product_file,
                                 parse_scenesfile_reshaped)

    process_matchups(matchups, reprocess, options.debug)
    return 0


# ------------------------------------------------------------------------------
if __name__ == '__main__':
    main()
foua-pps/atrain_match
atrain_match/process_atrain_match.py
Python
gpl-3.0
6,827
"""This module is part of Swampy, a suite of programs available from allendowney.com/swampy. Copyright 2010 Allen B. Downey Distributed under the GNU General Public License at gnu.org/licenses/gpl.html. """ import unittest import TurmiteWorld class Tests(unittest.TestCase): def test_turmite_world(self): tw = TurmiteWorld.TurmiteWorld() tw.setup() turmite = tw.make_turmite() tw.clear() tw.quit() def test_turmite(self): tw = TurmiteWorld.TurmiteWorld() t = TurmiteWorld.Turmite(tw) t.draw() t.fd() t.bk() t.rt() t.lt() cell = t.get_cell() t.step() t.undraw() tw.quit() if __name__ == '__main__': unittest.main()
DoWhatILove/turtle
programming/python/design/swampy/TurmiteWorld_test.py
Python
mit
769
# Example of low-level Python wrapper for rpi_ws281x library.
# Author: Tony DiCola (tony@tonydicola.com), Jeremy Garff (jer@jers.net)
#
# This is an example of how to use the SWIG-generated _rpi_ws281x module.
# You probably don't want to use this unless you are building your own library,
# because the SWIG generated module is clunky and verbose. Instead look at the
# high level Python port of Adafruit's NeoPixel Arduino library in strandtest.py.
#
# This code will animate a number of WS281x LEDs displaying rainbow colors.

import os
import sys
import time
from threading import Lock

import _rpi_ws281x as ws

# checks: the DMA driver needs raw /dev/mem access, and the code relies on
# Python 3 semantics.
assert os.access("/dev/mem", os.W_OK)
assert sys.version_info >= (3, 0)

# LED configuration.
LED_CHANNEL = 0
LED_COUNT = 68         # How many LEDs to light.
LED_FREQ_HZ = 800000   # Frequency of the LED signal.  Should be 800khz or 400khz.
LED_DMA_NUM = 10       # DMA channel to use, can be 0-14.
LED_GPIO = 18          # GPIO connected to the LED signal line.  Must support PWM!
LED_BRIGHTNESS = 255   # Set to 0 for darkest and 255 for brightest
LED_INVERT = 0         # Set to 1 to invert the LED signal, good if using NPN
                       # transistor as a 3.3V->5V level converter.  Keep
                       # at 0 for a normal/non-inverted signal.

# Options: ws.WS2811_STRIP_RGB, ws.SK6812_STRIP_RGBW, ws.SK6812W_STRIP
LED_STRIP = ws.WS2811_STRIP_BRG  # 2811 is the 12v strip


class Controller:
    """Schedules animations and renders them onto the LED strip.

    .. note:: animations are placed into 1 second buckets based on start time
    """

    def __init__(self):
        # Maps int(start_time) -> list of animations starting in that second.
        self.__animations = {}
        # Animations that are currently running.
        self.__active = []
        # Guards __animations/__active: add_animation() may be called from a
        # different thread than the one driving spin().
        self.__lock = Lock()

    def add_animation(self, _animation):
        """Queue an animation; it becomes active once its start bucket arrives."""
        bucket_index = int(_animation.get_start_time())
        # BUG FIX: the shared schedule is now mutated under the lock so a
        # concurrent spin_once() cannot observe a half-updated dict.
        with self.__lock:
            self.__animations.setdefault(bucket_index, []).append(_animation)
            print("Animations:", self.__animations)

    def get_animations(self, time):
        """Pop and return the animations bucketed at ``int(time)``.

        NOTE: the parameter name shadows the ``time`` module inside this
        method (kept for interface compatibility).  The caller must already
        hold ``self.__lock`` -- spin_once() does; the Lock is not reentrant.
        """
        result = []
        t = int(time)
        if t in self.__animations:
            result = self.__animations.pop(t)
            print("Animations:", self.__animations)
        return result

    def spin_once(self, leds, ws, channel):
        """Render one frame: activate due animations, paint LEDs, push to HW."""
        # BUG FIX: use a `with` block instead of bare acquire()/release() so
        # the lock is released even when ws2811_render raises.
        with self.__lock:
            now = time.time()

            # Promote animations whose start bucket has arrived.
            self.__active.extend(self.get_animations(now))

            # Call each active animation
            i = 0
            while i < len(self.__active):
                a = self.__active[i]
                if a.get_stop_time() < now:
                    # Finished: drop it.  Do not advance i -- the next
                    # element has shifted into slot i.
                    self.__active.pop(i)
                    continue
                if a.get_start_time() > now:
                    # Not started yet: buckets are whole seconds, so an
                    # animation can be promoted slightly early.
                    # BUG FIX: the original `continue` here never advanced
                    # `i`, so the loop spun forever whenever a future-start
                    # animation was active.
                    i += 1
                    continue
                a_size = a.get_stop_pos() - a.get_start_pos()
                for led in range(int(a.get_start_pos() * LED_COUNT),
                                 int(a.get_stop_pos() * LED_COUNT)):
                    abs_pos = led * 1. / LED_COUNT
                    rel_pos = (abs_pos - a.get_start_pos()) / a_size
                    ws.ws2811_led_set(channel, led, a.get_color(now, rel_pos))
                i += 1

            # Send the LED color data to the hardware.
            resp = ws.ws2811_render(leds)
            if resp != ws.WS2811_SUCCESS:
                message = ws.ws2811_get_return_t_str(resp)
                raise RuntimeError('ws2811_render failed with code {0} ({1})'
                                   .format(resp, message))

    def spin(self):
        """Initialize the hardware, then render frames forever."""
        # Create a ws2811_t structure from the LED configuration.
        # Note that this structure will be created on the heap so you need to
        # be careful that you delete its memory by calling delete_ws2811_t
        # when it's not needed.
        leds = ws.new_ws2811_t()

        # Initialize all channels to off
        for channum in range(2):
            channel = ws.ws2811_channel_get(leds, channum)
            ws.ws2811_channel_t_count_set(channel, 0)
            ws.ws2811_channel_t_gpionum_set(channel, 0)
            ws.ws2811_channel_t_invert_set(channel, 0)
            ws.ws2811_channel_t_brightness_set(channel, 0)

        channel = ws.ws2811_channel_get(leds, LED_CHANNEL)
        ws.ws2811_channel_t_count_set(channel, LED_COUNT)
        ws.ws2811_channel_t_gpionum_set(channel, LED_GPIO)
        ws.ws2811_channel_t_invert_set(channel, LED_INVERT)
        ws.ws2811_channel_t_brightness_set(channel, LED_BRIGHTNESS)
        ws.ws2811_channel_t_strip_type_set(channel, LED_STRIP)

        ws.ws2811_t_freq_set(leds, LED_FREQ_HZ)
        ws.ws2811_t_dmanum_set(leds, LED_DMA_NUM)

        # Initialize library with LED configuration.
        resp = ws.ws2811_init(leds)
        if resp != ws.WS2811_SUCCESS:
            message = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_init failed with code {0} ({1})'
                               .format(resp, message))

        # Wrap following code in a try/finally to ensure cleanup functions
        # are called after library is initialized.
        try:
            while True:
                self.spin_once(leds, ws, channel)
                # Delay for a small period of time.
                time.sleep(0.05)
        finally:
            print("We got out")
            # Ensure ws2811_fini is called before the program quits.
            ws.ws2811_fini(leds)
            # clean up structure memory
            ws.delete_ws2811_t(leds)
bitoffdev/perkins-blues
src/controller_model.py
Python
mit
5,788
# Django settings for Town_Country project.

# NOTE(review): this settings file hard-codes machine-specific absolute paths
# (database, media, static, templates) and a committed SECRET_KEY; both should
# move to environment-specific configuration before any deployment.

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'ENGINE': 'django.db.backends.sqlite3',
        # Or path to database file if using sqlite3.
        'NAME': '/Users/kelseyhawley/Documents/Mine/Dropbox/Websites/Town_Country/town_country.db',
        'USER': '',      # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',      # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Los_Angeles'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '/Users/kelseyhawley/Documents/Mine/Dropbox/Websites/Town_Country/media/'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/Users/kelseyhawley/Documents/Mine/Dropbox/Websites/Town_Country/static/'

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): the '&amp;' sequence below looks like an HTML-escaping
# artifact (probably originally '&') -- confirm against the deployed key
# before changing it; changing SECRET_KEY invalidates sessions/signatures.
SECRET_KEY = 'zt+w%mvvbrxoo&amp;h2o^8l$49!w2d2sa_eipf%+nbf*+$4goe3uu'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'Town_Country.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'Town_Country.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # BUG FIX: the trailing comma below is required -- without it this
    # parenthesized single string is NOT a tuple, and Django would iterate
    # the path character by character when searching for templates.
    '/Users/kelseyhawley/Documents/Mine/Dropbox/Websites/Town_Country/templates/',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    # When left uncommented, the admin site DOES NOT function
    # 'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    # 'polls',
    'properties',
    'snippets',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
khawley/Town_Country
Town_Country/settings.py
Python
mit
5,665
# -*- coding: utf-8 -*-
"""Package build script for steenzout.barcode."""

import sys

import pip.download

from pip.req import parse_requirements
from setuptools import setup, find_packages

# Load package metadata (__project__, __version__, __classifiers__, ...)
# without importing the package itself, whose runtime dependencies may be
# unavailable at build time.
# BUG FIX: open the file with a context manager so the handle is always
# closed (the original `exec(open(...).read())` leaked it).
with open('steenzout/barcode/metadata.py') as metadata_file:
    exec(metadata_file.read())

# Interpreter-version prefix (e.g. 'py27', 'py36') so console-script entry
# points do not clash between Python versions.
PREFIX = 'py%s%s' % (sys.version_info.major, sys.version_info.minor)


def requirements(requirements_file):
    """Return package mentioned in the given file.

    Args:
        requirements_file (str): path to the requirements file to be parsed.

    Returns:
        (list): 3rd-party package dependencies contained in the file.
    """
    # NOTE(review): pip.req/parse_requirements is a pip-internal API that was
    # removed in pip >= 10 -- confirm the pinned pip version before upgrading.
    return [
        str(pkg.req) for pkg in parse_requirements(
            requirements_file, session=pip.download.PipSession())]


setup(
    name=__project__,
    description=__description__,
    author=__author__,
    author_email=__author_email__,
    version=__version__,
    maintainer=__maintainer__,
    maintainer_email=__maintainer_email__,
    url=__url__,
    namespace_packages=['steenzout'],
    packages=find_packages(
        exclude=('*.tests', '*.tests.*', 'tests.*', 'tests')),
    package_data={
        '': [
            'LICENSE', 'NOTICE.md', 'README.md'],
        'steenzout.barcode': [
            'fonts/*']
    },
    classifiers=__classifiers__,
    install_requires=requirements('requirements.txt'),
    tests_require=requirements('requirements-test.txt'),
    license=__license__,
    extras_require={
        'cli': requirements('requirements-extra-cli.txt'),
        'image': requirements('requirements-extra-image.txt'),
    },
    entry_points={
        'console_scripts': ['%s-barcode = steenzout.barcode.cli:cli' % PREFIX]
    }
)
steenzout/python-barcode
setup.py
Python
mit
1,606
## # Copyright 2013 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
##
"""
EasyBuild support for building and installing ESMF, implemented as an easyblock

@author: Kenneth Hoste (Ghent University)
"""
import os

import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.filetools import run_cmd
from easybuild.tools.modules import get_software_root


class EB_ESMF(ConfigureMake):
    """Support for building/installing ESMF."""

    def configure_step(self):
        """Custom configuration procedure for ESMF through environment variables."""

        # ESMF's build is driven entirely by ESMF_* environment variables
        # rather than a configure script.
        env.setvar('ESMF_DIR', self.cfg['start_dir'])
        env.setvar('ESMF_INSTALL_PREFIX', self.installdir)
        env.setvar('ESMF_INSTALL_BINDIR', 'bin')
        env.setvar('ESMF_INSTALL_LIBDIR', 'lib')
        env.setvar('ESMF_INSTALL_MODDIR', 'mod')

        # specify compiler: ESMF expects 'gfortran' for the GCC toolchain,
        # otherwise the lower-cased toolchain compiler family name.
        comp_family = self.toolchain.comp_family()
        if comp_family in [toolchain.GCC]:
            compiler = 'gfortran'
        else:
            compiler = comp_family.lower()
        env.setvar('ESMF_COMPILER', compiler)

        # specify MPI communications library; QLogic MPI is MPICH2-compatible
        # as far as ESMF is concerned.
        comm = None
        mpi_family = self.toolchain.mpi_family()
        if mpi_family in [toolchain.QLOGICMPI]:
            comm = 'mpich2'
        else:
            comm = mpi_family.lower()
        env.setvar('ESMF_COMM', comm)

        # specify decent LAPACK lib
        # NOTE(review): if LDFLAGS or LIBLAPACK_MT is unset in the build
        # environment, os.getenv() returns None and the literal string 'None'
        # ends up in ESMF_LAPACK_LIBS -- confirm both are always defined by
        # the toolchain before this step runs.
        env.setvar('ESMF_LAPACK', 'user')
        env.setvar('ESMF_LAPACK_LIBS', '%s %s' % (os.getenv('LDFLAGS'), os.getenv('LIBLAPACK_MT')))

        # specify netCDF (C library; Fortran/C++ bindings may live in
        # separate installation prefixes)
        netcdf = get_software_root('netCDF')
        if netcdf:
            env.setvar('ESMF_NETCDF', 'user')
            netcdf_libs = ['-L%s/lib' % netcdf, '-lnetcdf']

            # Fortran
            netcdff = get_software_root('netCDF-Fortran')
            if netcdff:
                netcdf_libs = ["-L%s/lib" % netcdff] + netcdf_libs + ["-lnetcdff"]
            else:
                netcdf_libs.append('-lnetcdff')

            # C++
            netcdfcxx = get_software_root('netCDF-C++')
            if netcdfcxx:
                netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++"]
            else:
                netcdf_libs.append('-lnetcdf_c++')
            env.setvar('ESMF_NETCDF_LIBS', ' '.join(netcdf_libs))

        # 'make info' provides useful debug info
        cmd = "make info"
        run_cmd(cmd, log_all=True, simple=True, log_ok=True)

    def sanity_check_step(self):
        """Custom sanity check for ESMF."""

        # Expected binaries and both static and shared libraries after install.
        custom_paths = {
            'files': [os.path.join('bin', x) for x in ['ESMF_Info', 'ESMF_InfoC', 'ESMF_RegridWeightGen',
                                                       'ESMF_WebServController']] +
                     [os.path.join('lib', x) for x in ['libesmf.a', 'libesmf.so']],
            'dirs': ['include', 'mod'],
        }

        super(EB_ESMF, self).sanity_check_step(custom_paths=custom_paths)
geimer/easybuild-easyblocks
easybuild/easyblocks/e/esmf.py
Python
gpl-2.0
4,100
"""Package initializer: re-export the public API of the dbdownload module."""

# BUG FIX: use an explicit relative import (PEP 328).  The original implicit
# relative import (`from dbdownload import *`) only works on Python 2 and
# breaks under Python 3; the explicit form works on Python 2.5+ and 3.
from .dbdownload import *

# VERSION is provided by the star-import from the dbdownload submodule.
__version__ = VERSION
rcarmo/DBdownload
dbdownload/__init__.py
Python
mit
48
# Copyright Hybrid Logic Ltd. See LICENSE file for details.

"""
Tests for ``flocker.node.agents.ebs``.
"""

from twisted.python.filepath import FilePath
from twisted.trial.unittest import SynchronousTestCase

from ..ebs import AttachedUnexpectedDevice, _expected_device


class AttachedUnexpectedDeviceTests(SynchronousTestCase):
    """
    Tests for ``AttachedUnexpectedDevice``.
    """
    def test_repr(self):
        """
        The string representation of ``AttachedUnexpectedDevice`` includes the
        requested device name and the discovered device name.
        """
        exception = AttachedUnexpectedDevice(
            FilePath(b"/dev/sda"), FilePath(b"/dev/sdb"),
        )
        self.assertEqual(
            "AttachedUnexpectedDevice("
            "requested='/dev/sda', discovered='/dev/sdb'"
            ")",
            repr(exception),
        )


class ExpectedDeviceTests(SynchronousTestCase):
    """
    Tests for ``_expected_device``.
    """
    def test_sdX_to_xvdX(self):
        """
        ``sdX``-style devices are rewritten to corresponding ``xvdX`` devices.
        """
        self.assertEqual(
            (FilePath(b"/dev/xvdj"), FilePath(b"/dev/xvdo")),
            (_expected_device(b"/dev/sdj"), _expected_device(b"/dev/sdo")),
        )

    def test_non_dev_rejected(self):
        """
        Devices not in ``/dev`` are rejected with ``ValueError``.
        """
        self.assertRaises(ValueError, _expected_device, b"/sys/block/sda")

    def test_non_sdX_rejected(self):
        """
        Devices not in the ``sdX`` category are rejected with ``ValueError``.
        """
        self.assertRaises(ValueError, _expected_device, b"/dev/hda")
agonzalezro/flocker
flocker/node/agents/test/test_ebs.py
Python
apache-2.0
1,829
"""Git backend for pip's version-control support (pip._internal.vcs)."""

from __future__ import absolute_import

import logging
import os.path
import re

from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request

from pip._internal.exceptions import BadCommand
from pip._internal.utils.compat import samefile
from pip._internal.utils.misc import display_path, redact_password_from_url
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.vcs import RemoteNotFoundError, VersionControl, vcs

urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit


logger = logging.getLogger(__name__)


# A full 40-character hexadecimal SHA-1 commit hash.
HASH_REGEX = re.compile('[a-fA-F0-9]{40}')


def looks_like_hash(sha):
    """Return True if *sha* looks like a full 40-char git commit hash."""
    return bool(HASH_REGEX.match(sha))


class Git(VersionControl):
    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = (
        'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
    )
    # Prevent the user's environment variables from interfering with pip:
    # https://github.com/pypa/pip/issues/1130
    unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
    default_arg_rev = 'HEAD'

    def __init__(self, url=None, *args, **kwargs):
        """Normalize git+file URLs before delegating to VersionControl."""

        # Works around an apparent Git bug
        # (see https://article.gmane.org/gmane.comp.version-control.git/146500)
        if url:
            scheme, netloc, path, query, fragment = urlsplit(url)
            if scheme.endswith('file'):
                initial_slashes = path[:-len(path.lstrip('/'))]
                newpath = (
                    initial_slashes +
                    urllib_request.url2pathname(path)
                    .replace('\\', '/').lstrip('/')
                )
                url = urlunsplit((scheme, netloc, newpath, query, fragment))
                after_plus = scheme.find('+') + 1
                url = scheme[:after_plus] + urlunsplit(
                    (scheme[after_plus:], netloc, newpath, query, fragment),
                )

        super(Git, self).__init__(url, *args, **kwargs)

    @staticmethod
    def get_base_rev_args(rev):
        """Return the git CLI arguments that select revision *rev*."""
        return [rev]

    def get_git_version(self):
        """Return the installed git version as a packaging Version object."""
        VERSION_PFX = 'git version '
        version = self.run_command(['version'], show_stdout=False)
        if version.startswith(VERSION_PFX):
            version = version[len(VERSION_PFX):].split()[0]
        else:
            version = ''
        # get first 3 positions of the git version because
        # on windows it is x.y.z.windows.t, and this parses as
        # LegacyVersion which always smaller than a Version.
        version = '.'.join(version.split('.')[:3])
        return parse_version(version)

    @classmethod
    def get_current_branch(cls, location):
        """
        Return the current branch, or None if HEAD isn't at a branch
        (e.g. detached HEAD).
        """
        # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
        # HEAD rather than a symbolic ref.  In addition, the -q causes the
        # command to exit with status code 1 instead of 128 in this case
        # and to suppress the message to stderr.
        args = ['symbolic-ref', '-q', 'HEAD']
        output = cls.run_command(
            args, extra_ok_returncodes=(1, ), show_stdout=False,
            cwd=location,
        )
        ref = output.strip()

        if ref.startswith('refs/heads/'):
            return ref[len('refs/heads/'):]

        return None

    def export(self, location):
        """Export the Git repository at the url to the destination location"""
        if not location.endswith('/'):
            location = location + '/'

        # checkout-index writes the worktree of a temporary clone into
        # *location* without the .git directory.
        with TempDirectory(kind="export") as temp_dir:
            self.unpack(temp_dir.path)
            self.run_command(
                ['checkout-index', '-a', '-f', '--prefix', location],
                show_stdout=False, cwd=temp_dir.path
            )

    @classmethod
    def get_revision_sha(cls, dest, rev):
        """
        Return (sha_or_none, is_branch), where sha_or_none is a commit hash
        if the revision names a remote branch or tag, otherwise None.

        Args:
          dest: the repository directory.
          rev: the revision name.
        """
        # Pass rev to pre-filter the list.
        output = cls.run_command(['show-ref', rev], cwd=dest,
                                 show_stdout=False, on_returncode='ignore')
        refs = {}
        for line in output.strip().splitlines():
            try:
                sha, ref = line.split()
            except ValueError:
                # Include the offending line to simplify troubleshooting if
                # this error ever occurs.
                raise ValueError('unexpected show-ref line: {!r}'.format(line))

            refs[ref] = sha

        branch_ref = 'refs/remotes/origin/{}'.format(rev)
        tag_ref = 'refs/tags/{}'.format(rev)

        sha = refs.get(branch_ref)
        if sha is not None:
            return (sha, True)

        sha = refs.get(tag_ref)

        return (sha, False)

    @classmethod
    def resolve_revision(cls, dest, url, rev_options):
        """
        Resolve a revision to a new RevOptions object with the SHA1 of the
        branch, tag, or ref if found.

        Args:
          rev_options: a RevOptions object.
        """
        rev = rev_options.arg_rev
        sha, is_branch = cls.get_revision_sha(dest, rev)

        if sha is not None:
            rev_options = rev_options.make_new(sha)
            rev_options.branch_name = rev if is_branch else None

            return rev_options

        # Do not show a warning for the common case of something that has
        # the form of a Git commit hash.
        if not looks_like_hash(rev):
            logger.warning(
                "Did not find branch or tag '%s', assuming revision or ref.",
                rev,
            )

        if not rev.startswith('refs/'):
            return rev_options

        # If it looks like a ref, we have to fetch it explicitly.
        cls.run_command(
            ['fetch', '-q', url] + rev_options.to_args(),
            cwd=dest,
        )
        # Change the revision to the SHA of the ref we fetched
        sha = cls.get_revision(dest, rev='FETCH_HEAD')
        rev_options = rev_options.make_new(sha)

        return rev_options

    @classmethod
    def is_commit_id_equal(cls, dest, name):
        """
        Return whether the current commit hash equals the given name.

        Args:
          dest: the repository directory.
          name: a string name.
        """
        if not name:
            # Then avoid an unnecessary subprocess call.
            return False

        return cls.get_revision(dest) == name

    @classmethod
    def fetch_new(cls, dest, url, rev_options):
        """Clone *url* into *dest*, then check out the requested revision."""
        rev_display = rev_options.to_display()
        logger.info(
            'Cloning %s%s to %s', redact_password_from_url(url),
            rev_display, display_path(dest),
        )
        cls.run_command(['clone', '-q', url, dest])

        if rev_options.rev:
            # Then a specific revision was requested.
            rev_options = cls.resolve_revision(dest, url, rev_options)
            branch_name = getattr(rev_options, 'branch_name', None)
            if branch_name is None:
                # Only do a checkout if the current commit id doesn't match
                # the requested revision.
                if not cls.is_commit_id_equal(dest, rev_options.rev):
                    cmd_args = ['checkout', '-q'] + rev_options.to_args()
                    cls.run_command(cmd_args, cwd=dest)
            elif cls.get_current_branch(dest) != branch_name:
                # Then a specific branch was requested, and that branch
                # is not yet checked out.
                track_branch = 'origin/{}'.format(branch_name)
                cmd_args = [
                    'checkout', '-b', branch_name, '--track', track_branch,
                ]
                cls.run_command(cmd_args, cwd=dest)

        #: repo may contain submodules
        cls.update_submodules(dest)

    def switch(self, dest, url, rev_options):
        """Point the existing checkout at a new remote URL and revision."""
        self.run_command(['config', 'remote.origin.url', url], cwd=dest)
        cmd_args = ['checkout', '-q'] + rev_options.to_args()
        self.run_command(cmd_args, cwd=dest)

        self.update_submodules(dest)

    def update(self, dest, url, rev_options):
        """Fetch from the default remote and hard-reset to the wanted rev."""
        # First fetch changes from the default remote
        if self.get_git_version() >= parse_version('1.9.0'):
            # fetch tags in addition to everything else
            self.run_command(['fetch', '-q', '--tags'], cwd=dest)
        else:
            self.run_command(['fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maybe even origin/master)
        rev_options = self.resolve_revision(dest, url, rev_options)
        cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args()
        self.run_command(cmd_args, cwd=dest)
        #: update submodules
        self.update_submodules(dest)

    @classmethod
    def get_remote_url(cls, location):
        """
        Return URL of the first remote encountered.

        Raises RemoteNotFoundError if the repository does not have a remote
        url configured.
        """
        # We need to pass 1 for extra_ok_returncodes since the command
        # exits with return code 1 if there are no matching lines.
        stdout = cls.run_command(
            ['config', '--get-regexp', r'remote\..*\.url'],
            extra_ok_returncodes=(1, ), show_stdout=False, cwd=location,
        )
        remotes = stdout.splitlines()
        try:
            found_remote = remotes[0]
        except IndexError:
            raise RemoteNotFoundError

        # Prefer 'origin' over whichever remote happens to be listed first.
        for remote in remotes:
            if remote.startswith('remote.origin.url '):
                found_remote = remote
                break
        url = found_remote.split(' ')[1]
        return url.strip()

    @classmethod
    def get_revision(cls, location, rev=None):
        """Return the commit hash that *rev* (default HEAD) resolves to."""
        if rev is None:
            rev = 'HEAD'
        current_rev = cls.run_command(
            ['rev-parse', rev], show_stdout=False, cwd=location,
        )
        return current_rev.strip()

    @classmethod
    def get_subdirectory(cls, location):
        """Return the path of setup.py's directory relative to the repo root,
        or None if it is at the root (or cannot be found)."""
        # find the repo root
        git_dir = cls.run_command(['rev-parse', '--git-dir'],
                                  show_stdout=False, cwd=location).strip()
        if not os.path.isabs(git_dir):
            git_dir = os.path.join(location, git_dir)
        root_dir = os.path.join(git_dir, '..')
        # find setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without
                # finding setup.py
                logger.warning(
                    "Could not find setup.py for directory %s (tried all "
                    "parent directories)",
                    orig_location,
                )
                return None
        # relative path of setup.py to repo root
        if samefile(root_dir, location):
            return None
        return os.path.relpath(location, root_dir)

    @classmethod
    def get_url_rev_and_auth(cls, url):
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with a ssh:// scheme (e.g. GitHub). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        if '://' not in url:
            assert 'file:' not in url
            url = url.replace('git+', 'git+ssh://')
            url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
            url = url.replace('ssh://', '')
        else:
            url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)

        return url, rev, user_pass

    @classmethod
    def update_submodules(cls, location):
        """Initialize/update submodules, if a .gitmodules file exists."""
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        cls.run_command(
            ['submodule', 'update', '--init', '--recursive', '-q'],
            cwd=location,
        )

    @classmethod
    def controls_location(cls, location):
        """Return True if *location* is inside a git working tree."""
        if super(Git, cls).controls_location(location):
            return True
        try:
            # `git rev-parse` exits non-zero (captured via on_returncode=
            # 'ignore') outside a work tree; empty output means success here.
            r = cls.run_command(['rev-parse'],
                                cwd=location,
                                show_stdout=False,
                                on_returncode='ignore')
            return not r
        except BadCommand:
            logger.debug("could not determine if %s is under git control "
                         "because git is not available", location)
            return False


vcs.register(Git)
gnmiller/craig-bot
craig-bot/lib/python3.6/site-packages/pip/_internal/vcs/git.py
Python
mit
12,960
"""Tests for the Config Entry Flow helper.""" from unittest.mock import patch, Mock import pytest from homeassistant import config_entries, data_entry_flow, setup from homeassistant.helpers import config_entry_flow from tests.common import ( MockConfigEntry, MockModule, mock_coro, mock_integration) @pytest.fixture def discovery_flow_conf(hass): """Register a handler.""" handler_conf = { 'discovered': False, } async def has_discovered_devices(hass): """Mock if we have discovered devices.""" return handler_conf['discovered'] with patch.dict(config_entries.HANDLERS): config_entry_flow.register_discovery_flow( 'test', 'Test', has_discovered_devices, config_entries.CONN_CLASS_LOCAL_POLL) yield handler_conf @pytest.fixture def webhook_flow_conf(hass): """Register a handler.""" with patch.dict(config_entries.HANDLERS): config_entry_flow.register_webhook_flow( 'test_single', 'Test Single', {}, False) config_entry_flow.register_webhook_flow( 'test_multiple', 'Test Multiple', {}, True) yield {} async def test_single_entry_allowed(hass, discovery_flow_conf): """Test only a single entry is allowed.""" flow = config_entries.HANDLERS['test']() flow.hass = hass MockConfigEntry(domain='test').add_to_hass(hass) result = await flow.async_step_user() assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT assert result['reason'] == 'single_instance_allowed' async def test_user_no_devices_found(hass, discovery_flow_conf): """Test if no devices found.""" flow = config_entries.HANDLERS['test']() flow.hass = hass flow.context = { 'source': config_entries.SOURCE_USER } result = await flow.async_step_confirm(user_input={}) assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT assert result['reason'] == 'no_devices_found' async def test_user_has_confirmation(hass, discovery_flow_conf): """Test user requires no confirmation to setup.""" flow = config_entries.HANDLERS['test']() flow.hass = hass discovery_flow_conf['discovered'] = True result = await flow.async_step_user() assert 
result['type'] == data_entry_flow.RESULT_TYPE_FORM async def test_discovery_single_instance(hass, discovery_flow_conf): """Test we ask for confirmation via discovery.""" flow = config_entries.HANDLERS['test']() flow.hass = hass MockConfigEntry(domain='test').add_to_hass(hass) result = await flow.async_step_discovery({}) assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT assert result['reason'] == 'single_instance_allowed' async def test_discovery_confirmation(hass, discovery_flow_conf): """Test we ask for confirmation via discovery.""" flow = config_entries.HANDLERS['test']() flow.hass = hass result = await flow.async_step_discovery({}) assert result['type'] == data_entry_flow.RESULT_TYPE_FORM assert result['step_id'] == 'confirm' result = await flow.async_step_confirm({}) assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY async def test_multiple_discoveries(hass, discovery_flow_conf): """Test we only create one instance for multiple discoveries.""" mock_integration(hass, MockModule('test')) result = await hass.config_entries.flow.async_init( 'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={}) assert result['type'] == data_entry_flow.RESULT_TYPE_FORM # Second discovery result = await hass.config_entries.flow.async_init( 'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={}) assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT async def test_only_one_in_progress(hass, discovery_flow_conf): """Test a user initialized one will finish and cancel discovered one.""" mock_integration(hass, MockModule('test')) # Discovery starts flow result = await hass.config_entries.flow.async_init( 'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={}) assert result['type'] == data_entry_flow.RESULT_TYPE_FORM # User starts flow result = await hass.config_entries.flow.async_init( 'test', context={'source': config_entries.SOURCE_USER}, data={}) assert result['type'] == data_entry_flow.RESULT_TYPE_FORM # Discovery 
flow has not been aborted assert len(hass.config_entries.flow.async_progress()) == 2 # Discovery should be aborted once user confirms result = await hass.config_entries.flow.async_configure( result['flow_id'], {}) assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert len(hass.config_entries.flow.async_progress()) == 0 async def test_import_no_confirmation(hass, discovery_flow_conf): """Test import requires no confirmation to set up.""" flow = config_entries.HANDLERS['test']() flow.hass = hass discovery_flow_conf['discovered'] = True result = await flow.async_step_import(None) assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY async def test_import_single_instance(hass, discovery_flow_conf): """Test import doesn't create second instance.""" flow = config_entries.HANDLERS['test']() flow.hass = hass discovery_flow_conf['discovered'] = True MockConfigEntry(domain='test').add_to_hass(hass) result = await flow.async_step_import(None) assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT async def test_webhook_single_entry_allowed(hass, webhook_flow_conf): """Test only a single entry is allowed.""" flow = config_entries.HANDLERS['test_single']() flow.hass = hass MockConfigEntry(domain='test_single').add_to_hass(hass) result = await flow.async_step_user() assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT assert result['reason'] == 'one_instance_allowed' async def test_webhook_multiple_entries_allowed(hass, webhook_flow_conf): """Test multiple entries are allowed when specified.""" flow = config_entries.HANDLERS['test_multiple']() flow.hass = hass MockConfigEntry(domain='test_multiple').add_to_hass(hass) hass.config.api = Mock(base_url='http://example.com') result = await flow.async_step_user() assert result['type'] == data_entry_flow.RESULT_TYPE_FORM async def test_webhook_config_flow_registers_webhook(hass, webhook_flow_conf): """Test setting up an entry creates a webhook.""" flow = config_entries.HANDLERS['test_single']() 
flow.hass = hass hass.config.api = Mock(base_url='http://example.com') result = await flow.async_step_user(user_input={}) assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result['data']['webhook_id'] is not None async def test_webhook_create_cloudhook(hass, webhook_flow_conf): """Test only a single entry is allowed.""" assert await setup.async_setup_component(hass, 'cloud', {}) async_setup_entry = Mock(return_value=mock_coro(True)) async_unload_entry = Mock(return_value=mock_coro(True)) mock_integration(hass, MockModule( 'test_single', async_setup_entry=async_setup_entry, async_unload_entry=async_unload_entry, async_remove_entry=config_entry_flow.webhook_async_remove_entry, )) result = await hass.config_entries.flow.async_init( 'test_single', context={'source': config_entries.SOURCE_USER}) assert result['type'] == data_entry_flow.RESULT_TYPE_FORM coro = mock_coro({ 'cloudhook_url': 'https://example.com' }) with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_create', return_value=coro) as mock_create, \ patch('homeassistant.components.cloud.async_active_subscription', return_value=True), \ patch('homeassistant.components.cloud.async_is_logged_in', return_value=True): result = await hass.config_entries.flow.async_configure( result['flow_id'], {}) assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result['description_placeholders']['webhook_url'] == \ 'https://example.com' assert len(mock_create.mock_calls) == 1 assert len(async_setup_entry.mock_calls) == 1 with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_delete', return_value=coro) as mock_delete: result = \ await hass.config_entries.async_remove(result['result'].entry_id) assert len(mock_delete.mock_calls) == 1 assert result['require_restart'] is False
MartinHjelmare/home-assistant
tests/helpers/test_config_entry_flow.py
Python
apache-2.0
8,659
from common_fixtures import * # NOQA def test_snapshot_with_root_on_longhorn_1(super_client, client): port = 6091 snapshot_count = 5 snapshot_revert_index = 3 env, service, con, snapshots = revert_to_snapshot( super_client, client, "root-snap5-revert3", port, snapshot_count, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, con[0], service) def test_snapshot_with_root_and_data_on_longhorn_1(super_client, client): port = 6092 snapshot_count = 5 snapshot_revert_index = 3 env, service, con, snapshots = revert_to_snapshot( super_client, client, "data-snap5-revert3", port, snapshot_count, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, con[0], service) def test_snapshot_with_root_on_longhorn_2(super_client, client): port = 6093 snapshot_count = 5 snapshot_revert_index = 1 env, service, con, snapshots = revert_to_snapshot( super_client, client, "root-snap5-revert1", port, snapshot_count, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, con[0], service) def test_snapshot_with_root_and_data_on_longhorn_2(super_client, client): port = 6094 snapshot_count = 5 snapshot_revert_index = 1 env, service, con, snapshots = revert_to_snapshot( super_client, client, "data-snap5-revert1", port, snapshot_count, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, con[0], service) def test_snapshot_with_root_on_longhorn_4(super_client, client): port = 6095 snapshot_count = 5 snapshot_revert_index = 5 env, service, vms, snapshots = revert_to_snapshot( super_client, client, "root-snap5-revert5-1", port, snapshot_count, snapshot_revert_index, is_root=True) vm_host = get_host_for_vm(client, vms[0]) validate_writes(vm_host, port, is_root=True) revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=4, is_root=True) validate_writes(vm_host, port, is_root=True) 
revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=3, is_root=True) validate_writes(vm_host, port, is_root=True) revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=2, is_root=True) validate_writes(vm_host, port, is_root=True) revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=1, is_root=True) validate_writes(vm_host, port, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_root_and_data_on_longhorn_4(super_client, client): port = 6096 snapshot_count = 5 snapshot_revert_index = 5 env, service, vms, snapshots = revert_to_snapshot( super_client, client, "data-snap5-revert5-1", port, snapshot_count, snapshot_revert_index, is_root=False) vm_host = get_host_for_vm(client, vms[0]) validate_writes(vm_host, port, is_root=False) revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=4, is_root=False) validate_writes(vm_host, port, is_root=False) revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=3, is_root=False) validate_writes(vm_host, port, is_root=False) revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=2, is_root=False) validate_writes(vm_host, port, is_root=False) revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index=1, is_root=False) validate_writes(vm_host, port, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_root_on_longhorn_with_deletes_1(super_client, client): port = 7001 snapshot_count = 3 snapshot_revert_index = 3 snapshot_delete_indexes = [2] vm_name = "snap-del2-revert-3" env, service, vms = revert_to_snapshot_after_snapshot_deletes( 
super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_root_on_longhorn_with_deletes_2(super_client, client): port = 7002 snapshot_count = 3 snapshot_revert_index = 1 snapshot_delete_indexes = [2] vm_name = "r-snap-del2-r-1" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_root_on_longhorn_with_deletes_3(super_client, client): port = 7003 snapshot_count = 6 snapshot_revert_index = 1 snapshot_delete_indexes = [2, 3, 5] vm_name = "r-snap-del235-r-1" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_root_on_longhorn_with_deletes_4(super_client, client): port = 7004 snapshot_count = 6 snapshot_revert_index = 4 snapshot_delete_indexes = [2, 3, 5] vm_name = "r-snap-del235-r-4" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_root_on_longhorn_with_deletes_5(super_client, client): port = 7005 snapshot_count = 3 snapshot_revert_index = 2 snapshot_delete_indexes = [1] vm_name = "r-snap-del1-revert-2" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def 
test_snapshot_with_data_on_longhorn_with_deletes_1(super_client, client): port = 8001 snapshot_count = 3 snapshot_revert_index = 3 snapshot_delete_indexes = [2] vm_name = "d-snap-del2-revert-3" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_data_on_longhorn_with_deletes_2(super_client, client): port = 8002 snapshot_count = 3 snapshot_revert_index = 1 snapshot_delete_indexes = [2] vm_name = "d-snap-del2-revert-1" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_data_on_longhorn_with_deletes_3(super_client, client): port = 8003 snapshot_count = 6 snapshot_revert_index = 1 snapshot_delete_indexes = [2, 3, 5] vm_name = "d-snap-del235-r-1" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_data_on_longhorn_with_deletes_4(super_client, client): port = 8004 snapshot_count = 6 snapshot_revert_index = 4 snapshot_delete_indexes = [2, 3, 5] vm_name = "d-snap-del235-r-4" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_data_on_longhorn_with_deletes_5(super_client, client): port = 8005 snapshot_count = 6 snapshot_revert_index = 1 snapshot_delete_indexes = [2, 3, 5] vm_name = "d-snap-del235-r-1" env, 
service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_snapshot_with_data_on_longhorn_with_deletes_6(super_client, client): port = 7005 snapshot_count = 3 snapshot_revert_index = 2 snapshot_delete_indexes = [1] vm_name = "d-snap-del1-revert-2" env, service, vms = revert_to_snapshot_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_root_on_longhorn_1(super_client, client): port = 7010 snapshot_count = 5 snapshot_revert_index = 4 env, service, vms, snapshots, backup = restore_from_backup( super_client, client, "root-snap5-backup4", port, snapshot_count, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_data_on_longhorn_1(super_client, client): port = 7011 snapshot_count = 5 snapshot_revert_index = 4 env, service, vms, snapshots, backup = restore_from_backup( super_client, client, "data-snap5-backup4", port, snapshot_count, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_root_on_longhorn_2(super_client, client): port = 7012 snapshot_count = 5 snapshot_revert_index = 1 env, service, vms, snapshots, backup = restore_from_backup( super_client, client, "root-snap5-backup1", port, snapshot_count, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_data_on_longhorn_2(super_client, client): port = 7013 snapshot_count = 5 snapshot_revert_index = 1 env, service, vms, snapshots, backup = restore_from_backup( super_client, client, "data-snap5-backup1", port, snapshot_count, 
snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_root_on_longhorn_with_deletes_1(super_client, client): port = 7021 snapshot_count = 3 snapshot_backup_index = 3 snapshot_delete_indexes = [2] vm_name = "r-snap-del2-revert-3" env, service, vms = restore_from_backup_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_backup_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_root_on_longhorn_with_deletes_2(super_client, client): port = 7022 snapshot_count = 3 snapshot_revert_index = 1 snapshot_backup_indexes = [2] vm_name = "r-snap-del2-revert-1" env, service, vms = restore_from_backup_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_backup_indexes, snapshot_revert_index, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_data_on_longhorn_with_deletes_1(super_client, client): port = 7021 snapshot_count = 3 snapshot_backup_index = 3 snapshot_delete_indexes = [2] vm_name = "r-snap-del2-revert-3" env, service, vms = restore_from_backup_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_backup_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_backup_with_data_on_longhorn_with_deletes_2(super_client, client): port = 7022 snapshot_count = 3 snapshot_revert_index = 1 snapshot_backup_indexes = [2] vm_name = "r-snap-del2-revert-1" env, service, vms = restore_from_backup_after_snapshot_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_backup_indexes, snapshot_revert_index, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_root_restore_from_backup_after_backup_deletes(super_client, client): port = 7023 vm_name = 
"r-backup2-del1-r-1" snapshot_count = 3 snapshot_backup_index = 2 snapshot_count_2 = 2 snapshot_backup_index_2 = 1 env, service, vms = restore_from_backup_after_backup_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_backup_index, snapshot_count_2, snapshot_backup_index_2, is_root=True) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_data_restore_from_backup_after_backup_deletes(super_client, client): port = 7025 vm_name = "d-backup2-del1-r-1" snapshot_count = 4 snapshot_backup_index = 3 snapshot_count_2 = 3 snapshot_backup_index_2 = 2 env, service, vms = restore_from_backup_after_backup_deletes( super_client, client, vm_name, port, snapshot_count, snapshot_backup_index, snapshot_count_2, snapshot_backup_index_2, is_root=False) delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def revert_to_snapshot(super_client, client, vm_name, port, snapshot_count, snapshot_revert_index, is_root=True): # Create service with root disk using longhorn driver env, service, vms = createVMService( super_client, client, vm_name, str(port), root_disk=True, data_disk=True) # Take 5 snapshots of root disk snapshots = take_snapshots_for_vm_service(super_client, client, port, service, is_root=is_root, snapshot_count=snapshot_count) # Restore Root/Data volume to snapshot revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index, is_root=is_root) return env, service, vms, snapshots def restore_from_backup(super_client, client, vm_name, port, snapshot_count, snapshot_backup_index, is_root=True): # Create service with root disk using longhorn driver env, service, vms = createVMService( super_client, client, vm_name, str(port), root_disk=True, data_disk=True) # Take snapshots of root disk snapshots = take_snapshots_for_vm_service(super_client, client, port, service, is_root=is_root, snapshot_count=snapshot_count) # Restore Root/Data volume from Backup backup = 
restore_volume_from_backup_for_vm_service( super_client, client, port, service, snapshots, snapshot_backup_index, is_root=is_root) return env, service, vms, snapshots, backup def revert_to_snapshot_after_snapshot_deletes(super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_revert_index, is_root=True): # Create service with root disk using longhorn driver env, service, vms = createVMService( super_client, client, vm_name, str(port), root_disk=True, data_disk=True) # Take snapshots of root disk snapshots = take_snapshots_for_vm_service(super_client, client, port, service, is_root=is_root, snapshot_count=snapshot_count) # Delete snapshots for snapshot_delete_index in snapshot_delete_indexes: snapshot_delete = snapshots[snapshot_delete_index - 1]["snapshot"] snapshot_delete = client.wait_success(client.delete(snapshot_delete)) assert snapshot_delete.state == 'removed' # Restore Root/Data volume to snapshot revert_volume_to_snapshot_for_vm_service(super_client, client, port, service, snapshots, snapshot_revert_index, is_root=is_root) return env, service, vms def restore_from_backup_after_snapshot_deletes(super_client, client, vm_name, port, snapshot_count, snapshot_delete_indexes, snapshot_backup_index, is_root=True): # Create service with root disk using longhorn driver env, service, vms = createVMService( super_client, client, vm_name, str(port), root_disk=True, data_disk=True) # Take snapshots of root disk snapshots = take_snapshots_for_vm_service(super_client, client, port, service, is_root=is_root, snapshot_count=snapshot_count) # Delete snapshots for snapshot_delete_index in snapshot_delete_indexes: snapshot_delete = snapshots[snapshot_delete_index - 1]["snapshot"] snapshot_delete = client.wait_success(client.delete(snapshot_delete)) assert snapshot_delete.state == 'removed' # Restore Root/Data volume from Backup restore_volume_from_backup_for_vm_service(super_client, client, port, service, snapshots, snapshot_backup_index, 
is_root=is_root) return env, service, vms def restore_from_backup_after_backup_deletes(super_client, client, vm_name, port, snapshot_count, snapshot_backup_index, snapshot_count_2, snapshot_backup_index_2, is_root=True): env, service, vms, snapshots1, backup1 = restore_from_backup( super_client, client, vm_name, port, snapshot_count, snapshot_backup_index, is_root=is_root) # Take snapshots of root/data disk snapshots2 = take_snapshots_for_vm_service(super_client, client, port, service, is_root=is_root, snapshot_count=snapshot_count_2) # Create a backup snapshot_backup = snapshots2[snapshot_backup_index_2 - 1]["snapshot"] backup2 = \ client.wait_success( snapshot_backup.backup(backupTargetId=default_backup_target["id"]), timeout=120) assert backup2.state == "created" # Take snapshots of root/data disk snapshot_count_3 = 2 snapshots3 = take_snapshots_for_vm_service(super_client, client, port, service, is_root=is_root, snapshot_count=snapshot_count_3) # Delete the first backup backup1 = client.wait_success(client.delete(backup1)) assert backup1.state == "removed" # Restore volume to backup2 restore_volume_from_backup(super_client, client, port, service, snapshots2, snapshot_backup_index_2, backup2, is_root=is_root) # Check for existence files that was created as part of first backup for vm in vms: vm_host = get_host_for_vm(client, vm) dir = ROOT_DIR if not is_root: mount_data_dir(vm_host, port) dir = DATA_DIR for i in range(0, snapshot_backup_index): file = snapshots1[i]["filename"] content = snapshots1[i]["content"] assert check_if_file_exists(vm_host, port, dir + "/" + file) assert read_data(vm_host, port, dir, file) == content if snapshot_backup_index < len(snapshots1): for i in range(snapshot_backup_index, len(snapshots1)): file = snapshots1[i]["filename"] assert not check_if_file_exists( vm_host, port, dir + "/" + file) # Check for non existence of files that was created after the # second backup for snapshot in snapshots3: file = snapshot["filename"] assert not 
check_if_file_exists(vm_host, port, dir + "/" + file) return env, service, vms def test_createVM_with_root_and_data_on_longhorn_with_iops( super_client, client): port = 9993 readiops = 100 writeiops = 200 env, service, con = createVMService( super_client, client, "root-data-iop", str(port), root_disk=True, data_disk=True, readiops=readiops, writeiops=writeiops) vms = get_service_vm_list(super_client, service) assert len(vms) == 1 for vm in vms: assert vm.state == "running" vm_host = get_host_for_vm(client, vm) validate_writes(vm_host, port, is_root=True) validate_writes(vm_host, port, is_root=False) system_envname = \ get_system_env_name_for_vm_service(service, vms[0], root_disk=True) replica_containers = get_service_containers_with_name( super_client, service, system_envname + "_" + REPLICA) assert len(replica_containers) == 2 for con in replica_containers: docker_client = get_docker_client(con.hosts[0]) inspect = docker_client.inspect_container(con.externalId) print inspect assert \ inspect ["HostConfig"]["BlkioDeviceReadIOps"][0]["Rate"] == readiops assert \ inspect ["HostConfig"]["BlkioDeviceWriteIOps"][0]["Rate"] == writeiops delete_all(client, [env]) delete_vm_volumes(client, vms[0], service) def test_multiple_service_deployment(client): root_disk = True data_disk = True scale = 1 health_check_on = True readiops = 0 writeiops = 0 memory = 512 cpu = 1 env = create_env(client) root_lh_disk = {"name": ROOT_DISK, "root": True, "size": "10g", "driver": VOLUME_DRIVER} data_lh_disk = {"name": DATA_DISK, "root": False, "size": "1g", "driver": VOLUME_DRIVER} if readiops != 0: root_lh_disk["readIops"] = readiops data_lh_disk["readIops"] = readiops if writeiops != 0: data_lh_disk["writeIops"] = writeiops root_lh_disk["writeIops"] = writeiops longhorn_disk = [] if root_disk: longhorn_disk.append(root_lh_disk) if data_disk: longhorn_disk.append(data_lh_disk) health_check = {"name": "check1", "responseTimeout": 2000, "interval": 2000, "healthyThreshold": 2, 
"unhealthyThreshold": 3, "requestLine": "", "port": 22} launch_config = {"kind": "virtualMachine", "disks": longhorn_disk, "imageUuid": VM_IMAGE_UUID, "memoryMb": memory, "vcpu": cpu, "networkMode": "managed", } if health_check_on is not None: launch_config["healthCheck"] = health_check services = [] for i in range(0, 10): service = create_svc(client, env, launch_config, scale, service_name="test") service = client.wait_success(service) assert service.state == "inactive" service = service.activate() services.append(service) time.sleep(5)
jimengliu/longhorn-tests
validation-test/storagetest/core/test_longhorn_vmsnapshot.py
Python
apache-2.0
26,670
"""===========================
Geneset analysis
===========================

:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python

Overview
========

This pipeline performs gene set analysis of one or more genesets.

Input data are two collections of files, genelists and pathways.

Genelists are tabular data with a gene for each row and associated
attributes in additional columns such as expression level, probability
of being called differentially expressed, etc.

Pathways are tabular data linking genes to pathways that they exist in.

Generally, it performs the following tasks:

1. The pipeline merges separately prepared gene lists into a single
   gene list matrix. There is a continuous scale version (P-Values,
   expression values, ...) and a thresholded version (0 and 1 for
   genelist membership).

2. The pipeline builds a matrix of gene list annotations to test
   against. To this end, it collects:

   ENSEMBL GO annotations
   KEGG Pathways
   User supplied pathways
   GSEA database signatures

3. The pipeline performs various gene set enrichment analyses. These are:

   1. Hypergeometric GO analysis

   2. Gene set enrichment analysis

4. The pipeline creates various QC metrics. To this end it looks for
   biases in any of the gene lists supplied. Biases the pipeline looks
   at are:

   1. Gene length

   2. Nucleotide composition

   3. Gene intron/exon structure

   4. User supplied table with biases.

Usage
=====

See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.

Configuration
-------------

The pipeline requires a configured :file:`pipeline.ini` file.

The sphinxreport report requires a :file:`conf.py` and
:file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start
with, use the files supplied with the Example_ data.

Input
-----

Optional inputs
+++++++++++++++

Requirements
------------

The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.

On top of the default CGAT setup, the pipeline requires the following
software to be in the path:

+----------+-----------+---------------------------+
|*Program* |*Version*  |*Purpose*                  |
+----------+-----------+---------------------------+
|          |           |                           |
+----------+-----------+---------------------------+

Pipeline output
===============

The major output is in the database file :file:`csvdb`.

Glossary
========

.. glossary::


Code
====

"""
from ruffus import *

import glob
import sys
import os
import sqlite3
import pandas

import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Database as Database
import CGAT.SetTools as SetTools
import CGATPipelines.PipelineGO as PipelineGO

###################################################
###################################################
###################################################
# Pipeline configuration
###################################################

# load options from the config file
import CGATPipelines.Pipeline as P

PARAMS = P.getParameters(
    ["%s/pipeline.ini" % os.path.splitext(__file__)[0],
     "../pipeline.ini",
     "pipeline.ini"])

PARAMS.update(P.peekParameters(
    PARAMS["annotations_dir"],
    "pipeline_annotations.py",
    prefix="annotations_",
    update_interface=True))

# Update the PARAMS dictionary in any PipelineModules
# e.g.:
# import CGATPipelines.PipelineGeneset as PipelineGeneset
# PipelineGeneset.PARAMS = PARAMS


def connect():
    '''connect to database.

    Use this method to connect to additional databases.

    Returns a database connection with the annotations database
    attached under the schema name ``annotations``.
    '''
    dbh = sqlite3.connect(PARAMS["database_name"])
    statement = '''ATTACH DATABASE '%s' as annotations''' % (
        PARAMS["annotations_database"])
    cc = dbh.cursor()
    cc.execute(statement)
    cc.close()

    return dbh


@transform('genelists.dir/*.tsv.gz',
           suffix(".tsv.gz"),
           ".load")
def loadGeneLists(infile, outfile):
    '''load gene list data into database.'''
    P.load(infile, outfile,
           tablename="genelist_%s" % P.toTable(outfile))


@merge('genelists.dir/*.tsv.gz', 'genelists.tsv.gz')
def buildGeneListMatrix(infiles, outfile):
    '''build a gene list matrix for simple pathway analysis
    based on hypergeometric test.

    A gene list is derived from a gene set by applying thresholds
    to the input data set. The thresholds are defined in the
    configuration file.
    '''

    genesets = []
    backgrounds = []
    headers = []
    for infile in infiles:
        genelist = pandas.read_csv(
            IOTools.openFile(infile),
            index_col=0,
            sep='\t')

        track = P.snip(os.path.basename(infile), ".tsv.gz")
        headers.append(track)

        # foreground set: genes within the configured threshold window
        field = PARAMS[P.matchParameter("%s_foreground_field" % track)]
        min_threshold = PARAMS[P.matchParameter(
            "%s_foreground_min_threshold" % track)]
        max_threshold = PARAMS[P.matchParameter(
            "%s_foreground_max_threshold" % track)]
        genesets.append(set(genelist[
            (genelist[field] >= min_threshold) &
            (genelist[field] <= max_threshold)].index))

        E.info('%s: foreground: %f <= %s <= %f' % (track, min_threshold,
                                                   field, max_threshold))

        # background set: separately thresholded universe for the test
        field = PARAMS[P.matchParameter("%s_background_field" % track)]
        min_threshold = PARAMS[P.matchParameter(
            "%s_background_min_threshold" % track)]
        max_threshold = PARAMS[P.matchParameter(
            "%s_background_max_threshold" % track)]
        E.info('%s: background: %f <= %s <= %f' % (track, min_threshold,
                                                   field, max_threshold))
        backgrounds.append(set(genelist[
            (genelist[field] >= min_threshold) &
            (genelist[field] <= max_threshold)].index))

        E.info("%s: fg=%i, bg=%i" % (track,
                                     len(genesets[-1]),
                                     len(backgrounds[-1])))

    E.info("writing gene list matrix")
    with IOTools.openFile(outfile, "w") as outf:
        SetTools.writeSets(outf, genesets, labels=headers)
    with IOTools.openFile(outfile + ".bg.tsv.gz", "w") as outf:
        SetTools.writeSets(outf, backgrounds, labels=headers)

    E.info("writing intersection/union matrix")
    # build set intersection matrix
    matrix = SetTools.unionIntersectionMatrix(genesets)
    with IOTools.openFile(outfile + ".matrix.gz", "w") as outf:
        IOTools.writeMatrix(outf, matrix, headers, headers)
    matrix = SetTools.unionIntersectionMatrix(backgrounds)
    with IOTools.openFile(outfile + ".bg.matrix.gz", "w") as outf:
        IOTools.writeMatrix(outf, matrix, headers, headers)


@transform(buildGeneListMatrix,
           suffix(".tsv.gz"),
           ".load")
def loadGeneListMatrix(infile, outfile):
    '''load gene list matrix into table.'''
    track = P.snip(infile, ".tsv.gz")
    P.load(infile, outfile, tablename="%s_foreground" % track)
    P.load(infile + ".bg.tsv.gz", outfile, tablename="%s_background" % track)


@transform('pathways.dir/*.tsv.gz',
           regex('.*/(.*).tsv.gz'),
           r"pathways_\1.load")
def loadPathways(infile, outfile):
    '''load pathway information into database.'''
    P.load(infile, outfile, "--add-index=gene_id --add-index=go_id")


@follows(mkdir('hypergeometric.dir'))
@transform('pathways.dir/*.tsv.gz',
           regex('.*/(.*).tsv.gz'),
           add_inputs(buildGeneListMatrix),
           r'hypergeometric.dir/\1.tsv')
def runHypergeometricAnalysis(infiles, outfile):
    '''run pathway analysis on pathway files
    in the directory pathways.dir.
    '''
    infile_pathways, infile_genelist = infiles
    infile_background = infile_genelist + ".bg.tsv.gz"

    # TODO:
    # gene annotations
    # category annotations
    #
    # os.path.join(
    #     PARAMS["annotations_dir"],
    #     PARAMS_ANNOTATIONS["interface_go_obo"]),

    PipelineGO.runGOFromFiles(
        outfile=outfile,
        outdir=outfile + ".dir",
        fg_file=infile_genelist,
        bg_file=infile_background,
        go_file=infile_pathways,
        ontology_file=None,
        minimum_counts=PARAMS["hypergeometric_minimum_counts"],
        pairs=False,
        gene2name=None)


def computePathwayBiases(infile, outfile):
    # placeholder: QC/bias computation not implemented yet
    pass


@transform(runHypergeometricAnalysis,
           suffix(".tsv"),
           r"\1.load")
def loadHypergeometricAnalysis(infile, outfile):
    '''load GO results.'''

    track = P.toTable(outfile)
    tablename = 'hypergeometric_%s_summary' % track
    P.load(infile, outfile, tablename=tablename)

    dbh = connect()
    ontologies = [x[0] for x in Database.executewait(
        dbh,
        '''SELECT DISTINCT ontology FROM %s''' % tablename).fetchall()]

    # NOTE(review): genelists is never used below; retained from the
    # original implementation - confirm whether it can be dropped.
    genelists = [x[0] for x in Database.executewait(
        dbh,
        '''SELECT DISTINCT genelist FROM %s''' % tablename).fetchall()]

    # output files from runGO.py
    sections = ('results', 'parameters', 'withgenes')

    for section in sections:
        tablename = 'hypergeometric_%s_%s' % (track, section)
        load_statement = P.build_load_statement(
            tablename=tablename)

        statement = '''
        cgat combine_tables
        --cat=track
        --regex-filename="hypergeometric.dir/%(track)s.tsv.dir/(\S+).%(section)s"
        hypergeometric.dir/%(track)s.tsv.dir/*.%(section)s
        | %(load_statement)s
        >> %(outfile)s'''
        P.run()

    for ontology in ontologies:

        fn = os.path.join(infile + ".dir",
                          "all_alldesc.%s.l2fold" % ontology)

        if not os.path.exists(fn):
            E.warn("file %s does not exist" % fn)
            continue

        P.load(fn, outfile,
               tablename='hypergeometric_%s_%s_l2fold' % (track, ontology),
               options='--allow-empty-file')

        fn = os.path.join(
            infile + ".dir", "all_alldesc.%s.l10pvalue" % ontology)

        P.load(fn, outfile,
               tablename='hypergeometric_%s_%s_l10pvalue' % (track, ontology),
               options='--allow-empty-file')

        fn = os.path.join(
            infile + ".dir", "all_alldesc.%s.l10qvalue" % ontology)

        P.load(fn, outfile,
               tablename='hypergeometric_%s_%s_l10qvalue' % (track, ontology),
               options='--allow-empty-file')


@merge(runHypergeometricAnalysis, "hypergeometric_summary.load")
def loadHypergeometricResultsSummary(infiles, outfile):
    '''load GO summary results.'''
    # BUG FIX: the original module used glob.glob without importing
    # glob, raising NameError when this task ran. `import glob` has
    # been added to the module imports above.
    infiles = glob.glob("hypergeometric.dir/*/*.parameters")
    P.mergeAndLoad(infiles, outfile)


@collate("hypergeometric.dir/go.tsv.dir/*.results",
         regex(r"hypergeometric.dir/go.tsv.dir/(.*)\.(.*).results"),
         r"hypergeometric.go.dir/go.tsv.dir/\1.revigo")
def plotGOResults(infiles, outfile):
    '''summarize GO results with REVIGO.'''
    infiles = " ".join(infiles)
    track = P.snip(outfile, ".revigo")

    statement = '''
    cat %(infiles)s
    | cgat revigo
    --go-tsv-file=%(annotations_filename_go)s
    --output-filename-pattern=%(track)s.%%s
    --ontology=all
    --max-similarity=0.5
    --reverse-palette
    --force-output
    -v 2
    > %(outfile)s
    '''
    P.run()


@follows(loadPathways,
         loadGeneLists,
         loadGeneListMatrix,
         loadHypergeometricAnalysis)
def full():
    pass


@follows(mkdir("report"))
def build_report():
    '''build report from scratch.'''
    E.info("starting report build process from scratch")
    P.run_report(clean=True)


@follows(mkdir("report"))
def update_report():
    '''update report.'''
    E.info("updating report")
    P.run_report(clean=False)


@follows(update_report)
def publish_report():
    '''publish report.'''
    E.info("publishing report")
    P.publish_report()

if __name__ == "__main__":
    # P.checkFiles( ("genome.fasta", "genome.idx" ) )
    sys.exit(P.main(sys.argv))
CGATOxford/CGATPipelines
obsolete/pipeline_genesets.py
Python
mit
12,292
from django import forms from django.contrib.auth.models import User from UserManagement.models import Attendent from django.core.exceptions import ValidationError # --- Entitiy Management Forms ------ class UserModelForm(forms.ModelForm): password = forms.CharField(widget=forms.PasswordInput()) confirm_password = forms.CharField(widget=forms.PasswordInput()) class Meta: model = User fields=['first_name', 'last_name', 'email', 'password', 'confirm_password'] def clean(self): if (self.cleaned_data.get('password') != self.cleaned_data.get('confirm_password')): raise ValidationError("Password does not match.") return self.cleaned_data # ---- Authentication Action Forms --------- class LoginForm(forms.Form): email = forms.CharField(widget=forms.EmailInput()) password = forms.CharField(widget=forms.PasswordInput()) class ChangePasswordForm(forms.Form): oldPassword = forms.CharField(widget=forms.PasswordInput()) newPassword = forms.CharField(widget=forms.PasswordInput()) newPassword_repeat = forms.CharField(widget=forms.PasswordInput()) def clean(self, *args, **kwargs): cleaned_data = super(ChangePasswordForm, self).clean(*args, **kwargs) if( self.cleaned_data['newPassword'] != self.cleaned_data['newPassword_repeat']): raise ValidationError("The new Passwords given do not match.") return cleaned_data # ---- Profile Edit Forms ------- class UserAccountForm(UserModelForm): exclude = ['password', 'confirm_password']
SkillSmart/ConferenceManagementSystem
Authentication/forms.py
Python
mit
1,565
# -*- coding: utf-8 -*- from django.conf import settings as django_settings from . import defaults __all__ = ['settings'] class Settings: """ Get a setting from django settings or\ Spirit's defaults. In that order """ def __getattr__(self, item): try: return getattr(django_settings, item) except AttributeError: return getattr(defaults, item) settings = Settings()
nitely/Spirit
spirit/core/conf/settings.py
Python
mit
434
from urlparse import urlparse from django.core.paginator import Paginator from django.http import QueryDict from nose.tools import eq_ from test_utils import RequestFactory from amo.tests import ESTestCase, TestCase from mkt.api.paginator import MetaSerializer, ESPaginator from mkt.webapps.indexers import WebappIndexer class TestSearchPaginator(ESTestCase): def test_single_hit(self): """Test the ESPaginator only queries ES one time.""" es = WebappIndexer.get_es() orig_search = es.search es.counter = 0 def monkey_search(*args, **kwargs): es.counter += 1 return orig_search(*args, **kwargs) es.search = monkey_search ESPaginator(WebappIndexer.search(), 5).object_list.execute() eq_(es.counter, 1) es.search = orig_search class TestMetaSerializer(TestCase): def setUp(self): self.url = '/api/whatever' self.request = RequestFactory().get(self.url) def get_serialized_data(self, page): return MetaSerializer(page, context={'request': self.request}).data def test_simple(self): data = ['a', 'b', 'c'] per_page = 3 page = Paginator(data, per_page).page(1) serialized = self.get_serialized_data(page) eq_(serialized['offset'], 0) eq_(serialized['next'], None) eq_(serialized['previous'], None) eq_(serialized['total_count'], len(data)) eq_(serialized['limit'], per_page) def test_first_page_of_two(self): data = ['a', 'b', 'c', 'd', 'e'] per_page = 3 page = Paginator(data, per_page).page(1) serialized = self.get_serialized_data(page) eq_(serialized['offset'], 0) eq_(serialized['total_count'], len(data)) eq_(serialized['limit'], per_page) eq_(serialized['previous'], None) next = urlparse(serialized['next']) eq_(next.path, self.url) eq_(QueryDict(next.query), QueryDict('limit=3&offset=3')) def test_third_page_of_four(self): data = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] per_page = 2 page = Paginator(data, per_page).page(3) serialized = self.get_serialized_data(page) # Third page will begin after fourth item # (per_page * number of pages before) item. 
eq_(serialized['offset'], 4) eq_(serialized['total_count'], len(data)) eq_(serialized['limit'], per_page) prev = urlparse(serialized['previous']) eq_(prev.path, self.url) eq_(QueryDict(prev.query), QueryDict('limit=2&offset=2')) next = urlparse(serialized['next']) eq_(next.path, self.url) eq_(QueryDict(next.query), QueryDict('limit=2&offset=6')) def test_fourth_page_of_four(self): data = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] per_page = 2 page = Paginator(data, per_page).page(4) serialized = self.get_serialized_data(page) # Third page will begin after fourth item # (per_page * number of pages before) item. eq_(serialized['offset'], 6) eq_(serialized['total_count'], len(data)) eq_(serialized['limit'], per_page) prev = urlparse(serialized['previous']) eq_(prev.path, self.url) eq_(QueryDict(prev.query), QueryDict('limit=2&offset=4')) eq_(serialized['next'], None) def test_without_request_path(self): data = ['a', 'b', 'c', 'd', 'e'] per_page = 2 page = Paginator(data, per_page).page(2) serialized = MetaSerializer(page).data eq_(serialized['offset'], 2) eq_(serialized['total_count'], len(data)) eq_(serialized['limit'], per_page) prev = urlparse(serialized['previous']) eq_(prev.path, '') eq_(QueryDict(prev.query), QueryDict('limit=2&offset=0')) next = urlparse(serialized['next']) eq_(next.path, '') eq_(QueryDict(next.query), QueryDict('limit=2&offset=4')) def test_with_request_path_override_existing_params(self): self.url = '/api/whatever/?limit=0&offset=xxx&extra&superfluous=yes' self.request = RequestFactory().get(self.url) data = ['a', 'b', 'c', 'd', 'e', 'f'] per_page = 2 page = Paginator(data, per_page).page(2) serialized = self.get_serialized_data(page) eq_(serialized['offset'], 2) eq_(serialized['total_count'], len(data)) eq_(serialized['limit'], per_page) prev = urlparse(serialized['previous']) eq_(prev.path, '/api/whatever/') eq_(QueryDict(prev.query), QueryDict('limit=2&offset=0&extra=&superfluous=yes')) next = urlparse(serialized['next']) eq_(next.path, 
'/api/whatever/') eq_(QueryDict(next.query), QueryDict('limit=2&offset=4&extra=&superfluous=yes'))
andymckay/zamboni
mkt/api/tests/test_paginator.py
Python
bsd-3-clause
4,864
from math import exp class Contagion(): def __init__(self, network, canBeInfected, tryToInfect, infect): self.network = network self.canBeInfected = canBeInfected self.tryToInfect = tryToInfect self.infect = infect def aggrSpread(self, alreadyChecked, nodesLeft, conditions, receiverId): """Automatically infects neighbors if receiver is infected""" if receiverId not in alreadyChecked: alreadyChecked[receiverId] = True receiver = self.network.nodes[receiverId] self.infect(conditions, receiver) for neighbor in self.network.edges[receiverId]: if self.canBeInfected(conditions, self.network.nodes[neighbor]): if neighbor not in alreadyChecked: nodesLeft.append(neighbor) if len(nodesLeft) < 1: return else: nextNode = nodesLeft.pop() return self.aggrSpread(alreadyChecked, nodesLeft, conditions, nextNode) def spread(self, alreadyChecked, nodesLeft, conditions, receiverId): """Infects neighbors using tryToInfect. Does not auto infect""" if receiverId not in alreadyChecked: receiver = self.network.nodes[receiverId] if self.tryToInfect(conditions, receiver): self.infect(conditions, receiver) for neighbor in self.network.edges[receiverId]: if neighbor not in alreadyChecked: if self.canBeInfected(conditions, self.network.nodes[neighbor]): nodesLeft.append(neighbor) alreadyChecked[receiverId] = True if len(nodesLeft) < 1: return else: nextNode = nodesLeft.pop() return self.spread(alreadyChecked, nodesLeft, conditions, nextNode) class WolffIsing(Contagion): """Used to create an infection model that matches the Wolff Ising program, flipping all matching neighbors spins using Wolff spread probability.""" def __init__(self, network, beta, randomNumGen): self.beta = beta self.network = network self.randomNumGen = randomNumGen def infect(self, conditions, receiver): receiver['spin'] = conditions['spin'] def canBeInfected(self, conditions, receiver): return receiver['spin'] != conditions['spin'] def tryToInfect(self, transmitter, receiver): """Uses Wolff spread probability to transmit 
infection""" wolfSpreadProbability = 1 - exp( - 2 * self.beta) return self.randomNumGen() < wolfSpreadProbability class WolffIsingCFP(WolffIsing): """A Wolff Ising spread model that also has a price""" def infect(self, conditions, receiver): receiver['spin'] = conditions['spin'] receiver['price'] = conditions['price']
jfarid27/CFP-Simplified
simulation/Contagion/__init__.py
Python
gpl-3.0
2,868
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, TYPE_CHECKING from azure.core.configuration import Configuration from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy from ._version import VERSION if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential class TemplateSpecsClientConfiguration(Configuration): """Configuration for TemplateSpecsClient. Note that all parameters used to create this instance are saved as instance attributes. :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: Subscription Id which forms part of the URI for every service call. :type subscription_id: str """ def __init__( self, credential: "TokenCredential", subscription_id: str, **kwargs: Any ) -> None: super(TemplateSpecsClientConfiguration, self).__init__(**kwargs) if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") self.credential = credential self.subscription_id = subscription_id self.api_version = "2019-06-01-preview" self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION)) self._configure(**kwargs) def _configure( self, **kwargs # type: Any ): # type: (...) 
-> None self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) self.authentication_policy = kwargs.get('authentication_policy') if self.credential and not self.authentication_policy: self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
Azure/azure-sdk-for-python
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/templatespecs/v2019_06_01_preview/_configuration.py
Python
mit
3,261
#8.4 Open the file romeo.txt and read it line by line. For each line, split the line into a #list of words using the split() function. #The program should build a list of words. #For each word on each line check to see if the word is already in the list and if not append it to the list. # When the program completes, sort and print the resulting words in alphabetical order. #You can download the sample data at http://www.pythonlearn.com/code/romeo.txt fname = raw_input("Enter file name: ") fhand = None try: fhand = open(fname) except: print 'File cannot be opened.', fname exit() #list constructor lst = [] read_file = fhand.read().strip() while ( False ): listwords = read_file.split() listwords in lst lst.append(listwords) print lst #for line in read_file: # listwords = read_file.split() # # if listwords in lst == False: # lst.append(listwords) #for line in fh # sline = split() # lst.append(sline) #lst.append(b_words) #while # lst.append() #for line in fh: #print line.rstrip()
missulmer/Pythonstudy
coursera_python_specialization/8_4.py
Python
cc0-1.0
1,047
import requests from collections import defaultdict from requests.exceptions import RequestException from django.conf import settings from django.utils.dateparse import parse_datetime import sal.plugin import server.utils as utils class CryptStatus(sal.plugin.DetailPlugin): description = 'FileVault Escrow Status' supported_os_families = [sal.plugin.OSFamilies.darwin] def get_context(self, machine, **kwargs): context = defaultdict(str) context['title'] = self.description crypt_url = utils.get_setting('crypt_url', None) machine_url = crypt_url if crypt_url: crypt_url = crypt_url.rstrip() if crypt_url: try: verify = settings.ROOT_CA except AttributeError: verify = True request_url = '{}/verify/{}/recovery_key/'.format(crypt_url, machine.serial) output = None machine_url = crypt_url try: response = requests.get(request_url, verify=verify) if response.status_code == requests.codes.ok: output = response.json() # Have template link to machine info page rather # than Crypt root. machine_url = '{}/info/{}'.format(crypt_url, machine.serial) except RequestException: # Either there was an error or the machine hasn't been # seen. pass if output: context['escrowed'] = output['escrowed'] if output['escrowed']: context['date_escrowed'] = parse_datetime(output['date_escrowed']) context['crypt_url'] = machine_url return context
salopensource/sal
server/plugins/cryptstatus/cryptstatus.py
Python
apache-2.0
1,772
# This file is part of wger Workout Manager. # # wger Workout Manager is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # wger Workout Manager is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License import datetime from django.contrib.auth.models import Permission from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.core.urlresolvers import reverse from wger.core.models import UserProfile from wger.core.tests.base_testcase import WorkoutManagerTestCase from wger.gym.models import Gym from wger.gym.models import GymAdminConfig class GymAddUserTestCase(WorkoutManagerTestCase): ''' Tests admin adding users to gyms ''' def add_user(self, fail=False): ''' Helper function to add users ''' count_before = User.objects.all().count() GymAdminConfig.objects.all().delete() response = self.client.get(reverse('gym:gym:add-user', kwargs={'gym_pk': 1})) self.assertEqual(GymAdminConfig.objects.all().count(), 0) if fail: self.assertEqual(response.status_code, 403) else: self.assertEqual(response.status_code, 200) response = self.client.post(reverse('gym:gym:add-user', kwargs={'gym_pk': 1}), {'first_name': 'Cletus', 'last_name': 'Spuckle', 'username': 'cletus', 'email': 'cletus@spuckle-megacorp.com', 'role': 'admin'}) count_after = User.objects.all().count() if fail: self.assertEqual(response.status_code, 403) self.assertEqual(count_before, count_after) self.assertFalse(self.client.session.get('gym.user')) else: self.assertEqual(count_before + 1, count_after) 
self.assertEqual(response.status_code, 302) self.assertTrue(self.client.session['gym.user']['user_pk'], 3) self.assertTrue(self.client.session['gym.user']['password']) self.assertEqual(len(self.client.session['gym.user']['password']), 15) new_user = User.objects.get(pk=self.client.session['gym.user']['user_pk']) self.assertEqual(GymAdminConfig.objects.all().count(), 1) self.assertEqual(new_user.userprofile.gym_id, 1) def test_add_user_authorized(self): ''' Tests adding a user as authorized user ''' self.user_login('admin') self.add_user() def test_add_user_authorized2(self): ''' Tests adding a user as authorized user ''' self.user_login('general_manager1') self.add_user() def test_add_user_unauthorized(self): ''' Tests adding a user an unauthorized user ''' self.user_login('test') self.add_user(fail=True) def test_add_user_unauthorized2(self): ''' Tests adding a user an unauthorized user ''' self.user_login('trainer1') self.add_user(fail=True) def test_add_user_unauthorized3(self): ''' Tests adding a user an unauthorized user ''' self.user_login('manager3') self.add_user(fail=True) def test_add_user_logged_out(self): ''' Tests adding a user a logged out user ''' self.add_user(fail=True) def new_user_data_export(self, fail=False): ''' Helper function to test exporting the data of a newly created user ''' response = self.client.get(reverse('gym:gym:new-user-data-export')) if fail: self.assertIn(response.status_code, (302, 403)) else: self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'text/csv') today = datetime.date.today() filename = 'User-data-{t.year}-{t.month:02d}-{t.day:02d}-cletus.csv'.format(t=today) self.assertEqual(response['Content-Disposition'], 'attachment; filename={}'.format(filename)) self.assertGreaterEqual(len(response.content), 90) self.assertLessEqual(len(response.content), 120) def test_new_user_data_export(self): ''' Test exporting the data of a newly created user ''' self.user_login('admin') self.add_user() 
self.new_user_data_export(fail=False) self.user_logout() self.new_user_data_export(fail=True) self.user_logout() self.user_login('test') self.new_user_data_export(fail=True) class TrainerLoginTestCase(WorkoutManagerTestCase): ''' Tests the trainer login view (switching to user ID) ''' def test_anonymous(self): ''' Test the trainer login as an anonymous user ''' response = self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 1})) self.assertEqual(response.status_code, 302) self.assertFalse(self.client.session.get('trainer.identity')) def test_user(self): ''' Test the trainer login as a logged in user without rights ''' self.user_login('test') response = self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 1})) self.assertEqual(response.status_code, 403) self.assertFalse(self.client.session.get('trainer.identity')) def test_trainer(self): ''' Test the trainer login as a logged in user with enough rights ''' self.user_login('admin') response = self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 2})) self.assertEqual(response.status_code, 302) self.assertTrue(self.client.session.get('trainer.identity')) def test_wrong_gym(self): ''' Test changing the identity to a user in a different gym ''' profile = UserProfile.objects.get(user_id=2) profile.gym_id = 2 profile.save() self.user_login('admin') response = self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 2})) self.assertEqual(response.status_code, 403) self.assertFalse(self.client.session.get('trainer.identity')) def test_gym_trainer(self): ''' Test changing the identity to a user with trainer rights ''' user = User.objects.get(pk=2) content_type = ContentType.objects.get_for_model(Gym) permission = Permission.objects.get(content_type=content_type, codename='gym_trainer') user.user_permissions.add(permission) self.user_login('admin') response = self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 2})) 
self.assertEqual(response.status_code, 403) self.assertFalse(self.client.session.get('trainer.identity')) def test_gym_manager(self): ''' Test changing the identity to a user with gym management rights ''' user = User.objects.get(pk=2) content_type = ContentType.objects.get_for_model(Gym) permission = Permission.objects.get(content_type=content_type, codename='manage_gym') user.user_permissions.add(permission) self.user_login('admin') response = self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 2})) self.assertEqual(response.status_code, 403) self.assertFalse(self.client.session.get('trainer.identity')) def test_gyms_manager(self): ''' Test changing the identity to a user with gyms management rights ''' user = User.objects.get(pk=2) content_type = ContentType.objects.get_for_model(Gym) permission = Permission.objects.get(content_type=content_type, codename='manage_gyms') user.user_permissions.add(permission) self.user_login('admin') response = self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 2})) self.assertEqual(response.status_code, 403) self.assertFalse(self.client.session.get('trainer.identity')) class TrainerLogoutTestCase(WorkoutManagerTestCase): ''' Tests the trainer logout view (switching back to trainer ID) ''' def test_logout(self): ''' Test the trainer login as an anonymous user ''' self.user_login('admin') self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 2})) self.assertTrue(self.client.session.get('trainer.identity')) self.client.get(reverse('core:user:trainer-login', kwargs={'user_pk': 1})) self.assertFalse(self.client.session.get('trainer.identity'))
DeveloperMal/wger
wger/gym/tests/test_user.py
Python
agpl-3.0
9,229
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import pydoc import time import unittest from pyspark.sql import SparkSession, Row from pyspark.sql.types import * from pyspark.sql.utils import AnalysisException, IllegalArgumentException from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \ pandas_requirement_message, pyarrow_requirement_message from pyspark.testing.utils import QuietTest class DataFrameTests(ReusedSQLTestCase): def test_range(self): self.assertEqual(self.spark.range(1, 1).count(), 0) self.assertEqual(self.spark.range(1, 0, -1).count(), 1) self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2) self.assertEqual(self.spark.range(-2).count(), 0) self.assertEqual(self.spark.range(3).count(), 3) def test_duplicated_column_names(self): df = self.spark.createDataFrame([(1, 2)], ["c", "c"]) row = df.select('*').first() self.assertEqual(1, row[0]) self.assertEqual(2, row[1]) self.assertEqual("Row(c=1, c=2)", str(row)) # Cannot access columns self.assertRaises(AnalysisException, lambda: df.select(df[0]).first()) self.assertRaises(AnalysisException, lambda: df.select(df.c).first()) self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first()) def 
test_freqItems(self): vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)] df = self.sc.parallelize(vals).toDF() items = df.stat.freqItems(("a", "b"), 0.4).collect()[0] self.assertTrue(1 in items[0]) self.assertTrue(-2.0 in items[1]) def test_help_command(self): # Regression test for SPARK-5464 rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}']) df = self.spark.read.json(rdd) # render_doc() reproduces the help() exception without printing output pydoc.render_doc(df) pydoc.render_doc(df.foo) pydoc.render_doc(df.take(1)) def test_dropna(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True)]) # shouldn't drop a non-null row self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, 80.1)], schema).dropna().count(), 1) # dropping rows with a single null value self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 80.1)], schema).dropna().count(), 0) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 80.1)], schema).dropna(how='any').count(), 0) # if how = 'all', only drop rows if all values are null self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 80.1)], schema).dropna(how='all').count(), 1) self.assertEqual(self.spark.createDataFrame( [(None, None, None)], schema).dropna(how='all').count(), 0) # how and subset self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(), 1) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(), 0) # threshold self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(), 1) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, None)], schema).dropna(thresh=2).count(), 0) # threshold and subset self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, None)], 
schema).dropna(thresh=2, subset=['name', 'age']).count(), 1) self.assertEqual(self.spark.createDataFrame( [(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(), 0) # thresh should take precedence over how self.assertEqual(self.spark.createDataFrame( [(u'Alice', 50, None)], schema).dropna( how='any', thresh=2, subset=['name', 'age']).count(), 1) def test_fillna(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True), StructField("spy", BooleanType(), True)]) # fillna shouldn't change non-null values row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first() self.assertEqual(row.age, 10) # fillna with int row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first() self.assertEqual(row.age, 50) self.assertEqual(row.height, 50.0) # fillna with double row = self.spark.createDataFrame( [(u'Alice', None, None, None)], schema).fillna(50.1).first() self.assertEqual(row.age, 50) self.assertEqual(row.height, 50.1) # fillna with bool row = self.spark.createDataFrame( [(u'Alice', None, None, None)], schema).fillna(True).first() self.assertEqual(row.age, None) self.assertEqual(row.spy, True) # fillna with string row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first() self.assertEqual(row.name, u"hello") self.assertEqual(row.age, None) # fillna with subset specified for numeric cols row = self.spark.createDataFrame( [(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first() self.assertEqual(row.name, None) self.assertEqual(row.age, 50) self.assertEqual(row.height, None) self.assertEqual(row.spy, None) # fillna with subset specified for string cols row = self.spark.createDataFrame( [(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first() self.assertEqual(row.name, "haha") self.assertEqual(row.age, None) 
self.assertEqual(row.height, None) self.assertEqual(row.spy, None) # fillna with subset specified for bool cols row = self.spark.createDataFrame( [(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first() self.assertEqual(row.name, None) self.assertEqual(row.age, None) self.assertEqual(row.height, None) self.assertEqual(row.spy, True) # fillna with dictionary for boolean types row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first() self.assertEqual(row.a, True) def test_repartitionByRange_dataframe(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True)]) df1 = self.spark.createDataFrame( [(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema) df2 = self.spark.createDataFrame( [(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema) # test repartitionByRange(numPartitions, *cols) df3 = df1.repartitionByRange(2, "name", "age") self.assertEqual(df3.rdd.getNumPartitions(), 2) self.assertEqual(df3.rdd.first(), df2.rdd.first()) self.assertEqual(df3.rdd.take(3), df2.rdd.take(3)) # test repartitionByRange(numPartitions, *cols) df4 = df1.repartitionByRange(3, "name", "age") self.assertEqual(df4.rdd.getNumPartitions(), 3) self.assertEqual(df4.rdd.first(), df2.rdd.first()) self.assertEqual(df4.rdd.take(3), df2.rdd.take(3)) # test repartitionByRange(*cols) df5 = df1.repartitionByRange("name", "age") self.assertEqual(df5.rdd.first(), df2.rdd.first()) self.assertEqual(df5.rdd.take(3), df2.rdd.take(3)) def test_replace(self): schema = StructType([ StructField("name", StringType(), True), StructField("age", IntegerType(), True), StructField("height", DoubleType(), True)]) # replace with int row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first() self.assertEqual(row.age, 20) self.assertEqual(row.height, 20.0) # replace with double row = self.spark.createDataFrame( 
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first() self.assertEqual(row.age, 82) self.assertEqual(row.height, 82.1) # replace with string row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first() self.assertEqual(row.name, u"Ann") self.assertEqual(row.age, 10) # replace with subset specified by a string of a column name w/ actual change row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first() self.assertEqual(row.age, 20) # replace with subset specified by a string of a column name w/o actual change row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first() self.assertEqual(row.age, 10) # replace with subset specified with one column replaced, another column not in subset # stays unchanged. row = self.spark.createDataFrame( [(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first() self.assertEqual(row.name, u'Alice') self.assertEqual(row.age, 20) self.assertEqual(row.height, 10.0) # replace with subset specified but no column will be replaced row = self.spark.createDataFrame( [(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first() self.assertEqual(row.name, u'Alice') self.assertEqual(row.age, 10) self.assertEqual(row.height, None) # replace with lists row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first() self.assertTupleEqual(row, (u'Ann', 10, 80.1)) # replace with dict row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({10: 11}).first() self.assertTupleEqual(row, (u'Alice', 11, 80.1)) # test backward compatibility with dummy value dummy_value = 1 row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first() self.assertTupleEqual(row, (u'Bob', 10, 80.1)) # test dict with mixed numerics row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], 
schema).replace({10: -10, 80.1: 90.5}).first() self.assertTupleEqual(row, (u'Alice', -10, 90.5)) # replace with tuples row = self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first() self.assertTupleEqual(row, (u'Bob', 10, 80.1)) # replace multiple columns row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first() self.assertTupleEqual(row, (u'Alice', 20, 90.0)) # test for mixed numerics row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first() self.assertTupleEqual(row, (u'Alice', 20, 90.5)) row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first() self.assertTupleEqual(row, (u'Alice', 20, 90.5)) # replace with boolean row = (self .spark.createDataFrame([(u'Alice', 10, 80.0)], schema) .selectExpr("name = 'Bob'", 'age <= 15') .replace(False, True).first()) self.assertTupleEqual(row, (True, True)) # replace string with None and then drop None rows row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna() self.assertEqual(row.count(), 0) # replace with number and None row = self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first() self.assertTupleEqual(row, (u'Alice', 20, None)) # should fail if subset is not list, tuple or None with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first() # should fail if to_replace and value have different length with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first() # should fail if when received unexpected type with self.assertRaises(ValueError): from datetime import datetime self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first() # should fail if provided mixed type 
replacements with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first() with self.assertRaises(ValueError): self.spark.createDataFrame( [(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first() with self.assertRaisesRegexp( TypeError, 'value argument is required when to_replace is not a dictionary.'): self.spark.createDataFrame( [(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first() def test_with_column_with_existing_name(self): keys = self.df.withColumn("key", self.df.key).select("key").collect() self.assertEqual([r.key for r in keys], list(range(100))) # regression test for SPARK-10417 def test_column_iterator(self): def foo(): for x in self.df.key: break self.assertRaises(TypeError, foo) def test_generic_hints(self): from pyspark.sql import DataFrame df1 = self.spark.range(10e10).toDF("id") df2 = self.spark.range(10e10).toDF("id") self.assertIsInstance(df1.hint("broadcast"), DataFrame) self.assertIsInstance(df1.hint("broadcast", []), DataFrame) # Dummy rules self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame) self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame) plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan() self.assertEqual(1, plan.toString().count("BroadcastHashJoin")) # add tests for SPARK-23647 (test more types for hint) def test_extended_hint_types(self): from pyspark.sql import DataFrame df = self.spark.range(10e10).toDF("id") such_a_nice_list = ["itworks1", "itworks2", "itworks3"] hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list) logical_plan = hinted_df._jdf.queryExecution().logical() self.assertEqual(1, logical_plan.toString().count("1.2345")) self.assertEqual(1, logical_plan.toString().count("what")) self.assertEqual(3, logical_plan.toString().count("itworks")) def test_sample(self): self.assertRaisesRegexp( TypeError, "should be a bool, float and 
number", lambda: self.spark.range(1).sample()) self.assertRaises( TypeError, lambda: self.spark.range(1).sample("a")) self.assertRaises( TypeError, lambda: self.spark.range(1).sample(seed="abc")) self.assertRaises( IllegalArgumentException, lambda: self.spark.range(1).sample(-1.0)) def test_toDF_with_schema_string(self): data = [Row(key=i, value=str(i)) for i in range(100)] rdd = self.sc.parallelize(data, 5) df = rdd.toDF("key: int, value: string") self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>") self.assertEqual(df.collect(), data) # different but compatible field types can be used. df = rdd.toDF("key: string, value: string") self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>") self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)]) # field names can differ. df = rdd.toDF(" a: int, b: string ") self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>") self.assertEqual(df.collect(), data) # number of fields must match. self.assertRaisesRegexp(Exception, "Length of object", lambda: rdd.toDF("key: int").collect()) # field types mismatch will cause exception at runtime. self.assertRaisesRegexp(Exception, "FloatType can not accept", lambda: rdd.toDF("key: float, value: string").collect()) # flat schema values will be wrapped into row. df = rdd.map(lambda row: row.key).toDF("int") self.assertEqual(df.schema.simpleString(), "struct<value:int>") self.assertEqual(df.collect(), [Row(key=i) for i in range(100)]) # users can use DataType directly instead of data type string. 
df = rdd.map(lambda row: row.key).toDF(IntegerType()) self.assertEqual(df.schema.simpleString(), "struct<value:int>") self.assertEqual(df.collect(), [Row(key=i) for i in range(100)]) def test_join_without_on(self): df1 = self.spark.range(1).toDF("a") df2 = self.spark.range(1).toDF("b") with self.sql_conf({"spark.sql.crossJoin.enabled": False}): self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect()) with self.sql_conf({"spark.sql.crossJoin.enabled": True}): actual = df1.join(df2, how="inner").collect() expected = [Row(a=0, b=0)] self.assertEqual(actual, expected) # Regression test for invalid join methods when on is None, Spark-14761 def test_invalid_join_method(self): df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"]) df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"]) self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type")) # Cartesian products require cross join syntax def test_require_cross(self): df1 = self.spark.createDataFrame([(1, "1")], ("key", "value")) df2 = self.spark.createDataFrame([(1, "1")], ("key", "value")) with self.sql_conf({"spark.sql.crossJoin.enabled": False}): # joins without conditions require cross join syntax self.assertRaises(AnalysisException, lambda: df1.join(df2).collect()) # works with crossJoin self.assertEqual(1, df1.crossJoin(df2).count()) def test_cache(self): spark = self.spark with self.tempView("tab1", "tab2"): spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1") spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2") self.assertFalse(spark.catalog.isCached("tab1")) self.assertFalse(spark.catalog.isCached("tab2")) spark.catalog.cacheTable("tab1") self.assertTrue(spark.catalog.isCached("tab1")) self.assertFalse(spark.catalog.isCached("tab2")) spark.catalog.cacheTable("tab2") spark.catalog.uncacheTable("tab1") self.assertFalse(spark.catalog.isCached("tab1")) 
self.assertTrue(spark.catalog.isCached("tab2")) spark.catalog.clearCache() self.assertFalse(spark.catalog.isCached("tab1")) self.assertFalse(spark.catalog.isCached("tab2")) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.isCached("does_not_exist")) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.cacheTable("does_not_exist")) self.assertRaisesRegexp( AnalysisException, "does_not_exist", lambda: spark.catalog.uncacheTable("does_not_exist")) def _to_pandas(self): from datetime import datetime, date schema = StructType().add("a", IntegerType()).add("b", StringType())\ .add("c", BooleanType()).add("d", FloatType())\ .add("dt", DateType()).add("ts", TimestampType()) data = [ (1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)), (2, "foo", True, 5.0, None, None), (3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)), (4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)), ] df = self.spark.createDataFrame(data, schema) return df.toPandas() @unittest.skipIf(not have_pandas, pandas_requirement_message) def test_to_pandas(self): import numpy as np pdf = self._to_pandas() types = pdf.dtypes self.assertEquals(types[0], np.int32) self.assertEquals(types[1], np.object) self.assertEquals(types[2], np.bool) self.assertEquals(types[3], np.float32) self.assertEquals(types[4], np.object) # datetime.date self.assertEquals(types[5], 'datetime64[ns]') @unittest.skipIf(have_pandas, "Required Pandas was found.") def test_to_pandas_required_pandas_not_found(self): with QuietTest(self.sc): with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'): self._to_pandas() @unittest.skipIf(not have_pandas, pandas_requirement_message) def test_to_pandas_avoid_astype(self): import numpy as np schema = StructType().add("a", IntegerType()).add("b", StringType())\ .add("c", IntegerType()) data = [(1, "foo", 16777220), (None, "bar", None)] df = 
self.spark.createDataFrame(data, schema) types = df.toPandas().dtypes self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value. self.assertEquals(types[1], np.object) self.assertEquals(types[2], np.float64) @unittest.skipIf(not have_pandas, pandas_requirement_message) def test_to_pandas_from_empty_dataframe(self): with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): # SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes import numpy as np sql = """ SELECT CAST(1 AS TINYINT) AS tinyint, CAST(1 AS SMALLINT) AS smallint, CAST(1 AS INT) AS int, CAST(1 AS BIGINT) AS bigint, CAST(0 AS FLOAT) AS float, CAST(0 AS DOUBLE) AS double, CAST(1 AS BOOLEAN) AS boolean, CAST('foo' AS STRING) AS string, CAST('2019-01-01' AS TIMESTAMP) AS timestamp """ dtypes_when_nonempty_df = self.spark.sql(sql).toPandas().dtypes dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df)) @unittest.skipIf(not have_pandas, pandas_requirement_message) def test_to_pandas_from_null_dataframe(self): with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): # SPARK-29188 test that toPandas() on a dataframe with only nulls has correct dtypes import numpy as np sql = """ SELECT CAST(NULL AS TINYINT) AS tinyint, CAST(NULL AS SMALLINT) AS smallint, CAST(NULL AS INT) AS int, CAST(NULL AS BIGINT) AS bigint, CAST(NULL AS FLOAT) AS float, CAST(NULL AS DOUBLE) AS double, CAST(NULL AS BOOLEAN) AS boolean, CAST(NULL AS STRING) AS string, CAST(NULL AS TIMESTAMP) AS timestamp """ pdf = self.spark.sql(sql).toPandas() types = pdf.dtypes self.assertEqual(types[0], np.float64) self.assertEqual(types[1], np.float64) self.assertEqual(types[2], np.float64) self.assertEqual(types[3], np.float64) self.assertEqual(types[4], np.float32) self.assertEqual(types[5], np.float64) self.assertEqual(types[6], np.object) self.assertEqual(types[7], 
np.object) self.assertTrue(np.can_cast(np.datetime64, types[8])) @unittest.skipIf(not have_pandas, pandas_requirement_message) def test_to_pandas_from_mixed_dataframe(self): with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): # SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes import numpy as np sql = """ SELECT CAST(col1 AS TINYINT) AS tinyint, CAST(col2 AS SMALLINT) AS smallint, CAST(col3 AS INT) AS int, CAST(col4 AS BIGINT) AS bigint, CAST(col5 AS FLOAT) AS float, CAST(col6 AS DOUBLE) AS double, CAST(col7 AS BOOLEAN) AS boolean, CAST(col8 AS STRING) AS string, CAST(col9 AS TIMESTAMP) AS timestamp FROM VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1), (NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) """ pdf_with_some_nulls = self.spark.sql(sql).toPandas() pdf_with_only_nulls = self.spark.sql(sql).filter('tinyint is null').toPandas() self.assertTrue(np.all(pdf_with_only_nulls.dtypes == pdf_with_some_nulls.dtypes)) def test_create_dataframe_from_array_of_long(self): import array data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))] df = self.spark.createDataFrame(data) self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807])) @unittest.skipIf(not have_pandas, pandas_requirement_message) def test_create_dataframe_from_pandas_with_timestamp(self): import pandas as pd from datetime import datetime pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)], "d": [pd.Timestamp.now().date()]}, columns=["d", "ts"]) # test types are inferred correctly without specifying schema df = self.spark.createDataFrame(pdf) self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType)) self.assertTrue(isinstance(df.schema['d'].dataType, DateType)) # test with schema will accept pdf as input df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp") self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType)) 
self.assertTrue(isinstance(df.schema['d'].dataType, DateType)) @unittest.skipIf(have_pandas, "Required Pandas was found.") def test_create_dataframe_required_pandas_not_found(self): with QuietTest(self.sc): with self.assertRaisesRegexp( ImportError, "(Pandas >= .* must be installed|No module named '?pandas'?)"): import pandas as pd from datetime import datetime pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)], "d": [pd.Timestamp.now().date()]}) self.spark.createDataFrame(pdf) # Regression test for SPARK-23360 @unittest.skipIf(not have_pandas, pandas_requirement_message) def test_create_dataframe_from_pandas_with_dst(self): import pandas as pd from pandas.util.testing import assert_frame_equal from datetime import datetime pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]}) df = self.spark.createDataFrame(pdf) assert_frame_equal(pdf, df.toPandas()) orig_env_tz = os.environ.get('TZ', None) try: tz = 'America/Los_Angeles' os.environ['TZ'] = tz time.tzset() with self.sql_conf({'spark.sql.session.timeZone': tz}): df = self.spark.createDataFrame(pdf) assert_frame_equal(pdf, df.toPandas()) finally: del os.environ['TZ'] if orig_env_tz is not None: os.environ['TZ'] = orig_env_tz time.tzset() def test_repr_behaviors(self): import re pattern = re.compile(r'^ *\|', re.MULTILINE) df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value")) # test when eager evaluation is enabled and _repr_html_ will not be called with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}): expected1 = """+-----+-----+ || key|value| |+-----+-----+ || 1| 1| ||22222|22222| |+-----+-----+ |""" self.assertEquals(re.sub(pattern, '', expected1), df.__repr__()) with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}): expected2 = """+---+-----+ ||key|value| |+---+-----+ || 1| 1| ||222| 222| |+---+-----+ |""" self.assertEquals(re.sub(pattern, '', expected2), df.__repr__()) with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}): expected3 = 
"""+---+-----+ ||key|value| |+---+-----+ || 1| 1| |+---+-----+ |only showing top 1 row |""" self.assertEquals(re.sub(pattern, '', expected3), df.__repr__()) # test when eager evaluation is enabled and _repr_html_ will be called with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}): expected1 = """<table border='1'> |<tr><th>key</th><th>value</th></tr> |<tr><td>1</td><td>1</td></tr> |<tr><td>22222</td><td>22222</td></tr> |</table> |""" self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_()) with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}): expected2 = """<table border='1'> |<tr><th>key</th><th>value</th></tr> |<tr><td>1</td><td>1</td></tr> |<tr><td>222</td><td>222</td></tr> |</table> |""" self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_()) with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}): expected3 = """<table border='1'> |<tr><th>key</th><th>value</th></tr> |<tr><td>1</td><td>1</td></tr> |</table> |only showing top 1 row |""" self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_()) # test when eager evaluation is disabled and _repr_html_ will be called with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}): expected = "DataFrame[key: bigint, value: string]" self.assertEquals(None, df._repr_html_()) self.assertEquals(expected, df.__repr__()) with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}): self.assertEquals(None, df._repr_html_()) self.assertEquals(expected, df.__repr__()) with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}): self.assertEquals(None, df._repr_html_()) self.assertEquals(expected, df.__repr__()) def test_to_local_iterator(self): df = self.spark.range(8, numPartitions=4) expected = df.collect() it = df.toLocalIterator() self.assertEqual(expected, list(it)) # Test DataFrame with empty partition df = self.spark.range(3, numPartitions=4) it = df.toLocalIterator() expected = df.collect() self.assertEqual(expected, list(it)) def 
test_to_local_iterator_prefetch(self): df = self.spark.range(8, numPartitions=4) expected = df.collect() it = df.toLocalIterator(prefetchPartitions=True) self.assertEqual(expected, list(it)) def test_to_local_iterator_not_fully_consumed(self): # SPARK-23961: toLocalIterator throws exception when not fully consumed # Create a DataFrame large enough so that write to socket will eventually block df = self.spark.range(1 << 20, numPartitions=2) it = df.toLocalIterator() self.assertEqual(df.take(1)[0], next(it)) with QuietTest(self.sc): it = None # remove iterator from scope, socket is closed when cleaned up # Make sure normal df operations still work result = [] for i, row in enumerate(df.toLocalIterator()): result.append(row) if i == 7: break self.assertEqual(df.take(8), result) class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils): # These tests are separate because it uses 'spark.sql.queryExecutionListeners' which is # static and immutable. This can't be set or unset, for example, via `spark.conf`. @classmethod def setUpClass(cls): import glob from pyspark.find_spark_home import _find_spark_home SPARK_HOME = _find_spark_home() filename_pattern = ( "sql/core/target/scala-*/test-classes/org/apache/spark/sql/" "TestQueryExecutionListener.class") cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern))) if cls.has_listener: # Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration. cls.spark = SparkSession.builder \ .master("local[4]") \ .appName(cls.__name__) \ .config( "spark.sql.queryExecutionListeners", "org.apache.spark.sql.TestQueryExecutionListener") \ .getOrCreate() def setUp(self): if not self.has_listener: raise self.skipTest( "'org.apache.spark.sql.TestQueryExecutionListener' is not " "available. 
Will skip the related tests.") @classmethod def tearDownClass(cls): if hasattr(cls, "spark"): cls.spark.stop() def tearDown(self): self.spark._jvm.OnSuccessCall.clear() def test_query_execution_listener_on_collect(self): self.assertFalse( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should not be called before 'collect'") self.spark.sql("SELECT * FROM range(1)").collect() self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000) self.assertTrue( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should be called after 'collect'") @unittest.skipIf( not have_pandas or not have_pyarrow, pandas_requirement_message or pyarrow_requirement_message) def test_query_execution_listener_on_collect_with_arrow(self): with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}): self.assertFalse( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should not be " "called before 'toPandas'") self.spark.sql("SELECT * FROM range(1)").toPandas() self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000) self.assertTrue( self.spark._jvm.OnSuccessCall.isCalled(), "The callback from the query execution listener should be called after 'toPandas'") if __name__ == "__main__": from pyspark.sql.tests.test_dataframe import * try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
ptkool/spark
python/pyspark/sql/tests/test_dataframe.py
Python
apache-2.0
38,168
# Software selection text spoke
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.  You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.flags import flags
from pyanaconda.ui.categories.software import SoftwareCategory
from pyanaconda.ui.tui.spokes import NormalTUISpoke
from pyanaconda.threading import threadMgr, AnacondaThread
from pyanaconda.payload import DependencyError, PackagePayload, payloadMgr, NoSuchGroup
from pyanaconda.core.i18n import N_, _, C_
from pyanaconda.core.constants import THREAD_PAYLOAD, THREAD_CHECK_SOFTWARE, \
    THREAD_SOFTWARE_WATCHER

from simpleline.render.containers import ListColumnContainer
from simpleline.render.screen import InputState
from simpleline.render.screen_handler import ScreenHandler
from simpleline.render.widgets import TextWidget, CheckboxWidget

from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)

__all__ = ["SoftwareSpoke"]


class SoftwareSpoke(NormalTUISpoke):
    """ Spoke used to read new value of text to represent source repo.

       .. inheritance-diagram:: SoftwareSpoke
          :parts: 3
    """
    helpFile = "SoftwareSpoke.txt"
    category = SoftwareCategory

    def __init__(self, data, storage, payload, instclass):
        super().__init__(data, storage, payload, instclass)
        self.title = N_("Software selection")
        # Container rebuilt on every refresh(); None until the screen is drawn.
        self._container = None
        self.errors = []
        # Transaction id of the last successful dependency check (see txid_valid).
        self._tx_id = None
        # The environment the user picked in the UI (not yet applied to payload).
        self._selected_environment = None
        # The environment actually applied to the payload.
        self.environment = None
        # Add-on selection in the UI vs. the applied add-on set.
        self._addons_selection = set()
        self.addons = set()

        # for detecting later whether any changes have been made
        self._origEnv = None
        self._origAddons = set()

        # are we taking values (package list) from a kickstart file?
        self._kickstarted = flags.automatedInstall and self.data.packages.seen

        # Register event listeners to update our status on payload events
        payloadMgr.addListener(payloadMgr.STATE_START, self._payload_start)
        payloadMgr.addListener(payloadMgr.STATE_FINISHED, self._payload_finished)
        payloadMgr.addListener(payloadMgr.STATE_ERROR, self._payload_error)

    def initialize(self):
        """Kick off background initialization in the software watcher thread."""
        # Start a thread to wait for the payload and run the first, automatic
        # dependency check
        self.initialize_start()
        super().initialize()
        threadMgr.add(AnacondaThread(name=THREAD_SOFTWARE_WATCHER,
                                     target=self._initialize))

    def _initialize(self):
        """Runs in THREAD_SOFTWARE_WATCHER: pick a default environment and apply it."""
        threadMgr.wait(THREAD_PAYLOAD)

        if not self._kickstarted:
            # If an environment was specified in the instclass, use that.
            # Otherwise, select the first environment.
            if self.payload.environments:
                environments = self.payload.environments
                instclass = self.payload.instclass

                if instclass and instclass.defaultPackageEnvironment and \
                        instclass.defaultPackageEnvironment in environments:
                    self._selected_environment = instclass.defaultPackageEnvironment
                else:
                    self._selected_environment = environments[0]

        # Apply the initial selection
        self._apply()

        # Wait for the software selection thread that might be started by _apply().
        # We are already running in a thread, so it should not needlessly block anything
        # and only like this we can be sure we are really initialized.
        threadMgr.wait(THREAD_CHECK_SOFTWARE)

        # report that the software spoke has been initialized
        self.initialize_done()

    def _payload_start(self):
        # Source is changing, invalidate the software selection and clear the
        # errors
        self._selected_environment = None
        self._addons_selection = set()
        self.errors = []

    def _payload_finished(self):
        # Payload is ready again: resync our applied state from kickstart data
        # and drop the change-tracking baselines.
        self.environment = self.data.packages.environment
        self.addons = self._get_selected_addons()
        self._origEnv = None
        self._origAddons = None
        log.debug("Payload restarted, set new info and clear the old one.")

    def _payload_error(self):
        # Surface the payload manager's error in this spoke's status.
        self.errors = [payloadMgr.error]

    def _translate_env_name_to_id(self, environment):
        """ Return the id of the selected environment or None. """
        if environment is None:
            return None

        try:
            return self.payload.environmentId(environment)
        except NoSuchGroup:
            return None

    def _get_available_addons(self, environment_id):
        """ Return all add-ons of the specific environment. """
        addons = []

        if environment_id in self.payload.environmentAddons:
            # environmentAddons maps env id to lists of add-on group lists;
            # flatten them into one list.
            for addons_list in self.payload.environmentAddons[environment_id]:
                addons.extend(addons_list)

        return addons

    def _get_selected_addons(self):
        """ Return selected add-ons. """
        return {group.name for group in self.payload.data.packages.groupList}

    @property
    def showable(self):
        # Only meaningful for package-based payloads (e.g. dnf), not images.
        return isinstance(self.payload, PackagePayload)

    @property
    def status(self):
        """ Where we are in the process """
        if self.errors:
            return _("Error checking software selection")
        if not self.ready:
            return _("Processing...")
        if not self.payload.baseRepo:
            return _("Installation source not set up")
        if not self.txid_valid:
            return _("Source changed - please verify")

        if not self.environment:
            # Ks installs with %packages will have an env selected, unless
            # they did an install without a desktop environment. This should
            # catch that one case.
            if self._kickstarted:
                return _("Custom software selected")
            return _("Nothing selected")

        return self.payload.environmentDescription(self.environment)[0]

    @property
    def completed(self):
        """ Make sure our threads are done running and vars are set.

        WARNING: This can be called before the spoke is finished initializing
        if the spoke starts a thread. It should make sure it doesn't access
        things until they are completely setup.
        """
        processing_done = self.ready and not self.errors and self.txid_valid

        if flags.automatedInstall or self._kickstarted:
            return processing_done and self.payload.baseRepo and self.data.packages.seen
        else:
            return processing_done and self.payload.baseRepo and self.environment is not None

    def refresh(self, args=None):
        """ Refresh screen.

        args is None for the environment screen; otherwise it carries the
        list of available add-ons (see input(), which replaces the screen).
        """
        NormalTUISpoke.refresh(self, args)

        threadMgr.wait(THREAD_PAYLOAD)
        self._container = None

        if not self.payload.baseRepo:
            message = TextWidget(_("Installation source needs to be set up first."))
            self.window.add_with_separator(message)
            return

        threadMgr.wait(THREAD_CHECK_SOFTWARE)
        self._container = ListColumnContainer(2, columns_width=38, spacing=2)

        if args is None:
            msg = self._refresh_environments()
        else:
            msg = self._refresh_addons(args)

        self.window.add_with_separator(TextWidget(msg))
        self.window.add_with_separator(self._container)

    def _refresh_environments(self):
        """Populate the container with environment checkboxes; return the header."""
        environments = self.payload.environments

        for env in environments:
            name = self.payload.environmentDescription(env)[0]
            selected = (env == self._selected_environment)
            widget = CheckboxWidget(title="%s" % name, completed=selected)
            self._container.add(widget, callback=self._set_environment_callback,
                                data=env)

        return _("Base environment")

    def _refresh_addons(self, available_addons):
        """Populate the container with add-on checkboxes; return the header."""
        for addon_id in available_addons:
            name = self.payload.groupDescription(addon_id)[0]
            selected = addon_id in self._addons_selection
            widget = CheckboxWidget(title="%s" % name, completed=selected)
            self._container.add(widget, callback=self._set_addons_callback,
                                data=addon_id)

        if available_addons:
            return _("Add-ons for selected environment")
        else:
            return _("No add-ons to select.")

    def _set_environment_callback(self, data):
        # data is the environment chosen in the UI.
        self._selected_environment = data

    def _set_addons_callback(self, data):
        # data is an add-on id; toggle its membership in the selection.
        addon = data
        if addon not in self._addons_selection:
            self._addons_selection.add(addon)
        else:
            self._addons_selection.remove(addon)

    def input(self, args, key):
        """ Handle the input; this chooses the desktop environment. """
        if self._container is not None and self._container.process_user_input(key):
            self.redraw()
        else:
            # TRANSLATORS: 'c' to continue
            if key.lower() == C_('TUI|Spoke Navigation', 'c'):

                # No environment was selected, close
                if self._selected_environment is None:
                    self.close()
                # The environment was selected, switch screen
                elif args is None:
                    # Get addons for the selected environment
                    environment = self._selected_environment
                    environment_id = self._translate_env_name_to_id(environment)
                    addons = self._get_available_addons(environment_id)

                    # Switch the screen
                    ScreenHandler.replace_screen(self, addons)
                # The addons were selected, apply and close
                else:
                    self.apply()
                    self.close()
            else:
                return super().input(args, key)

        return InputState.PROCESSED

    @property
    def ready(self):
        """ If we're ready to move on. """
        return (not threadMgr.get(THREAD_PAYLOAD) and
                not threadMgr.get(THREAD_CHECK_SOFTWARE) and
                not threadMgr.get(THREAD_SOFTWARE_WATCHER))

    def apply(self):
        """ Apply our selections """
        # no longer using values from kickstart
        self._kickstarted = False

        self.data.packages.seen = True
        # _apply depends on a value of _kickstarted
        self._apply()

    def _apply(self):
        """ Private apply. """
        self.environment = self._selected_environment
        self.addons = self._addons_selection if self.environment is not None else set()

        log.debug("Apply called old env %s, new env %s and addons %s",
                  self._origEnv, self.environment, self.addons)

        if self.environment is None:
            return

        changed = False

        # Not a kickstart with packages, setup the selected environment and addons
        if not self._kickstarted:

            # Changed the environment or addons, clear and setup
            if not self._origEnv \
                    or self._origEnv != self.environment \
                    or set(self._origAddons) != set(self.addons):
                log.debug("Setting new software selection old env %s, new env %s and addons %s",
                          self._origEnv, self.environment, self.addons)

                # Reset any previous package/group selection before re-selecting.
                self.payload.data.packages.packageList = []
                self.data.packages.groupList = []
                self.payload.selectEnvironment(self.environment)

                environment_id = self._translate_env_name_to_id(self.environment)
                available_addons = self._get_available_addons(environment_id)

                for addon_id in available_addons:
                    if addon_id in self.addons:
                        self.payload.selectGroup(addon_id)

                changed = True

            self._origEnv = self.environment
            self._origAddons = set(self.addons)

        # Check the software selection
        if changed or self._kickstarted:
            threadMgr.add(AnacondaThread(name=THREAD_CHECK_SOFTWARE,
                                         target=self.checkSoftwareSelection))

    def checkSoftwareSelection(self):
        """ Depsolving """
        try:
            self.payload.checkSoftwareSelection()
        except DependencyError as e:
            self.errors = [str(e)]
            self._tx_id = None
            log.warning("Transaction error %s", str(e))
        else:
            self._tx_id = self.payload.txID

    @property
    def txid_valid(self):
        """ Whether we have a valid dnf tx id. """
        return self._tx_id == self.payload.txID
vathpela/anaconda
pyanaconda/ui/tui/spokes/software_selection.py
Python
gpl-2.0
13,495
# coding: utf-8

# In[ ]:

import numpy as np
import numexpr as ne


def sym_decorrelation_ne(W):
    """Symmetric decorrelation: return W' = (W W^T)^(-1/2) W, so W' W'^T = I."""
    K = np.dot(W, W.T)
    s, u = np.linalg.eigh(K)
    return (u @ np.diag(1.0/np.sqrt(s)) @ u.T) @ W


# logcosh
def g_logcosh_ne(wx, alpha):
    """derivatives of logcosh: g(u) = tanh(alpha * u)"""
    return ne.evaluate('tanh(alpha * wx)')


def gprime_logcosh_ne(wx, alpha):
    """second derivatives of logcosh: g'(u) = alpha * (1 - tanh(alpha*u)^2)"""
    return alpha * (1-ne.evaluate('tanh(alpha*wx)**2'))


# exp
def g_exp_ne(wx, alpha):
    """derivatives of exp: g(u) = u * exp(-u^2/2)"""
    return ne.evaluate('wx * exp(-wx**2/2)')


def gprime_exp_ne(wx, alpha):
    """second derivatives of exp: g'(u) = (1 - u^2) * exp(-u^2/2)"""
    return (1-np.square(wx)) * ne.evaluate('exp(-wx**2/2)')


def fastica_s(X, f, alpha=None, n_comp=None, maxit=200, tol=1e-04):
    """Run symmetric FastICA on data matrix X.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Observed mixed signals, one observation per row.
    f : str
        Negentropy approximation: "logcosh" or "exp".
    alpha : float
        Scale parameter of the logcosh nonlinearity (used by g_logcosh_ne;
        the exp nonlinearity ignores it).
    n_comp : int, optional
        Number of components to extract; defaults to min(n_samples, n_features)
        and is clamped to that value if larger.
    maxit : int
        Maximum number of fixed-point iterations.
    tol : float
        Convergence tolerance on max |diag(W1 W^T)| - 1.

    Returns
    -------
    dict with keys:
        'X' : whitened data, shape (n_samples, n_comp)
        'S' : estimated sources, shape (n_samples, n_comp)

    Raises
    ------
    ValueError
        If f is not one of the supported nonlinearities.

    Notes
    -----
    Uses np.random.normal for the initial unmixing matrix; seed the global
    NumPy RNG for reproducible results.
    """
    # Validate f up front: the original code only printed a message inside
    # the loop and then crashed with a NameError on the undefined gwx.
    if f not in ("logcosh", "exp"):
        raise ValueError(
            "doesn't support this approximation negentropy function: %r" % (f,))

    n, p = X.shape
    # check if n_comp is valid
    if n_comp is None:
        n_comp = min(n, p)
    elif n_comp > min(n, p):
        print("n_comp is too large")
        n_comp = min(n, p)

    # centering
    # by subtracting the mean of each column of X (array).
    X = X - X.mean(axis=0)[None, :]
    X = X.T

    # whitening: SVD of the covariance; s = (u, singular values, vh)
    s = np.linalg.svd(X @ (X.T) / n)
    D = np.diag(1/np.sqrt(s[1]))
    k = D @ (s[0].T)
    k = k[:n_comp, :]
    X1 = k @ X

    # initial random weight vector, symmetrically decorrelated
    w_init = np.random.normal(size=(n_comp, n_comp))
    W = sym_decorrelation_ne(w_init)
    lim = 1
    it = 0

    # The FastICA fixed-point iteration (symmetric / parallel update)
    while lim > tol and it < maxit:
        wx = W @ X1
        if f == "logcosh":
            gwx = g_logcosh_ne(wx, alpha)
            g_wx = gprime_logcosh_ne(wx, alpha)
        else:  # f == "exp" (validated above)
            gwx = g_exp_ne(wx, alpha)
            # BUG FIX: was gprimeg_exp_ne (undefined name -> NameError).
            g_wx = gprime_exp_ne(wx, alpha)
        W1 = np.dot(gwx, X1.T)/X1.shape[1] - np.dot(np.diag(g_wx.mean(axis=1)), W)
        W1 = sym_decorrelation_ne(W1)
        it = it + 1
        # Convergence: how far diag(W1 W^T) is from +-1.
        lim = np.max(np.abs(np.abs(np.diag(W1 @ W.T))) - 1.0)
        W = W1

    S = W @ X1
    # A = np.linalg.inv(W @ k)
    return {'X': X1.T, 'S': S.T}
663project/fastica_lz
fastica_lz/fastica_lz.py
Python
mit
2,082
#!/usr/bin/env python3
"""Pastafari task models: a Task row describing work to run on a server,
and a LogTask row recording progress/status reported back by the agent."""

from modules.pastafari.models import servers
from paramecio.cromosoma.webmodel import WebModel
from paramecio.cromosoma import corefields
from paramecio.cromosoma.extrafields.dictfield import DictField
from paramecio.cromosoma.extrafields.arrayfield import ArrayField
from paramecio.cromosoma.extrafields.datefield import DateField
from paramecio.cromosoma.extrafields.urlfield import UrlField
from paramecio.cromosoma.extrafields.ipfield import IpField
import requests
from settings import config
from modules.pastafari.libraries.configclass import config_task
from paramecio.citoplasma.urls import redirect, make_url

# Base URL of the task-execution endpoint, with the API key baked in:
# <server_task>/exec/<api_key>/<task_id>
server_task=config_task.server_task

server_task=server_task+'/exec/'+config_task.api_key+'/'

# Module folder used to build the progress-page URL; overridable via config.
pastafari_folder='pastafari'

if hasattr(config, 'pastafari_folder'):
    pastafari_folder=config.pastafari_folder


class Task(WebModel):
    """Model for a task to execute on a remote server: files to upload,
    commands to run, files/directories to delete, plus bookkeeping fields
    (status, error flags, callbacks, free-form extra data)."""

    def __init__(self, connection):
        super().__init__(connection)
        self.connection=connection
        # Required identification fields.
        self.register(corefields.CharField('name_task'), True)
        self.register(corefields.CharField('description_task'), True)
        self.register(corefields.CharField('codename_task'))
        # Payload of the task: files to push and commands to execute
        # (nested arrays of strings).
        self.register(ArrayField('files', ArrayField('', corefields.CharField(''))))
        self.register(ArrayField('commands_to_execute', ArrayField('', corefields.CharField(''))))
        self.register(ArrayField('delete_files', corefields.CharField('')))
        self.register(ArrayField('delete_directories', corefields.CharField('')))
        # Execution state flags.
        self.register(corefields.BooleanField('error'))
        self.register(corefields.BooleanField('status'))
        # URL the agent calls back when the task finishes.
        self.register(corefields.CharField('url_return'))
        self.register(IpField('server'))
        self.register(corefields.TextField('where_sql_server'))
        # Escaped because this field holds a raw SQL WHERE fragment.
        self.fields['where_sql_server'].escape=True
        self.register(corefields.IntegerField('num_servers'))
        # Credentials/path used on the target server.
        self.register(corefields.CharField('user'))
        self.register(corefields.CharField('password'))
        self.register(corefields.CharField('path'))
        self.register(corefields.BooleanField('one_time'))
        self.register(corefields.CharField('version'))
        # Names of hook functions run after/before/on-error of the task.
        self.register(corefields.CharField('post_func'))
        self.register(corefields.CharField('pre_func'))
        self.register(corefields.CharField('error_func'))
        self.register(DictField('extra_data', corefields.CharField('')))

        # In-object error reporting (not persisted by register()).
        self.error=False
        self.txt_error=''

    def run_task(self, url, name_task, codename_task, description_task, files, commands_to_execute, delete_files, delete_directories, server, pre_func, post_func, error_func, extra_data):
        """Insert the task row, trigger its execution on the task server and
        record the initial log entry; on success redirects the browser to the
        progress page. On failure sets self.error/self.txt_error instead.

        NOTE(review): codename_task is accepted but not stored here — confirm
        whether it should be part of the insert.
        """
        logtask=LogTask(self.connection)

        self.create_forms()
        logtask.create_forms()

        if self.insert({'name_task': name_task,'description_task': description_task, 'url_return': url, 'files': files, 'commands_to_execute': commands_to_execute, 'delete_files': delete_files, 'delete_directories': delete_directories, 'server': server, 'where_sql_server':'', 'pre_func': pre_func, 'post_func': post_func, 'error_func': error_func, 'extra_data': extra_data }):

            task_id=self.insert_id()

            #try:
            # NOTE(review): no timeout on this request — a hung task server
            # blocks the caller indefinitely; consider requests.get(..., timeout=...).
            r=requests.get(server_task+str(task_id))

            # The task server answers with the initial log data as JSON.
            arr_data=r.json()

            arr_data['task_id']=task_id

            if not logtask.insert(arr_data):
                self.error=True
                self.txt_error="Error:Wrong format of json data..."
            else:
                # Send the user to the live progress page for this task/server.
                redirect(make_url(pastafari_folder+'/showprogress/'+str(task_id)+'/'+server))

        else:
            self.error=True
            self.txt_error="Cannot insert the task"


class LogTask(WebModel):
    """Model for one progress/status report of a Task execution."""

    def __init__(self, connection):
        super().__init__(connection)
        self.register(DateField('date'))
        # Each log row belongs to exactly one Task.
        self.register(corefields.ForeignKeyField('task_id', Task(connection)), True)
        self.register(IpField('server'))
        # Progress in percent; no_progress marks tasks without a measurable progress.
        self.register(corefields.DoubleField('progress'))
        self.register(corefields.BooleanField('no_progress'))
        self.register(corefields.TextField('message'), True)
        self.register(corefields.BooleanField('error'))
        self.register(corefields.BooleanField('status'))
        # Free-form extra data attached to the report.
        self.register(DictField('data', corefields.CharField('data')))
paramecio/pastafari
models/tasks.py
Python
gpl-2.0
4,555
# Copyright 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo.config import cfg from neutron.common import constants as n_consts from neutron.common import exceptions as n_exc from neutron import context from neutron.plugins.vmware.api_client import exception from neutron.plugins.vmware.common import exceptions as p_exc from neutron.plugins.vmware.dbexts import lsn_db from neutron.plugins.vmware.dhcp_meta import constants from neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man from neutron.plugins.vmware.dhcp_meta import migration as mig_man from neutron.plugins.vmware.dhcp_meta import nsx from neutron.plugins.vmware.dhcp_meta import rpc from neutron.tests import base from neutron.tests.unit import testlib_api class DhcpMetadataBuilderTestCase(base.BaseTestCase): def setUp(self): super(DhcpMetadataBuilderTestCase, self).setUp() self.builder = mig_man.DhcpMetadataBuilder(mock.Mock(), mock.Mock()) self.network_id = 'foo_network_id' self.subnet_id = 'foo_subnet_id' self.router_id = 'foo_router_id' def test_dhcp_agent_get_all(self): expected = [] self.builder.plugin.list_dhcp_agents_hosting_network.return_value = ( {'agents': expected}) agents = self.builder.dhcp_agent_get_all(mock.ANY, self.network_id) self.assertEqual(expected, agents) def test_dhcp_port_get_all(self): expected = [] self.builder.plugin.get_ports.return_value = expected ports = self.builder.dhcp_port_get_all(mock.ANY, self.network_id) self.assertEqual(expected, 
ports) def test_router_id_get(self): port = { 'device_id': self.router_id, 'network_id': self.network_id, 'fixed_ips': [{'subnet_id': self.subnet_id}] } subnet = { 'id': self.subnet_id, 'network_id': self.network_id } self.builder.plugin.get_ports.return_value = [port] result = self.builder.router_id_get(context, subnet) self.assertEqual(self.router_id, result) def test_router_id_get_none_subnet(self): self.assertIsNone(self.builder.router_id_get(mock.ANY, None)) def test_router_id_get_none_no_router(self): self.builder.plugin.get_ports.return_value = [] subnet = {'network_id': self.network_id} self.assertIsNone(self.builder.router_id_get(mock.ANY, subnet)) def test_metadata_deallocate(self): self.builder.metadata_deallocate( mock.ANY, self.router_id, self.subnet_id) self.assertTrue(self.builder.plugin.remove_router_interface.call_count) def test_metadata_allocate(self): self.builder.metadata_allocate( mock.ANY, self.router_id, self.subnet_id) self.assertTrue(self.builder.plugin.add_router_interface.call_count) def test_dhcp_deallocate(self): agents = [{'id': 'foo_agent_id'}] ports = [{'id': 'foo_port_id'}] self.builder.dhcp_deallocate(mock.ANY, self.network_id, agents, ports) self.assertTrue( self.builder.plugin.remove_network_from_dhcp_agent.call_count) self.assertTrue(self.builder.plugin.delete_port.call_count) def _test_dhcp_allocate(self, subnet, expected_notify_count): with mock.patch.object(mig_man.nsx, 'handle_network_dhcp_access') as f: self.builder.dhcp_allocate(mock.ANY, self.network_id, subnet) self.assertTrue(f.call_count) self.assertEqual(expected_notify_count, self.builder.notifier.notify.call_count) def test_dhcp_allocate(self): subnet = {'network_id': self.network_id, 'id': self.subnet_id} self._test_dhcp_allocate(subnet, 2) def test_dhcp_allocate_none_subnet(self): self._test_dhcp_allocate(None, 0) class MigrationManagerTestCase(base.BaseTestCase): def setUp(self): super(MigrationManagerTestCase, self).setUp() self.manager = 
mig_man.MigrationManager(mock.Mock(), mock.Mock(), mock.Mock()) self.network_id = 'foo_network_id' self.router_id = 'foo_router_id' self.subnet_id = 'foo_subnet_id' self.mock_builder_p = mock.patch.object(self.manager, 'builder') self.mock_builder = self.mock_builder_p.start() def _test_validate(self, lsn_exists=False, ext_net=False, subnets=None): network = {'router:external': ext_net} self.manager.manager.lsn_exists.return_value = lsn_exists self.manager.plugin.get_network.return_value = network self.manager.plugin.get_subnets.return_value = subnets result = self.manager.validate(mock.ANY, self.network_id) if len(subnets): self.assertEqual(subnets[0], result) else: self.assertIsNone(result) def test_validate_no_subnets(self): self._test_validate(subnets=[]) def test_validate_with_one_subnet(self): self._test_validate(subnets=[{'cidr': '0.0.0.0/0'}]) def test_validate_raise_conflict_many_subnets(self): self.assertRaises(p_exc.LsnMigrationConflict, self._test_validate, subnets=[{'id': 'sub1'}, {'id': 'sub2'}]) def test_validate_raise_conflict_lsn_exists(self): self.assertRaises(p_exc.LsnMigrationConflict, self._test_validate, lsn_exists=True) def test_validate_raise_badrequest_external_net(self): self.assertRaises(n_exc.BadRequest, self._test_validate, ext_net=True) def test_validate_raise_badrequest_metadata_net(self): self.assertRaises(n_exc.BadRequest, self._test_validate, ext_net=False, subnets=[{'cidr': rpc.METADATA_SUBNET_CIDR}]) def _test_migrate(self, router, subnet, expected_calls): self.mock_builder.router_id_get.return_value = router self.manager.migrate(mock.ANY, self.network_id, subnet) # testing the exact the order of calls is important self.assertEqual(expected_calls, self.mock_builder.mock_calls) def test_migrate(self): subnet = { 'id': self.subnet_id, 'network_id': self.network_id } call_sequence = [ mock.call.router_id_get(mock.ANY, subnet), mock.call.metadata_deallocate( mock.ANY, self.router_id, self.subnet_id), 
mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), mock.call.dhcp_port_get_all(mock.ANY, self.network_id), mock.call.dhcp_deallocate( mock.ANY, self.network_id, mock.ANY, mock.ANY), mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), mock.call.metadata_allocate( mock.ANY, self.router_id, self.subnet_id) ] self._test_migrate(self.router_id, subnet, call_sequence) def test_migrate_no_router_uplink(self): subnet = { 'id': self.subnet_id, 'network_id': self.network_id } call_sequence = [ mock.call.router_id_get(mock.ANY, subnet), mock.call.dhcp_agent_get_all(mock.ANY, self.network_id), mock.call.dhcp_port_get_all(mock.ANY, self.network_id), mock.call.dhcp_deallocate( mock.ANY, self.network_id, mock.ANY, mock.ANY), mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet), ] self._test_migrate(None, subnet, call_sequence) def test_migrate_no_subnet(self): call_sequence = [ mock.call.router_id_get(mock.ANY, None), mock.call.dhcp_allocate(mock.ANY, self.network_id, None), ] self._test_migrate(None, None, call_sequence) def _test_report(self, lsn_attrs, expected): self.manager.manager.lsn_port_get.return_value = lsn_attrs report = self.manager.report(mock.ANY, self.network_id, self.subnet_id) self.assertEqual(expected, report) def test_report_for_lsn(self): self._test_report(('foo_lsn_id', 'foo_lsn_port_id'), {'ports': ['foo_lsn_port_id'], 'services': ['foo_lsn_id'], 'type': 'lsn'}) def test_report_for_lsn_without_lsn_port(self): self._test_report(('foo_lsn_id', None), {'ports': [], 'services': ['foo_lsn_id'], 'type': 'lsn'}) def _test_report_for_lsn_without_subnet(self, validated_subnet): with mock.patch.object(self.manager.plugin, 'get_subnets', return_value=validated_subnet): self.manager.manager.lsn_port_get.return_value = ( ('foo_lsn_id', 'foo_lsn_port_id')) report = self.manager.report(context, self.network_id) expected = { 'ports': ['foo_lsn_port_id'] if validated_subnet else [], 'services': ['foo_lsn_id'], 'type': 'lsn' } self.assertEqual(expected, 
report) def test_report_for_lsn_without_subnet_subnet_found(self): self._test_report_for_lsn_without_subnet([{'id': self.subnet_id}]) def test_report_for_lsn_without_subnet_subnet_not_found(self): self.manager.manager.lsn_get.return_value = 'foo_lsn_id' self._test_report_for_lsn_without_subnet(None) def test_report_for_dhcp_agent(self): self.manager.manager.lsn_port_get.return_value = (None, None) self.mock_builder.dhcp_agent_get_all.return_value = ( [{'id': 'foo_agent_id'}]) self.mock_builder.dhcp_port_get_all.return_value = ( [{'id': 'foo_dhcp_port_id'}]) result = self.manager.report(mock.ANY, self.network_id, self.subnet_id) expected = { 'ports': ['foo_dhcp_port_id'], 'services': ['foo_agent_id'], 'type': 'agent' } self.assertEqual(expected, result) class LsnManagerTestCase(base.BaseTestCase): def setUp(self): super(LsnManagerTestCase, self).setUp() self.net_id = 'foo_network_id' self.sub_id = 'foo_subnet_id' self.port_id = 'foo_port_id' self.lsn_id = 'foo_lsn_id' self.mac = 'aa:bb:cc:dd:ee:ff' self.switch_id = 'foo_switch_id' self.lsn_port_id = 'foo_lsn_port_id' self.tenant_id = 'foo_tenant_id' self.manager = lsn_man.LsnManager(mock.Mock()) self.context = context.get_admin_context() self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') self.mock_lsn_api = self.mock_lsn_api_p.start() self.mock_nsx_utils_p = mock.patch.object(lsn_man, 'nsx_utils') self.mock_nsx_utils = self.mock_nsx_utils_p.start() nsx.register_dhcp_opts(cfg) nsx.register_metadata_opts(cfg) def test_lsn_get(self): self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id expected = self.manager.lsn_get(mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) self.assertEqual(expected, self.lsn_id) def _test_lsn_get_raise_not_found_with_exc(self, exc): self.mock_lsn_api.lsn_for_network_get.side_effect = exc self.assertRaises(p_exc.LsnNotFound, self.manager.lsn_get, mock.ANY, self.net_id) 
self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) def test_lsn_get_raise_not_found_with_not_found(self): self._test_lsn_get_raise_not_found_with_exc(n_exc.NotFound) def test_lsn_get_raise_not_found_with_api_error(self): self._test_lsn_get_raise_not_found_with_exc(exception.NsxApiException) def _test_lsn_get_silent_raise_with_exc(self, exc): self.mock_lsn_api.lsn_for_network_get.side_effect = exc expected = self.manager.lsn_get( mock.ANY, self.net_id, raise_on_err=False) self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) self.assertIsNone(expected) def test_lsn_get_silent_raise_with_not_found(self): self._test_lsn_get_silent_raise_with_exc(n_exc.NotFound) def test_lsn_get_silent_raise_with_api_error(self): self._test_lsn_get_silent_raise_with_exc(exception.NsxApiException) def test_lsn_create(self): self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id self.manager.lsn_create(mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_create.assert_called_once_with( mock.ANY, self.net_id) def test_lsn_create_raise_api_error(self): self.mock_lsn_api.lsn_for_network_create.side_effect = ( exception.NsxApiException) self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_create, mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_create.assert_called_once_with( mock.ANY, self.net_id) def test_lsn_delete(self): self.manager.lsn_delete(mock.ANY, self.lsn_id) self.mock_lsn_api.lsn_delete.assert_called_once_with( mock.ANY, self.lsn_id) def _test_lsn_delete_with_exc(self, exc): self.mock_lsn_api.lsn_delete.side_effect = exc self.manager.lsn_delete(mock.ANY, self.lsn_id) self.mock_lsn_api.lsn_delete.assert_called_once_with( mock.ANY, self.lsn_id) def test_lsn_delete_with_not_found(self): self._test_lsn_delete_with_exc(n_exc.NotFound) def test_lsn_delete_api_exception(self): self._test_lsn_delete_with_exc(exception.NsxApiException) def test_lsn_delete_by_network(self): 
self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id with mock.patch.object(self.manager, 'lsn_delete') as f: self.manager.lsn_delete_by_network(mock.ANY, self.net_id) self.mock_lsn_api.lsn_for_network_get.assert_called_once_with( mock.ANY, self.net_id) f.assert_called_once_with(mock.ANY, self.lsn_id) def _test_lsn_delete_by_network_with_exc(self, exc): self.mock_lsn_api.lsn_for_network_get.side_effect = exc with mock.patch.object(lsn_man.LOG, 'warn') as l: self.manager.lsn_delete_by_network(mock.ANY, self.net_id) self.assertEqual(1, l.call_count) def test_lsn_delete_by_network_with_not_found(self): self._test_lsn_delete_by_network_with_exc(n_exc.NotFound) def test_lsn_delete_by_network_with_not_api_error(self): self._test_lsn_delete_by_network_with_exc(exception.NsxApiException) def test_lsn_port_get(self): self.mock_lsn_api.lsn_port_by_subnet_get.return_value = ( self.lsn_port_id) with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): expected = self.manager.lsn_port_get( mock.ANY, self.net_id, self.sub_id) self.assertEqual(expected, (self.lsn_id, self.lsn_port_id)) def test_lsn_port_get_lsn_not_found_on_raise(self): with mock.patch.object( self.manager, 'lsn_get', side_effect=p_exc.LsnNotFound(entity='network', entity_id=self.net_id)): self.assertRaises(p_exc.LsnNotFound, self.manager.lsn_port_get, mock.ANY, self.net_id, self.sub_id) def test_lsn_port_get_lsn_not_found_silent_raise(self): with mock.patch.object(self.manager, 'lsn_get', return_value=None): expected = self.manager.lsn_port_get( mock.ANY, self.net_id, self.sub_id, raise_on_err=False) self.assertEqual(expected, (None, None)) def test_lsn_port_get_port_not_found_on_raise(self): self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): self.assertRaises(p_exc.LsnPortNotFound, self.manager.lsn_port_get, mock.ANY, self.net_id, self.sub_id) def 
test_lsn_port_get_port_not_found_silent_raise(self): self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): expected = self.manager.lsn_port_get( mock.ANY, self.net_id, self.sub_id, raise_on_err=False) self.assertEqual(expected, (self.lsn_id, None)) def test_lsn_port_create(self): self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id expected = self.manager.lsn_port_create(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(expected, self.lsn_port_id) def _test_lsn_port_create_with_exc(self, exc, expected): self.mock_lsn_api.lsn_port_create.side_effect = exc self.assertRaises(expected, self.manager.lsn_port_create, mock.ANY, mock.ANY, mock.ANY) def test_lsn_port_create_with_not_found(self): self._test_lsn_port_create_with_exc(n_exc.NotFound, p_exc.LsnNotFound) def test_lsn_port_create_api_exception(self): self._test_lsn_port_create_with_exc(exception.NsxApiException, p_exc.NsxPluginException) def test_lsn_port_delete(self): self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) def _test_lsn_port_delete_with_exc(self, exc): self.mock_lsn_api.lsn_port_delete.side_effect = exc with mock.patch.object(lsn_man.LOG, 'warn') as l: self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) self.assertEqual(1, l.call_count) def test_lsn_port_delete_with_not_found(self): self._test_lsn_port_delete_with_exc(n_exc.NotFound) def test_lsn_port_delete_api_exception(self): self._test_lsn_port_delete_with_exc(exception.NsxApiException) def _test_lsn_port_dhcp_setup(self, ret_val, sub): self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id with mock.patch.object( self.manager, 'lsn_get', return_value=self.lsn_id): with mock.patch.object(lsn_man.switch_api, 
'get_port_by_neutron_tag'): expected = self.manager.lsn_port_dhcp_setup( mock.Mock(), mock.ANY, mock.ANY, mock.ANY, subnet_config=sub) self.assertEqual( 1, self.mock_lsn_api.lsn_port_create.call_count) self.assertEqual( 1, self.mock_lsn_api.lsn_port_plug_network.call_count) self.assertEqual(expected, ret_val) def test_lsn_port_dhcp_setup(self): self._test_lsn_port_dhcp_setup((self.lsn_id, self.lsn_port_id), None) def test_lsn_port_dhcp_setup_with_config(self): with mock.patch.object(self.manager, 'lsn_port_dhcp_configure') as f: self._test_lsn_port_dhcp_setup(None, mock.ANY) self.assertEqual(1, f.call_count) def test_lsn_port_dhcp_setup_with_not_found(self): self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag') as f: f.side_effect = n_exc.NotFound self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_dhcp_setup, mock.Mock(), mock.ANY, mock.ANY, mock.ANY) def test_lsn_port_dhcp_setup_with_conflict(self): self.mock_lsn_api.lsn_port_plug_network.side_effect = ( p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'): with mock.patch.object(self.manager, 'lsn_port_delete') as g: self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_dhcp_setup, mock.Mock(), mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, g.call_count) def _test_lsn_port_dhcp_configure_with_subnet( self, expected, dns=None, gw=None, routes=None): subnet = { 'enable_dhcp': True, 'dns_nameservers': dns or [], 'gateway_ip': gw, 'host_routes': routes } self.manager.lsn_port_dhcp_configure(mock.ANY, self.lsn_id, self.lsn_port_id, subnet) self.mock_lsn_api.lsn_port_dhcp_configure.assert_called_once_with( mock.ANY, self.lsn_id, self.lsn_port_id, subnet['enable_dhcp'], expected) def test_lsn_port_dhcp_configure(self): expected = { 'routers': '127.0.0.1', 
'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name': cfg.CONF.NSX_DHCP.domain_name } self._test_lsn_port_dhcp_configure_with_subnet( expected, dns=[], gw='127.0.0.1', routes=[]) def test_lsn_port_dhcp_configure_gatewayless(self): expected = { 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name': cfg.CONF.NSX_DHCP.domain_name } self._test_lsn_port_dhcp_configure_with_subnet(expected, gw=None) def test_lsn_port_dhcp_configure_with_extra_dns_servers(self): expected = { 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name_servers': '8.8.8.8,9.9.9.9', 'domain_name': cfg.CONF.NSX_DHCP.domain_name } self._test_lsn_port_dhcp_configure_with_subnet( expected, dns=['8.8.8.8', '9.9.9.9']) def test_lsn_port_dhcp_configure_with_host_routes(self): expected = { 'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time, 'domain_name': cfg.CONF.NSX_DHCP.domain_name, 'classless_static_routes': '8.8.8.8,9.9.9.9' } self._test_lsn_port_dhcp_configure_with_subnet( expected, routes=['8.8.8.8', '9.9.9.9']) def _test_lsn_metadata_configure(self, is_enabled): with mock.patch.object(self.manager, 'lsn_port_dispose') as f: self.manager.plugin.get_subnet.return_value = ( {'network_id': self.net_id}) self.manager.lsn_metadata_configure(mock.ANY, self.sub_id, is_enabled) expected = { 'metadata_server_port': 8775, 'metadata_server_ip': '127.0.0.1', 'metadata_proxy_shared_secret': '' } self.mock_lsn_api.lsn_metadata_configure.assert_called_once_with( mock.ANY, mock.ANY, is_enabled, expected) if is_enabled: self.assertEqual( 1, self.mock_lsn_api.lsn_port_by_subnet_get.call_count) else: self.assertEqual(1, f.call_count) def test_lsn_metadata_configure_enabled(self): self._test_lsn_metadata_configure(True) def test_lsn_metadata_configure_disabled(self): self._test_lsn_metadata_configure(False) def test_lsn_metadata_configure_not_found(self): self.mock_lsn_api.lsn_metadata_configure.side_effect = ( p_exc.LsnNotFound(entity='lsn', 
entity_id=self.lsn_id)) self.manager.plugin.get_subnet.return_value = ( {'network_id': self.net_id}) self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_metadata_configure, mock.ANY, self.sub_id, True) def test_lsn_port_metadata_setup(self): subnet = { 'cidr': '0.0.0.0/0', 'id': self.sub_id, 'network_id': self.net_id, 'tenant_id': self.tenant_id } expected_data = { 'subnet_id': subnet['id'], 'ip_address': subnet['cidr'], 'mac_address': constants.METADATA_MAC } self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: with mock.patch.object(self.manager, 'lsn_port_create') as g: f.return_value = {'uuid': self.port_id} self.manager.lsn_port_metadata_setup( self.context, self.lsn_id, subnet) (self.mock_lsn_api.lsn_port_plug_network. assert_called_once_with(mock.ANY, self.lsn_id, mock.ANY, self.port_id)) g.assert_called_once_with( self.context, self.lsn_id, expected_data) def test_lsn_port_metadata_setup_raise_not_found(self): subnet = { 'cidr': '0.0.0.0/0', 'id': self.sub_id, 'network_id': self.net_id, 'tenant_id': self.tenant_id } self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: f.side_effect = n_exc.NotFound self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_metadata_setup, mock.Mock(), self.lsn_id, subnet) def test_lsn_port_metadata_setup_raise_conflict(self): subnet = { 'cidr': '0.0.0.0/0', 'id': self.sub_id, 'network_id': self.net_id, 'tenant_id': self.tenant_id } self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id] with mock.patch.object(lsn_man.switch_api, 'create_lport') as f: with mock.patch.object(lsn_man.switch_api, 'delete_port') as g: f.return_value = {'uuid': self.port_id} self.mock_lsn_api.lsn_port_plug_network.side_effect = ( p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id)) self.assertRaises(p_exc.PortConfigurationError, 
self.manager.lsn_port_metadata_setup, mock.Mock(), self.lsn_id, subnet) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) self.assertEqual(1, g.call_count) def _test_lsn_port_dispose_with_values(self, lsn_id, lsn_port_id, count): with mock.patch.object(self.manager, 'lsn_port_get_by_mac', return_value=(lsn_id, lsn_port_id)): self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) self.assertEqual(count, self.mock_lsn_api.lsn_port_delete.call_count) def test_lsn_port_dispose(self): self._test_lsn_port_dispose_with_values( self.lsn_id, self.lsn_port_id, 1) def test_lsn_port_dispose_meta_mac(self): self.mac = constants.METADATA_MAC with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag') as f: with mock.patch.object(lsn_man.switch_api, 'delete_port') as g: f.return_value = {'uuid': self.port_id} self._test_lsn_port_dispose_with_values( self.lsn_id, self.lsn_port_id, 1) f.assert_called_once_with( mock.ANY, self.net_id, constants.METADATA_PORT_ID) g.assert_called_once_with(mock.ANY, self.net_id, self.port_id) def test_lsn_port_dispose_lsn_not_found(self): self._test_lsn_port_dispose_with_values(None, None, 0) def test_lsn_port_dispose_lsn_port_not_found(self): self._test_lsn_port_dispose_with_values(self.lsn_id, None, 0) def test_lsn_port_dispose_api_error(self): self.mock_lsn_api.lsn_port_delete.side_effect = ( exception.NsxApiException) with mock.patch.object(lsn_man.LOG, 'warn') as l: self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) self.assertEqual(1, l.call_count) def test_lsn_port_host_conf(self): with mock.patch.object(self.manager, 'lsn_port_get', return_value=(self.lsn_id, self.lsn_port_id)): f = mock.Mock() self.manager._lsn_port_host_conf(mock.ANY, self.net_id, self.sub_id, mock.ANY, f) self.assertEqual(1, f.call_count) def test_lsn_port_host_conf_lsn_port_not_found(self): with mock.patch.object( self.manager, 'lsn_port_get', return_value=(None, None)) as f: self.manager._lsn_port_host_conf( mock.ANY, 
self.net_id, self.sub_id, mock.ANY, mock.Mock()) self.assertEqual(1, f.call_count) def _test_lsn_port_update(self, dhcp=None, meta=None): self.manager.lsn_port_update( mock.ANY, self.net_id, self.sub_id, dhcp, meta) count = 1 if dhcp else 0 count = count + 1 if meta else count self.assertEqual(count, (self.mock_lsn_api. lsn_port_host_entries_update.call_count)) def test_lsn_port_update(self): self._test_lsn_port_update() def test_lsn_port_update_dhcp_meta(self): self._test_lsn_port_update(mock.ANY, mock.ANY) def test_lsn_port_update_dhcp_and_nometa(self): self._test_lsn_port_update(mock.ANY, None) def test_lsn_port_update_nodhcp_and_nmeta(self): self._test_lsn_port_update(None, mock.ANY) def test_lsn_port_update_raise_error(self): self.mock_lsn_api.lsn_port_host_entries_update.side_effect = ( exception.NsxApiException) self.assertRaises(p_exc.PortConfigurationError, self.manager.lsn_port_update, mock.ANY, mock.ANY, mock.ANY, mock.ANY) class PersistentLsnManagerTestCase(testlib_api.SqlTestCase): def setUp(self): super(PersistentLsnManagerTestCase, self).setUp() self.net_id = 'foo_network_id' self.sub_id = 'foo_subnet_id' self.port_id = 'foo_port_id' self.lsn_id = 'foo_lsn_id' self.mac = 'aa:bb:cc:dd:ee:ff' self.lsn_port_id = 'foo_lsn_port_id' self.tenant_id = 'foo_tenant_id' nsx.register_dhcp_opts(cfg) nsx.register_metadata_opts(cfg) lsn_man.register_lsn_opts(cfg) self.manager = lsn_man.PersistentLsnManager(mock.Mock()) self.context = context.get_admin_context() self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api') self.mock_lsn_api = self.mock_lsn_api_p.start() def test_lsn_get(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) result = self.manager.lsn_get(self.context, self.net_id) self.assertEqual(self.lsn_id, result) def test_lsn_get_raise_not_found(self): self.assertRaises(p_exc.LsnNotFound, self.manager.lsn_get, self.context, self.net_id) def test_lsn_get_silent_not_found(self): result = self.manager.lsn_get( self.context, self.net_id, 
raise_on_err=False) self.assertIsNone(result) def test_lsn_get_sync_on_missing(self): cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') self.manager = lsn_man.PersistentLsnManager(mock.Mock()) with mock.patch.object(self.manager, 'lsn_save') as f: self.manager.lsn_get(self.context, self.net_id, raise_on_err=True) self.assertTrue(self.mock_lsn_api.lsn_for_network_get.call_count) self.assertTrue(f.call_count) def test_lsn_save(self): self.manager.lsn_save(self.context, self.net_id, self.lsn_id) result = self.manager.lsn_get(self.context, self.net_id) self.assertEqual(self.lsn_id, result) def test_lsn_create(self): self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id with mock.patch.object(self.manager, 'lsn_save') as f: result = self.manager.lsn_create(self.context, self.net_id) self.assertTrue( self.mock_lsn_api.lsn_for_network_create.call_count) self.assertTrue(f.call_count) self.assertEqual(self.lsn_id, result) def test_lsn_create_failure(self): with mock.patch.object( self.manager, 'lsn_save', side_effect=p_exc.NsxPluginException(err_msg='')): self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_create, self.context, self.net_id) self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) def test_lsn_delete(self): self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id self.manager.lsn_create(self.context, self.net_id) self.manager.lsn_delete(self.context, self.lsn_id) self.assertIsNone(self.manager.lsn_get( self.context, self.net_id, raise_on_err=False)) def test_lsn_delete_not_existent(self): self.manager.lsn_delete(self.context, self.lsn_id) self.assertTrue(self.mock_lsn_api.lsn_delete.call_count) def test_lsn_port_get(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) res = self.manager.lsn_port_get(self.context, self.net_id, self.sub_id) self.assertEqual((self.lsn_id, self.lsn_port_id), res) def 
test_lsn_port_get_raise_not_found(self): self.assertRaises(p_exc.LsnPortNotFound, self.manager.lsn_port_get, self.context, self.net_id, self.sub_id) def test_lsn_port_get_silent_not_found(self): result = self.manager.lsn_port_get( self.context, self.net_id, self.sub_id, raise_on_err=False) self.assertEqual((None, None), result) def test_lsn_port_get_sync_on_missing(self): return cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN') self.manager = lsn_man.PersistentLsnManager(mock.Mock()) self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id self.mock_lsn_api.lsn_port_by_subnet_get.return_value = ( self.lsn_id, self.lsn_port_id) with mock.patch.object(self.manager, 'lsn_save') as f: with mock.patch.object(self.manager, 'lsn_port_save') as g: self.manager.lsn_port_get( self.context, self.net_id, self.sub_id) self.assertTrue( self.mock_lsn_api.lsn_port_by_subnet_get.call_count) self.assertTrue( self.mock_lsn_api.lsn_port_info_get.call_count) self.assertTrue(f.call_count) self.assertTrue(g.call_count) def test_lsn_port_get_by_mac(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) res = self.manager.lsn_port_get_by_mac( self.context, self.net_id, self.mac) self.assertEqual((self.lsn_id, self.lsn_port_id), res) def test_lsn_port_get_by_mac_raise_not_found(self): self.assertRaises(p_exc.LsnPortNotFound, self.manager.lsn_port_get_by_mac, self.context, self.net_id, self.sub_id) def test_lsn_port_get_by_mac_silent_not_found(self): result = self.manager.lsn_port_get_by_mac( self.context, self.net_id, self.sub_id, raise_on_err=False) self.assertEqual((None, None), result) def test_lsn_port_create(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac} with mock.patch.object(self.manager, 'lsn_port_save') as f: result = 
self.manager.lsn_port_create( self.context, self.net_id, subnet) self.assertTrue( self.mock_lsn_api.lsn_port_create.call_count) self.assertTrue(f.call_count) self.assertEqual(self.lsn_port_id, result) def test_lsn_port_create_failure(self): subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac} with mock.patch.object( self.manager, 'lsn_port_save', side_effect=p_exc.NsxPluginException(err_msg='')): self.assertRaises(p_exc.NsxPluginException, self.manager.lsn_port_create, self.context, self.net_id, subnet) self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) def test_lsn_port_delete(self): lsn_db.lsn_add(self.context, self.net_id, self.lsn_id) lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) self.manager.lsn_port_delete( self.context, self.lsn_id, self.lsn_port_id) self.assertEqual((None, None), self.manager.lsn_port_get( self.context, self.lsn_id, self.sub_id, raise_on_err=False)) def test_lsn_port_delete_not_existent(self): self.manager.lsn_port_delete( self.context, self.lsn_id, self.lsn_port_id) self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count) def test_lsn_port_save(self): self.manager.lsn_save(self.context, self.net_id, self.lsn_id) self.manager.lsn_port_save(self.context, self.lsn_port_id, self.sub_id, self.mac, self.lsn_id) result = self.manager.lsn_port_get( self.context, self.net_id, self.sub_id, raise_on_err=False) self.assertEqual((self.lsn_id, self.lsn_port_id), result) class DhcpAgentNotifyAPITestCase(base.BaseTestCase): def setUp(self): super(DhcpAgentNotifyAPITestCase, self).setUp() self.notifier = nsx.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock()) self.plugin = self.notifier.plugin self.lsn_manager = self.notifier.lsn_manager def _test_notify_port_update( self, ports, expected_count, expected_args=None): port = { 'id': 'foo_port_id', 'network_id': 'foo_network_id', 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] } self.notifier.plugin.get_ports.return_value = ports 
self.notifier.notify(mock.ANY, {'port': port}, 'port.update.end') self.lsn_manager.lsn_port_update.assert_has_calls(expected_args) def test_notify_ports_update_no_ports(self): self._test_notify_port_update(None, 0, []) self._test_notify_port_update([], 0, []) def test_notify_ports_update_one_port(self): ports = [{ 'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': 'foo_device_id', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[{'ip_address': '1.2.3.4', 'mac_address': 'fa:16:3e:da:1d:46'}], meta=[{'instance_id': 'foo_device_id', 'ip_address': '1.2.3.4'}]) self._test_notify_port_update(ports, 1, call_args) def test_notify_ports_update_ports_with_empty_device_id(self): ports = [{ 'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': '', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[{'ip_address': '1.2.3.4', 'mac_address': 'fa:16:3e:da:1d:46'}], meta=[] ) self._test_notify_port_update(ports, 1, call_args) def test_notify_ports_update_ports_with_no_fixed_ips(self): ports = [{ 'fixed_ips': [], 'device_id': 'foo_device_id', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) self._test_notify_port_update(ports, 1, call_args) def test_notify_ports_update_ports_with_no_fixed_ips_and_no_device(self): ports = [{ 'fixed_ips': [], 'device_id': '', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46' }] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) self._test_notify_port_update(ports, 0, call_args) def test_notify_ports_update_with_special_ports(self): ports = [{'fixed_ips': [], 'device_id': '', 'device_owner': n_consts.DEVICE_OWNER_DHCP, 'mac_address': 
'fa:16:3e:da:1d:46'}, {'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': 'foo_device_id', 'device_owner': n_consts.DEVICE_OWNER_ROUTER_GW, 'mac_address': 'fa:16:3e:da:1d:46'}] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[]) self._test_notify_port_update(ports, 0, call_args) def test_notify_ports_update_many_ports(self): ports = [{'fixed_ips': [], 'device_id': '', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46'}, {'fixed_ips': [{'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}], 'device_id': 'foo_device_id', 'device_owner': 'foo_device_owner', 'mac_address': 'fa:16:3e:da:1d:46'}] call_args = mock.call( mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[{'ip_address': '1.2.3.4', 'mac_address': 'fa:16:3e:da:1d:46'}], meta=[{'instance_id': 'foo_device_id', 'ip_address': '1.2.3.4'}]) self._test_notify_port_update(ports, 1, call_args) def _test_notify_subnet_action(self, action): with mock.patch.object(self.notifier, '_subnet_%s' % action) as f: self.notifier._handle_subnet_dhcp_access[action] = f subnet = {'subnet': mock.ANY} self.notifier.notify( mock.ANY, subnet, 'subnet.%s.end' % action) f.assert_called_once_with(mock.ANY, subnet) def test_notify_subnet_create(self): self._test_notify_subnet_action('create') def test_notify_subnet_update(self): self._test_notify_subnet_action('update') def test_notify_subnet_delete(self): self._test_notify_subnet_action('delete') def _test_subnet_create(self, enable_dhcp, exc=None, exc_obj=None, call_notify=True): subnet = { 'id': 'foo_subnet_id', 'enable_dhcp': enable_dhcp, 'network_id': 'foo_network_id', 'tenant_id': 'foo_tenant_id', 'cidr': '0.0.0.0/0' } if exc: self.plugin.create_port.side_effect = exc_obj or exc self.assertRaises(exc, self.notifier.notify, mock.ANY, {'subnet': subnet}, 'subnet.create.end') self.plugin.delete_subnet.assert_called_with( mock.ANY, subnet['id']) else: if call_notify: self.notifier.notify( mock.ANY, 
{'subnet': subnet}, 'subnet.create.end') if enable_dhcp: dhcp_port = { 'name': '', 'admin_state_up': True, 'network_id': 'foo_network_id', 'tenant_id': 'foo_tenant_id', 'device_owner': n_consts.DEVICE_OWNER_DHCP, 'mac_address': mock.ANY, 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}], 'device_id': '' } self.plugin.create_port.assert_called_once_with( mock.ANY, {'port': dhcp_port}) else: self.assertEqual(0, self.plugin.create_port.call_count) def test_subnet_create_enabled_dhcp(self): self._test_subnet_create(True) def test_subnet_create_disabled_dhcp(self): self._test_subnet_create(False) def test_subnet_create_raise_port_config_error(self): with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2, 'delete_port') as d: self._test_subnet_create( True, exc=n_exc.Conflict, exc_obj=p_exc.PortConfigurationError(lsn_id='foo_lsn_id', net_id='foo_net_id', port_id='foo_port_id')) d.assert_called_once_with(self.plugin, mock.ANY, 'foo_port_id') def test_subnet_update(self): subnet = { 'id': 'foo_subnet_id', 'network_id': 'foo_network_id', } self.lsn_manager.lsn_port_get.return_value = ('foo_lsn_id', 'foo_lsn_port_id') self.notifier.notify( mock.ANY, {'subnet': subnet}, 'subnet.update.end') self.lsn_manager.lsn_port_dhcp_configure.assert_called_once_with( mock.ANY, 'foo_lsn_id', 'foo_lsn_port_id', subnet) def test_subnet_update_raise_lsn_not_found(self): subnet = { 'id': 'foo_subnet_id', 'network_id': 'foo_network_id', } self.lsn_manager.lsn_port_get.side_effect = ( p_exc.LsnNotFound(entity='network', entity_id=subnet['network_id'])) self.assertRaises(p_exc.LsnNotFound, self.notifier.notify, mock.ANY, {'subnet': subnet}, 'subnet.update.end') def _test_subnet_update_lsn_port_not_found(self, dhcp_port): subnet = { 'id': 'foo_subnet_id', 'enable_dhcp': True, 'network_id': 'foo_network_id', 'tenant_id': 'foo_tenant_id' } self.lsn_manager.lsn_port_get.side_effect = ( p_exc.LsnPortNotFound(lsn_id='foo_lsn_id', entity='subnet', entity_id=subnet['id'])) 
self.notifier.plugin.get_ports.return_value = dhcp_port count = 0 if dhcp_port is None else 1 with mock.patch.object(nsx, 'handle_port_dhcp_access') as h: self.notifier.notify( mock.ANY, {'subnet': subnet}, 'subnet.update.end') self.assertEqual(count, h.call_count) if not dhcp_port: self._test_subnet_create(enable_dhcp=True, exc=None, call_notify=False) def test_subnet_update_lsn_port_not_found_without_dhcp_port(self): self._test_subnet_update_lsn_port_not_found(None) def test_subnet_update_lsn_port_not_found_with_dhcp_port(self): self._test_subnet_update_lsn_port_not_found([mock.ANY]) def _test_subnet_delete(self, ports=None): subnet = { 'id': 'foo_subnet_id', 'network_id': 'foo_network_id', 'cidr': '0.0.0.0/0' } self.plugin.get_ports.return_value = ports self.notifier.notify(mock.ANY, {'subnet': subnet}, 'subnet.delete.end') filters = { 'network_id': [subnet['network_id']], 'device_owner': [n_consts.DEVICE_OWNER_DHCP] } self.plugin.get_ports.assert_called_once_with( mock.ANY, filters=filters) if ports: self.plugin.delete_port.assert_called_once_with( mock.ANY, ports[0]['id']) else: self.assertEqual(0, self.plugin.delete_port.call_count) def test_subnet_delete_enabled_dhcp_no_ports(self): self._test_subnet_delete() def test_subnet_delete_enabled_dhcp_with_dhcp_port(self): self._test_subnet_delete([{'id': 'foo_port_id'}]) class DhcpTestCase(base.BaseTestCase): def setUp(self): super(DhcpTestCase, self).setUp() self.plugin = mock.Mock() self.plugin.lsn_manager = mock.Mock() def test_handle_create_network(self): network = {'id': 'foo_network_id'} nsx.handle_network_dhcp_access( self.plugin, mock.ANY, network, 'create_network') self.plugin.lsn_manager.lsn_create.assert_called_once_with( mock.ANY, network['id']) def test_handle_create_network_router_external(self): network = {'id': 'foo_network_id', 'router:external': True} nsx.handle_network_dhcp_access( self.plugin, mock.ANY, network, 'create_network') self.assertFalse(self.plugin.lsn_manager.lsn_create.call_count) 
    def test_handle_delete_network(self):
        # Deleting a network must tear down its LSN via the manager.
        network_id = 'foo_network_id'
        self.plugin.lsn_manager.lsn_delete_by_network.return_value = (
            'foo_lsn_id')
        nsx.handle_network_dhcp_access(
            self.plugin, mock.ANY, network_id, 'delete_network')
        self.plugin.lsn_manager.lsn_delete_by_network.assert_called_once_with(
            mock.ANY, 'foo_network_id')

    def _test_handle_create_dhcp_owner_port(self, exc=None):
        """Exercise create_port for a DHCP-owned port.

        When ``exc`` is None the LSN port DHCP setup must be invoked with
        the expected payload; otherwise the injected failure must surface
        as a NeutronException.
        """
        subnet = {
            'cidr': '0.0.0.0/0',
            'id': 'foo_subnet_id'
        }
        port = {
            'id': 'foo_port_id',
            'device_owner': n_consts.DEVICE_OWNER_DHCP,
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'network_id': 'foo_network_id',
            'fixed_ips': [{'subnet_id': subnet['id']}]
        }
        # For DHCP ports the "ip_address" handed to the LSN setup is the
        # subnet CIDR, not a host address.
        expected_data = {
            'subnet_id': subnet['id'],
            'ip_address': subnet['cidr'],
            'mac_address': port['mac_address']
        }
        self.plugin.get_subnet.return_value = subnet
        if exc is None:
            nsx.handle_port_dhcp_access(
                self.plugin, mock.ANY, port, 'create_port')
            (self.plugin.lsn_manager.lsn_port_dhcp_setup.
             assert_called_once_with(mock.ANY, port['network_id'],
                                     port['id'], expected_data, subnet))
        else:
            self.plugin.lsn_manager.lsn_port_dhcp_setup.side_effect = exc
            self.assertRaises(n_exc.NeutronException,
                              nsx.handle_port_dhcp_access,
                              self.plugin, mock.ANY, port, 'create_port')

    def test_handle_create_dhcp_owner_port(self):
        self._test_handle_create_dhcp_owner_port()

    def test_handle_create_dhcp_owner_port_raise_port_config_error(self):
        config_error = p_exc.PortConfigurationError(lsn_id='foo_lsn_id',
                                                    net_id='foo_net_id',
                                                    port_id='foo_port_id')
        self._test_handle_create_dhcp_owner_port(exc=config_error)

    def test_handle_delete_dhcp_owner_port(self):
        # Deleting a DHCP port disposes the LSN port keyed by MAC address.
        port = {
            'id': 'foo_port_id',
            'device_owner': n_consts.DEVICE_OWNER_DHCP,
            'network_id': 'foo_network_id',
            'fixed_ips': [],
            'mac_address': 'aa:bb:cc:dd:ee:ff'
        }
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port')
        self.plugin.lsn_manager.lsn_port_dispose.assert_called_once_with(
            mock.ANY, port['network_id'], port['mac_address'])

    def _test_handle_user_port(self, action, handler):
        """A regular (user) port with DHCP enabled triggers ``handler``."""
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'network_id': 'foo_network_id',
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                           'ip_address': '1.2.3.4'}]
        }
        expected_data = {
            'ip_address': '1.2.3.4',
            'mac_address': 'aa:bb:cc:dd:ee:ff'
        }
        self.plugin.get_subnet.return_value = {'enable_dhcp': True}
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
        handler.assert_called_once_with(
            mock.ANY, port['network_id'], 'foo_subnet_id', expected_data)

    def test_handle_create_user_port(self):
        self._test_handle_user_port(
            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)

    def test_handle_delete_user_port(self):
        self._test_handle_user_port(
            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)

    def _test_handle_user_port_disabled_dhcp(self, action, handler):
        """With DHCP disabled on the subnet, ``handler`` must not be called."""
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'network_id': 'foo_network_id',
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                           'ip_address': '1.2.3.4'}]
        }
        self.plugin.get_subnet.return_value = {'enable_dhcp': False}
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
        self.assertEqual(0, handler.call_count)

    def test_handle_create_user_port_disabled_dhcp(self):
        self._test_handle_user_port_disabled_dhcp(
            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)

    def test_handle_delete_user_port_disabled_dhcp(self):
        self._test_handle_user_port_disabled_dhcp(
            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)

    def _test_handle_user_port_no_fixed_ips(self, action, handler):
        """Ports without fixed IPs are ignored by DHCP access handling."""
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'network_id': 'foo_network_id',
            'fixed_ips': []
        }
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
        self.assertEqual(0, handler.call_count)

    def test_handle_create_user_port_no_fixed_ips(self):
        self._test_handle_user_port_no_fixed_ips(
            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)

    def test_handle_delete_user_port_no_fixed_ips(self):
        self._test_handle_user_port_no_fixed_ips(
            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)


class MetadataTestCase(base.BaseTestCase):
    """Tests for metadata access handling (nsx.handle_*_metadata_access)."""

    def setUp(self):
        super(MetadataTestCase, self).setUp()
        self.plugin = mock.Mock()
        self.plugin.lsn_manager = mock.Mock()

    def _test_handle_port_metadata_access_special_owners(
            self, owner, dev_id='foo_device_id', ips=None):
        """Ports with special owners, no device id or no fixed IPs must not
        touch the LSN metadata host bindings."""
        port = {
            'id': 'foo_port_id',
            'device_owner': owner,
            'device_id': dev_id,
            'fixed_ips': ips or []
        }
        nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)

    def test_handle_port_metadata_access_external_network(self):
        # Ports on router:external networks never get metadata host entries.
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'device_id': 'foo_device_id',
            'network_id': 'foo_network_id',
            'fixed_ips': [{'subnet_id': 'foo_subnet'}]
        }
        self.plugin.get_network.return_value = {'router:external': True}
        nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)

    def test_handle_port_metadata_access_dhcp_port(self):
        # NOTE(review): the fixed-ips list below is passed positionally, so it
        # binds to the helper's ``dev_id`` parameter and ``ips`` stays None
        # (the port ends up with no fixed_ips). Possibly intended as
        # ``ips=[{'subnet_id': 'foo_subnet'}]`` — confirm against the plugin's
        # skip logic before changing.
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_DHCP, [{'subnet_id': 'foo_subnet'}])

    def test_handle_port_metadata_access_router_port(self):
        # NOTE(review): same positional-argument concern as the DHCP-port
        # test above.
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_ROUTER_INTF, [{'subnet_id': 'foo_subnet'}])

    def test_handle_port_metadata_access_no_device_id(self):
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_DHCP, '')

    def test_handle_port_metadata_access_no_fixed_ips(self):
        self._test_handle_port_metadata_access_special_owners(
            'foo', 'foo', None)

    def _test_handle_port_metadata_access(self, is_delete, raise_exc=False):
        """Exercise metadata host add/remove for a regular port.

        On failure (``raise_exc``) a create must roll back by deleting the
        port, while a delete must not.
        """
        port = {
            'id': 'foo_port_id',
            # NOTE(review): 'foo_device_id' as device_owner looks like a
            # copy/paste slip (elsewhere 'foo_device_owner' is used); it is
            # harmless here since the owner is not special.
            'device_owner': 'foo_device_id',
            'network_id': 'foo_network_id',
            'device_id': 'foo_device_id',
            'tenant_id': 'foo_tenant_id',
            'fixed_ips': [
                {'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}
            ]
        }
        meta = {
            'instance_id': port['device_id'],
            'tenant_id': port['tenant_id'],
            'ip_address': port['fixed_ips'][0]['ip_address']
        }
        self.plugin.get_network.return_value = {'router:external': False}
        if is_delete:
            mock_func = self.plugin.lsn_manager.lsn_port_meta_host_remove
        else:
            mock_func = self.plugin.lsn_manager.lsn_port_meta_host_add
        if raise_exc:
            mock_func.side_effect = p_exc.PortConfigurationError(
                lsn_id='foo_lsn_id', net_id='foo_net_id', port_id=None)
            with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2,
                                   'delete_port') as d:
                self.assertRaises(p_exc.PortConfigurationError,
                                  nsx.handle_port_metadata_access,
                                  self.plugin, mock.ANY, port,
                                  is_delete=is_delete)
                if not is_delete:
                    d.assert_called_once_with(mock.ANY, mock.ANY, port['id'])
                else:
                    self.assertFalse(d.call_count)
        else:
            nsx.handle_port_metadata_access(
                self.plugin, mock.ANY, port, is_delete=is_delete)
            mock_func.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                              meta)

    def test_handle_port_metadata_access_on_delete_true(self):
        self._test_handle_port_metadata_access(True)

    def test_handle_port_metadata_access_on_delete_false(self):
        self._test_handle_port_metadata_access(False)

    def test_handle_port_metadata_access_on_delete_true_raise(self):
        self._test_handle_port_metadata_access(True, raise_exc=True)

    def test_handle_port_metadata_access_on_delete_false_raise(self):
        self._test_handle_port_metadata_access(False, raise_exc=True)

    def _test_handle_router_metadata_access(
            self, is_port_found, raise_exc=False):
        """Exercise router-interface metadata configuration.

        ``is_port_found`` models whether the interface port still exists;
        on failure (``raise_exc``) the router interface must be removed.
        """
        subnet = {
            'id': 'foo_subnet_id',
            'network_id': 'foo_network_id'
        }
        interface = {
            'subnet_id': subnet['id'],
            'port_id': 'foo_port_id'
        }
        mock_func = self.plugin.lsn_manager.lsn_metadata_configure
        if not is_port_found:
            self.plugin.get_port.side_effect = n_exc.NotFound
        if raise_exc:
            with mock.patch.object(nsx.l3_db.L3_NAT_db_mixin,
                                   'remove_router_interface') as d:
                mock_func.side_effect = p_exc.NsxPluginException(err_msg='')
                self.assertRaises(p_exc.NsxPluginException,
                                  nsx.handle_router_metadata_access,
                                  self.plugin, mock.ANY, 'foo_router_id',
                                  interface)
                d.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router_id',
                                          interface)
        else:
            nsx.handle_router_metadata_access(
                self.plugin, mock.ANY, 'foo_router_id', interface)
            mock_func.assert_called_once_with(
                mock.ANY, subnet['id'], is_port_found)

    def test_handle_router_metadata_access_add_interface(self):
        self._test_handle_router_metadata_access(True)

    def test_handle_router_metadata_access_delete_interface(self):
        self._test_handle_router_metadata_access(False)

    def test_handle_router_metadata_access_raise_error_on_add(self):
        self._test_handle_router_metadata_access(True, raise_exc=True)

    def test_handle_router_metadata_access_raise_error_on_delete(self):
        # NOTE(review): despite its name this exercises the non-raising
        # "add interface" path, duplicating
        # test_handle_router_metadata_access_add_interface above. The
        # symmetric call would be (False, raise_exc=True), but the helper's
        # raise branch unconditionally asserts remove_router_interface was
        # called, which may not hold for the delete path — confirm the
        # plugin behavior before changing.
        self._test_handle_router_metadata_access(True, raise_exc=False)
gkotton/vmware-nsx
vmware-nsx/neutron/tests/unit/vmware/test_dhcpmeta.py
Python
apache-2.0
62,619
#!/usr/bin/python # To add two numbers var1 = input("Enter the first number : ") var2 = input("Enter the second number : ") var1, var2 = var2, var1 print "Swapped numbers are {0} and {1}".format(var1, var2)
ramesharpu/python
basic-coding/swap-two-variables.py
Python
gpl-2.0
211
"""Base Django settings for the weaverbird project template.

Values wrapped in ``{{ ... }}`` are placeholders filled in by the project
template renderer.
"""
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
)

MANAGERS = ADMINS

# Walk three directory levels up from this file to reach the project root
# (this file lives at src/<project>/settings/base.py).
BASE_DIR = os.path.abspath(__file__)
for _ in range(3):
    BASE_DIR = os.path.dirname(BASE_DIR)

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'weaverbird',
        'USER': 'postgres',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Local in-memory cache; suitable for development, not multi-process serving.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}

TIME_ZONE = 'GMT'
TIME_FORMAT = 'H:i'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True

MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_serve')
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Filled in per-project by the template renderer.
SECRET_KEY = '{{ secret_key }}'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = '{{ project_name }}.urls'

# NOTE(review): other module paths here are prefixed with
# '{{ project_name }}.' (see ROOT_URLCONF) — confirm that a bare
# 'wsgi.application' actually resolves in deployed projects.
WSGI_APPLICATION = 'wsgi.application'

TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)

# Keep sessions alive for one year.
SESSION_COOKIE_AGE = 60*60*24*365

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
)

# Email site admins on server errors when DEBUG is off.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
weaverbird/weaverbird-backend
src/weaverbird/settings/base.py
Python
mit
3,046
#!/usr/bin/env python3
#
# PyQt5Template. Created on 21.04.2015
# Copyright (c) 2015 Andreas Schulz
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os

# Inserting this file's parent directory into sys.path to allow absolute
# imports in the project. BUGFIX: this must happen BEFORE importing any
# project module (widgets.mainwindow below); the original performed the
# insert after that import, which defeated its stated purpose when the
# script was launched from a different working directory.
sys.path.insert(0, os.path.normpath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '..')))

from PyQt5.QtWidgets import QApplication

from widgets.mainwindow import MainWindow


def main(argv=None):
    """Create the Qt application, show the main window, run the event loop.

    :param argv: command-line arguments; falls back to ``sys.argv`` when
        empty or ``None``.
    :return: the application's exit status (int), suitable for ``sys.exit``.
    """
    if not argv:
        argv = sys.argv
    app = QApplication(argv)
    window = MainWindow()
    window.show()
    return app.exec_()


if __name__ == '__main__':
    sys.exit(main(sys.argv))
dev-platypus/templates
PyQt5Template/main.py
Python
gpl-3.0
1,689
# Documentation:
#   qute://help/configuring.html
#   qute://help/settings.html
# (``config`` and ``c`` are injected into this file's namespace by
# qutebrowser when it loads config.py.)

# Don't load autoconfig.yml (this is required as of v2.0.0)
config.load_autoconfig(False)

# Name of the session to load by default
c.session.default_name = "default"

# Automatically save the current session
c.auto_save.session = True

# Disable <video> autoplay, except for Google Play Music
c.content.autoplay = False
config.set("content.autoplay", True, "play.google.com/music/*")

# Prevent websites from asking for geolocation
c.content.geolocation = False

# Disable host blocking; this is redundant as I handle host blocking at the
# gateway level for all devices on my LAN and locally, using /etc/hosts, for
# non-static devices (e.g. laptops)
c.content.blocking.enabled = False

# Enable JavaScript
c.content.javascript.enabled = True
config.set("content.javascript.enabled", True, "file://*")
config.set("content.javascript.enabled", True, "chrome://*/*")
config.set("content.javascript.enabled", True, "qute://*/*")

# Set default content.notifications behavior for specific URLs
c.content.notifications.enabled = False
config.set("content.notifications.enabled", True, "calendar.google.com")
config.set("content.notifications.enabled", True, "play.google.com")
config.set("content.notifications.enabled", True, "music.youtube.com")
config.set("content.notifications.enabled", True, "messages.google.com")
config.set("content.notifications.enabled", True, "www.fastmail.com")

# Set default content.register_protocol_handler behavior for specific URLs
c.content.register_protocol_handler = False
config.set("content.register_protocol_handler", True, "calendar.google.com")
config.set("content.register_protocol_handler", True, "www.fastmail.com")

# Which interfaces to expose via WebRTC
c.content.webrtc_ip_handling_policy = "default-public-interface-only"

# Directory to save downloads
c.downloads.location.directory = "~/downloads"

# Open new tabs in the background
c.tabs.background = True

# Open new tabs at the end of the stack
c.tabs.new_position.related = "last"

# Page to open if :open -t/-b/-w is used without URL.
c.url.default_page = "about:blank"

# Set colors for private browsing mode
# Note that "private browsing mode" above refers to qutebrowser's internal
# "private mode", and as such these settings only apply to windows opened in
# private mode (`open -p`) within a session using this configuration file.
c.colors.statusbar.command.private.bg = "#7A378B"
c.colors.statusbar.command.private.fg = "#FDF8FF"
c.colors.statusbar.private.bg = "#68228B"
c.colors.statusbar.private.fg = "#FDF8FF"

# This ensures that any changes to the default value for `url.searchengines`
# are not propagated without updating the value below, explicitly.
# Keys starting with "!" are bang-style keywords; "DEFAULT" is used when no
# keyword matches.
c.url.searchengines = {
    "DEFAULT": "https://duckduckgo.com/?q={}",
    "!a": "https://www.amazon.com/s?k={}",
    "!ao": "https://www.amazon.com/gp/your-account/order-history/ref=ppx_yo_dt_b_search?search={}",
    "!ap": "https://www.archlinux.org/packages/?q={}",
    "!aur": "https://aur.archlinux.org/packages/?K={}",
    "!aw": "https://wiki.archlinux.org/index.php?search={}",
    "!cb": "https://www.crunchbase.com/textsearch?q={}",
    "!crates": "https://crates.io/search?q={}",
    "!domain": "https://domains.google.com/registrar/search?searchTerm={}",
    "!evo": "https://www.evo.com/shop?text={}",
    "!gh": "https://github.com/search?q={}",
    "!gm": "https://play.google.com/store/search?c=movies&q={}",
    "!li": "https://www.linkedin.com/search/results/all/?keywords={}",
    "!lic": "https://www.linkedin.com/search/results/people/?facetNetwork=%5B%22F%22%5D&keywords={}",
    "!lim": "https://www.linkedin.com/messaging/?searchTerm={}",
    "!lip": "https://www.linkedin.com/search/results/people/?keywords={}",
    "!lij": "https://www.linkedin.com/jobs/search/?keywords={}",
    "!mdn": "https://developer.mozilla.org/en-US/search?q={}",
    "!music": "https://music.youtube.com/search?q={}",
    "!od": "https://www.opendota.com/search?q={}",
    "!r": "https://www.reddit.com/search?q={}",
    "!rsw": "https://runescape.wiki/w/Special:Search?search={}",
    "!rust": "https://doc.rust-lang.org/std/?search={}",
    "!rustn": "https://doc.rust-lang.org/nightly/std/?search={}",
    "!tf": "https://registry.terraform.io/search/?q={}",
    "!tfm": "https://registry.terraform.io/search/modules?q={}",
    "!tfp": "https://registry.terraform.io/search/providers?q={}",
    "!w": "https://en.wikipedia.org/w/index.php?search={}",
    "!wh": "https://www.wowhead.com/search?q={}",
    "!yt": "https://www.youtube.com/results?search_query={}",
    "!yth": "https://www.youtube.com/feed/history?query={}",
    "!ytm": "https://music.youtube.com/search?q={}",
}

# Hide window decoration (e.g. Gnome title bar)
c.window.hide_decoration = True

# Format the window title
c.window.title_format = "{perc}{current_title}"

################################################################################
# Aliases
################################################################################

c.aliases = {
    "read": "spawn --userscript readability-js",
}

################################################################################
# Keybinds
#
# The default method for binding keys to different modes is fairly cumbersome
# when binding lots of keys to different modes. The `keybinds` dict below
# has a single top-level key for each supported mode, whose value is another
# dict of key:command mappings. A value of None unbinds the key (removing
# a default binding) instead of binding a command.
################################################################################

keybinds = {
    "normal": {
        "<alt+0>": "tab-focus 10",
        "<alt+9>": "tab-focus 9",
        "<ctrl+j>": "mode-enter passthrough",
        "<ctrl+pgdown>": None,
        "<ctrl+pgup>": None,
        "<ctrl+shift+n>": None,
        "<ctrl+shift+o>": "set-cmd-text -s :open -p",
        "<ctrl+shift+w>": None,
        "<ctrl+v>": None,
        "<ctrl+w>": None,
        "g$": None,
        "g^": None,
        "po": "open -p -- {clipboard}",
        "pt": "open -t -- {clipboard}",
    },
    "caret": {
        "<ctrl+j>": "mode-leave",
        "<escape>": None,
    },
    "command": {
        "<Tab>": None,
        "<ctrl+j>": "mode-leave",
        "<ctrl+n>": "completion-item-focus next",
        "<ctrl+p>": "completion-item-focus prev",
        "<ctrl+shift+n>": "completion-item-focus --history next",
        "<ctrl+shift+p>": "completion-item-focus --history prev",
        "<escape>": None,
    },
    "hint": {
        "<ctrl+j>": "mode-leave",
        "<escape>": None,
    },
    "insert": {
        "<ctrl+j>": "mode-leave",
        "<escape>": None,
    },
    "passthrough": {
        "<ctrl+j>": "mode-leave",
        "<shift+escape>": None,
    },
    "prompt": {
        "<ctrl+j>": "mode-leave",
        "<escape>": None,
    },
    "register": {
        "<ctrl+j>": "mode-leave",
        "<escape>": None,
    },
    "yesno": {
        "<ctrl+j>": "mode-leave",
        "<escape>": None,
    },
}

# Apply the table above: None -> unbind the key, otherwise bind the command.
for mode, binds in keybinds.items():
    for k, v in binds.items():
        if v is None:
            config.unbind(k, mode=f"{mode}")
        else:
            config.bind(k, v, mode=f"{mode}")
bddenhartog/dotfiles
qutebrowser/.config/qutebrowser/config.py
Python
mit
7,171
from __future__ import unicode_literals

from hashlib import md5


class KnowledgeMiddleware(object):
    """Attach a per-visitor secret token to each incoming request."""

    def process_request(self, request):
        # Stash the token on the request so later middleware/views can use it.
        request._secret_token = self.generate_token(request)

    def generate_token(self, request):
        """Return an identifying token for the request's visitor.

        Authenticated users are identified by their username; anonymous
        visitors get a stable MD5 fingerprint of their network address
        plus browser identification string.
        """
        user = request.user
        if user.is_authenticated():
            return user.username

        remote_addr = request.META['REMOTE_ADDR']
        user_agent = request.META.get('HTTP_USER_AGENT', '')
        fingerprint = remote_addr + user_agent
        return md5(fingerprint.encode('utf-8')).hexdigest()
eliostvs/django-kb
kb/middleware.py
Python
bsd-3-clause
472
#!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" from enum import Enum from io import BytesIO import logging import optparse import os import pdb import shutil from struct import pack import sys import tempfile import time from . import coverage from .address import wif_to_privkey from .authproxy import JSONRPCException from .blocktools import ( create_block, create_coinbase_pos, create_transaction_from_outpoint, is_zerocoin, ) from .key import CECKey from .messages import ( COIN, COutPoint, CTransaction, CTxIn, CTxOut, hash256, ) from .script import ( CScript, OP_CHECKSIG, ) from .test_node import TestNode from .util import ( MAX_NODES, PortSeed, assert_equal, assert_greater_than, check_json_precision, connect_nodes_bi, connect_nodes_clique, disconnect_nodes, DEFAULT_FEE, get_datadir_path, hex_str_to_bytes, bytes_to_hex_str, initialize_datadir, set_node_times, SPORK_ACTIVATION_TIME, SPORK_DEACTIVATION_TIME, sync_blocks, sync_mempools, vZC_DENOMS, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 TMPDIR_PREFIX = "pivx_func_test_" class PivxTestFramework(): """Base class for a pivx test script. Individual pivx test scripts should subclass this class and override the set_test_params() and run_test() methods. Individual tests can also override the following methods to customize the test setup: - add_options() - setup_chain() - setup_network() - setup_nodes() The __init__() and main() methods should not be overridden. This class also contains various public and private helper methods.""" def __init__(self): """Sets test framework defaults. Do not override this method. 
Instead, override the set_test_params() method""" self.setup_clean_chain = False self.nodes = [] self.mocktime = 0 self.supports_cli = False self.set_test_params() assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" def main(self): """Main function. This should not be overridden by the subclass test scripts.""" parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave pivxds and test.* datadir on exit or error") parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop pivxds after the test execution") parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"), help="Source directory containing pivxd/pivx-cli (default: %default)") parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs") parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs") parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int', help="The seed to use for assigning port numbers (default: current process id)") parser.add_option("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_option("--configfile", dest="configfile", help="Location of the test framework config file") parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") parser.add_option("--usecli", dest="usecli", default=False, action="store_true", help="use pivx-cli instead of RPC for all commands") self.add_options(parser) (self.options, self.args) = parser.parse_args() PortSeed.n = self.options.port_seed os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH'] check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) # Set up temp directory and start logging if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() success = TestStatus.FAILED try: if self.options.usecli and not self.supports_cli: raise SkipTest("--usecli specified but test does not support using CLI") self.setup_chain() self.setup_network() time.sleep(5) self.run_test() success = TestStatus.PASSED except JSONRPCException as e: self.log.exception("JSONRPC error") except SkipTest as e: self.log.warning("Test Skipped: %s" % e.message) success = TestStatus.SKIPPED except AssertionError as e: self.log.exception("Assertion failed") except KeyError as e: self.log.exception("Key error") 
except Exception as e: self.log.exception("Unexpected exception caught during testing") except KeyboardInterrupt as e: self.log.warning("Exiting after keyboard interrupt") if success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? for help") pdb.set_trace() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: for node in self.nodes: node.cleanup_on_exit = False self.log.info("Note: pivxds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED: self.log.info("Cleaning up") shutil.rmtree(self.options.tmpdir) else: self.log.warning("Not cleaning up dir %s" % self.options.tmpdir) if success == TestStatus.PASSED: self.log.info("Tests successful") exit_code = TEST_EXIT_PASSED elif success == TestStatus.SKIPPED: self.log.info("Test skipped") exit_code = TEST_EXIT_SKIPPED else: self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir) self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir)) exit_code = TEST_EXIT_FAILED logging.shutdown() sys.exit(exit_code) # Methods to override in subclass test scripts. def set_test_params(self): """Tests must this method to change default values for number of nodes, topology, etc""" raise NotImplementedError def add_options(self, parser): """Override this method to add command-line options to the test""" pass def setup_chain(self): """Override this method to customize blockchain setup""" self.log.info("Initializing test directory " + self.options.tmpdir) if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain() def setup_network(self): """Override this method to customize test network topology""" self.setup_nodes() # Connect the nodes as a "chain". 
This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. for i in range(self.num_nodes - 1): connect_nodes_bi(self.nodes, i, i + 1) self.sync_all() def setup_nodes(self): """Override this method to customize test node setup""" extra_args = None if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() def run_test(self): """Tests must override this method to define test logic""" raise NotImplementedError # Public helper methods. These can be accessed by the subclass test scripts. def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None): """Instantiate TestNode objects""" if extra_args is None: extra_args = [[]] * num_nodes if binary is None: binary = [None] * num_nodes assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli)) def start_node(self, i, *args, **kwargs): """Start a pivxd""" node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() time.sleep(10) if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple pivxds""" if extra_args is None: extra_args = [None] * self.num_nodes assert_equal(len(extra_args), self.num_nodes) try: for i, node in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except: # If one node failed to start, stop the others self.stop_nodes() raise time.sleep(10) if self.options.coveragedir is not None: for node in self.nodes: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def 
stop_node(self, i): """Stop a pivxd test node""" self.nodes[i].stop_node() self.nodes[i].wait_until_stopped() def stop_nodes(self): """Stop multiple pivxd test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node() for node in self.nodes: # Wait for nodes to stop time.sleep(5) node.wait_until_stopped() def restart_node(self, i, extra_args=None): """Stop and start a test node""" self.stop_node(i) self.start_node(i, extra_args) def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs): with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr: try: self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs) self.stop_node(i) except Exception as e: assert 'pivxd exited' in str(e) # node must have shutdown self.nodes[i].running = False self.nodes[i].process = None if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8') if expected_msg not in stderr: raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr) else: if expected_msg is None: assert_msg = "pivxd should have exited with an error" else: assert_msg = "pivxd should have exited with expected error " + expected_msg raise AssertionError(assert_msg) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ disconnect_nodes(self.nodes[1], 2) disconnect_nodes(self.nodes[2], 1) self.sync_all([self.nodes[:2], self.nodes[2:]]) def join_network(self): """ Join the (previously split) network halves together. """ connect_nodes_bi(self.nodes, 1, 2) self.sync_all() def sync_all(self, node_groups=None): if not node_groups: node_groups = [self.nodes] for group in node_groups: sync_blocks(group) sync_mempools(group) def enable_mocktime(self): """Enable mocktime for the script. mocktime may be needed for scripts that use the cached version of the blockchain. 
If the cached version of the blockchain is used without mocktime then the mempools will not sync due to IBD. Sets mocktime to Tuesday, October 31, 2017 6:21:20 PM GMT (1572546080) """ self.mocktime = 1572546080 def disable_mocktime(self): self.mocktime = 0 # Private helper methods. These should not be accessed by the subclass test scripts. def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as pivxd's debug.log with microprecision (so log files can be concatenated and sorted) formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S') formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self, toPosPhase=False): """Initialize a pre-mined blockchain for use by the test.""" def create_cachedir(cachedir): if os.path.isdir(cachedir): shutil.rmtree(cachedir) os.makedirs(cachedir) def copy_cachedir(origin, destination, num_nodes=MAX_NODES): for i in range(num_nodes): from_dir = 
get_datadir_path(origin, i) to_dir = get_datadir_path(destination, i) shutil.copytree(from_dir, to_dir) initialize_datadir(destination, i) # Overwrite port/rpcport in pivx.conf def clone_cache_from_node_1(cachedir, from_num=4): """ Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES""" def copy_and_overwrite(from_path, to_path): if os.path.exists(to_path): shutil.rmtree(to_path) shutil.copytree(from_path, to_path) assert from_num < MAX_NODES node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), "regtest") for i in range(from_num, MAX_NODES): node_i_datadir = os.path.join(get_datadir_path(cachedir, i), "regtest") for subdir in ["blocks", "chainstate", "sporks", "zerocoin"]: copy_and_overwrite(os.path.join(node_0_datadir, subdir), os.path.join(node_i_datadir, subdir)) initialize_datadir(cachedir, i) # Overwrite port/rpcport in pivx.conf def cachedir_valid(cachedir): for i in range(MAX_NODES): if not os.path.isdir(get_datadir_path(cachedir, i)): return False # nodes directories exist. check if the first one has the .incomplete flagfile return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), ".incomplete"))) def clean_cache_subdir(cachedir): os.remove(os.path.join(get_datadir_path(cachedir, 0), ".incomplete")) def cache_path(n, *paths): return os.path.join(get_datadir_path(cachedir, n), "regtest", *paths) for i in range(MAX_NODES): for entry in os.listdir(cache_path(i)): if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']: os.remove(cache_path(i, entry)) def clean_cache_dir(): if os.path.isdir(self.options.cachedir): # migrate old cache dir if cachedir_valid(self.options.cachedir): powcachedir = os.path.join(self.options.cachedir, "pow") self.log.info("Found old cachedir. 
Migrating to %s" % str(powcachedir)) copy_cachedir(self.options.cachedir, powcachedir) # remove everything except pow and pos subdirs for entry in os.listdir(self.options.cachedir): if entry not in ['pow', 'pos']: entry_path = os.path.join(self.options.cachedir, entry) if os.path.isfile(entry_path): os.remove(entry_path) elif os.path.isdir(entry_path): shutil.rmtree(entry_path) # no cachedir found else: os.makedirs(self.options.cachedir) def start_nodes_from_dir(ddir, num_nodes=MAX_NODES): self.log.info("Starting %d nodes..." % num_nodes) for i in range(num_nodes): datadir = initialize_datadir(ddir, i) if i == 0: # Add .incomplete flagfile # (removed at the end during clean_cache_subdir) open(os.path.join(datadir, ".incomplete"), 'a').close() args = [os.getenv("BITCOIND", "pivxd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"] self.nodes.append( TestNode(i, ddir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None)) self.nodes[i].args = args self.start_node(i) self.log.info("Node %d started." % i) # Wait for RPC connections to be ready self.log.info("Nodes started. Waiting for RPC connections...") for node in range(4): self.nodes[node].wait_for_rpc_connection() self.log.info("Connecting nodes") connect_nodes_clique(self.nodes) def stop_and_clean_cache_dir(ddir): self.stop_nodes() self.nodes = [] # Copy cache for nodes 5 to MAX_NODES self.log.info("Copying cache dir to non-started nodes") clone_cache_from_node_1(ddir) self.log.info("Cleaning up.") clean_cache_subdir(ddir) def generate_pow_cache(): ### POW Cache ### # Create a 200-block-long chain; each of the 4 first nodes # gets 25 mature blocks and 25 immature. # Note: To preserve compatibility with older versions of # initialize_chain, only 4 nodes will generate coins. 
# # blocks are created with timestamps 1 minutes apart # starting from 331 minutes in the past # Create cache directories, run pivxds: create_cachedir(powcachedir) self.log.info("Creating 'PoW-chain': 200 blocks") start_nodes_from_dir(powcachedir, 4) # Mine the blocks self.log.info("Mining 200 blocks") self.enable_mocktime() block_time = self.mocktime - (331 * 60) for i in range(2): for peer in range(4): for j in range(25): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 60 # Must sync before next peer starts generating blocks sync_blocks(self.nodes) # Shut them down, and clean up cache directories: self.log.info("Stopping nodes") stop_and_clean_cache_dir(powcachedir) self.log.info("---> pow cache created") self.disable_mocktime() assert self.num_nodes <= MAX_NODES clean_cache_dir() powcachedir = os.path.join(self.options.cachedir, "pow") is_powcache_valid = cachedir_valid(powcachedir) poscachedir = os.path.join(self.options.cachedir, "pos") is_poscache_valid = cachedir_valid(poscachedir) if not toPosPhase and not is_powcache_valid: self.log.info("PoW-CACHE NOT FOUND or INVALID.") self.log.info("Creating new cached blockchain data.") generate_pow_cache() elif toPosPhase and not is_poscache_valid: self.log.info("PoS-CACHE NOT FOUND or INVALID.") self.log.info("Creating new cached blockchain data.") # check if first 200 blocks (pow cache) is present. if not generate it. if not is_powcache_valid: self.log.info("PoW-CACHE NOT FOUND or INVALID. Generating it first.") generate_pow_cache() self.enable_mocktime() block_time = self.mocktime - (131 * 60) ### POS Cache ### # Create a 330-block-long chain # First 200 PoW blocks are copied from PoW chain. # The next 48 PoW blocks are mined in 12-blocks bursts by the first 4 nodes. # The last 2 PoW blocks are then mined by the last node (Node 3). # Then 80 PoS blocks are generated in 20-blocks bursts by the first 4 nodes. 
# # - Node 0 and node 1 get 62 mature blocks (pow) + 20 immmature (pos) # 42 rewards spendable (62 mature blocks - 20 spent rewards) # - Node 2 gets 56 mature blocks (pow) + 26 immmature (6 pow + 20 pos) # 35 rewards spendable (55 mature blocks - 20 spent rewards) # - Node 3 gets 50 mature blocks (pow) + 34 immmature (14 pow + 20 pos) # 30 rewards spendable (50 mature blocks - 20 spent rewards) # - Nodes 2 and 3 mint one zerocoin for each denom (tot 6666 PIV) on block 301/302 # 8 mature zc + 8/3 rewards spendable (35/30 - 27 spent) + change 83.92 # # Block 331-336 will mature last 6 pow blocks mined by node 2. # Then 337-350 will mature last 14 pow blocks mined by node 3. # Then staked blocks start maturing at height 351. # Create cache directories, run pivxds: create_cachedir(poscachedir) self.log.info("Creating 'PoS-chain': 330 blocks") self.log.info("Copying 200 initial blocks from pow cache") copy_cachedir(powcachedir, poscachedir) # Change datadir and restart the nodes (only 4 of them) start_nodes_from_dir(poscachedir, 4) # Mine 50 more blocks to reach PoS start. self.log.info("Mining 50 more blocks to reach PoS phase") for peer in range(4): for j in range(12): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 60 # Must sync before next peer starts generating blocks if peer < 3: sync_blocks(self.nodes) set_node_times(self.nodes, block_time) self.nodes[3].generate(2) block_time += 60 sync_blocks(self.nodes) # Then stake 80 blocks. 
self.log.info("Staking 80 blocks...") nBlocks = 250 res = [] # used to save the two txids for change outputs of mints (locked) for peer in range(4): for j in range(20): # Stake block block_time = self.generate_pos(peer, block_time) nBlocks += 1 # Mint zerocoins with node-2 at block 301 and with node-3 at block 302 if nBlocks == 301 or nBlocks == 302: # mints 7 zerocoins, one for each denom (tot 6666 PIV), fee = 0.01 * 8 # consumes 27 utxos (tot 6750 PIV), change = 6750 - 6666 - fee res.append(self.nodes[nBlocks-299].mintzerocoin(6666)) self.sync_all() # lock the change output (so it's not used as stake input in generate_pos) assert (self.nodes[nBlocks-299].lockunspent(False, [{"txid": res[-1]['txid'], "vout": 8}])) # Must sync before next peer starts generating blocks sync_blocks(self.nodes) time.sleep(1) self.log.info("80 blocks staked") # Unlock previously locked change outputs for i in [2, 3]: assert (self.nodes[i].lockunspent(True, [{"txid": res[i-2]['txid'], "vout": 8}])) # Verify height and balances self.test_PoS_chain_balances() # Shut nodes down, and clean up cache directories: self.log.info("Stopping nodes") stop_and_clean_cache_dir(poscachedir) self.log.info("--> pos cache created") self.disable_mocktime() else: self.log.info("CACHE FOUND.") # Copy requested cache to tempdir if toPosPhase: self.log.info("Copying datadir from %s to %s" % (poscachedir, self.options.tmpdir)) copy_cachedir(poscachedir, self.options.tmpdir, self.num_nodes) else: self.log.info("Copying datadir from %s to %s" % (powcachedir, self.options.tmpdir)) copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes) def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. 
Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): initialize_datadir(self.options.tmpdir, i) ### PIVX Specific TestFramework ### ################################### def init_dummy_key(self): self.DUMMY_KEY = CECKey() self.DUMMY_KEY.set_secretbytes(hash256(pack('<I', 0xffff))) def test_PoS_chain_balances(self): from .util import DecimalAmt # 330 blocks # - Nodes 0 and 1 get 82 blocks: # 62 pow + 20 pos (20 immature) # - Nodes 2 gets 82 blocks: # 62 pow + 20 pos (26 immature) # - Nodes 3 gets 84 blocks: # 64 pow + 20 pos (34 immature) # - Nodes 2 and 3 have 6666 PIV worth of zerocoins zc_tot = sum(vZC_DENOMS) zc_fee = len(vZC_DENOMS) * 0.01 used_utxos = (zc_tot // 250) + 1 zc_change = 250 * used_utxos - zc_tot - zc_fee # check at least 1 node and at most 5 num_nodes = min(5, len(self.nodes)) assert_greater_than(num_nodes, 0) # each node has the same height and tip best_block = self.nodes[0].getbestblockhash() for i in range(num_nodes): assert_equal(self.nodes[i].getblockcount(), 330) if i > 0: assert_equal(self.nodes[i].getbestblockhash(), best_block) # balance is mature pow blocks rewards minus stake inputs (spent) w_info = [self.nodes[i].getwalletinfo() for i in range(num_nodes)] assert_equal(w_info[0]["balance"], DecimalAmt(250.0 * (62 - 20))) assert_equal(w_info[1]["balance"], DecimalAmt(250.0 * (62 - 20))) assert_equal(w_info[2]["balance"], DecimalAmt(250.0 * (56 - 20) - (used_utxos * 250) + zc_change)) assert_equal(w_info[3]["balance"], DecimalAmt(250.0 * (50 - 20) - (used_utxos * 250) + zc_change)) for i in range(4, num_nodes): # only first 4 nodes have mined/staked assert_equal(w_info[i]["balance"], DecimalAmt(0)) # immature balance is immature pow blocks rewards plus # immature stakes (outputs=inputs+rewards) assert_equal(w_info[0]["immature_balance"], DecimalAmt(500.0 * 20)) assert_equal(w_info[1]["immature_balance"], DecimalAmt(500.0 * 20)) assert_equal(w_info[2]["immature_balance"], DecimalAmt((250.0 * 6) 
+ (500.0 * 20))) assert_equal(w_info[3]["immature_balance"], DecimalAmt((250.0 * 14) + (500.0 * 20))) for i in range(4, num_nodes): # only first 4 nodes have mined/staked assert_equal(w_info[i]["immature_balance"], DecimalAmt(0)) # check zerocoin balances / mints for peer in [2, 3]: if num_nodes > peer: zcBalance = self.nodes[peer].getzerocoinbalance() zclist = self.nodes[peer].listmintedzerocoins(True) zclist_spendable = self.nodes[peer].listmintedzerocoins(True, True) assert_equal(len(zclist), len(vZC_DENOMS)) assert_equal(zcBalance['Total'], 6666) assert_equal(zcBalance['Immature'], 0) if peer == 2: assert_equal(len(zclist), len(zclist_spendable)) assert_equal(set([x['denomination'] for x in zclist]), set(vZC_DENOMS)) assert_equal([x['confirmations'] for x in zclist], [30-peer] * len(vZC_DENOMS)) self.log.info("Balances of first %d nodes check out" % num_nodes) def get_prevouts(self, node_id, utxo_list, zpos=False, nHeight=-1): """ get prevouts (map) for each utxo in a list :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos. utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input <if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input zpos: (bool) type of utxo_list nHeight: (int) height of the previous block. used only if zpos=True for stake checksum. Optional, if not provided rpc_conn's height is used. :return: prevouts: ({bytes --> (int, bytes, int)} dictionary) maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zpiv-) to (amount, prevScript, timeBlockFrom). For zpiv prevScript is replaced with serialHash hex string. 
""" assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] prevouts = {} for utxo in utxo_list: if not zpos: outPoint = COutPoint(int(utxo['txid'], 16), utxo['vout']) outValue = int(utxo['amount']) * COIN prevtx_json = rpc_conn.getrawtransaction(utxo['txid'], 1) prevTx = CTransaction() prevTx.deserialize(BytesIO(hex_str_to_bytes(prevtx_json['hex']))) if (prevTx.is_coinbase() or prevTx.is_coinstake()) and utxo['confirmations'] < 100: # skip immature coins continue prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex'] prevTime = prevtx_json['blocktime'] prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime) else: uniqueness = bytes.fromhex(utxo['hash stake'])[::-1] prevouts[uniqueness] = (int(utxo["denomination"]) * COIN, utxo["serial hash"], 0) return prevouts def make_txes(self, node_id, spendingPrevOuts, to_pubKey): """ makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey :param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts. spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary) maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zpiv-) to (amount, prevScript, timeBlockFrom). For zpiv prevScript is replaced with serialHash hex string. 
to_pubKey (bytes) recipient public key :return: block_txes: ([CTransaction] list) """ assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] block_txes = [] for uniqueness in spendingPrevOuts: if is_zerocoin(uniqueness): # spend zPIV _, serialHash, _ = spendingPrevOuts[uniqueness] raw_spend = rpc_conn.createrawzerocoinspend(serialHash, "", False) else: # spend PIV value_out = int(spendingPrevOuts[uniqueness][0] - DEFAULT_FEE * COIN) scriptPubKey = CScript([to_pubKey, OP_CHECKSIG]) prevout = COutPoint() prevout.deserialize_uniqueness(BytesIO(uniqueness)) tx = create_transaction_from_outpoint(prevout, b"", value_out, scriptPubKey) # sign tx raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex'] # add signed tx to the list signed_tx = CTransaction() signed_tx.from_hex(raw_spend) block_txes.append(signed_tx) return block_txes def stake_block(self, node_id, nHeight, prevHhash, stakeableUtxos, startTime=None, privKeyWIF=None, vtx=[], fDoubleSpend=False): """ manually stakes a block selecting the coinstake input from a list of candidates :param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos. nHeight: (int) height of the block being produced prevHash: (string) hex string of the previous block hash stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary) maps CStake "uniqueness" (i.e. serialized COutPoint -or hash stake, for zpiv-) to (amount, prevScript, timeBlockFrom). For zpiv prevScript is replaced with serialHash hex string. startTime: (int) epoch time to be used as blocktime (iterated in solve_stake) privKeyWIF: (string) private key to be used for staking/signing If empty string, it will be used the pk from the stake input (dumping the sk from rpc_conn). If None, then the DUMMY_KEY will be used. 
vtx: ([CTransaction] list) transactions to add to block.vtx fDoubleSpend: (bool) wether any tx in vtx is allowed to spend the coinstake input :return: block: (CBlock) block produced, must be manually relayed """ assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] if not len(stakeableUtxos) > 0: raise Exception("Need at least one stakeable utxo to stake a block!") # Get start time to stake if startTime is None: startTime = time.time() # Create empty block with coinbase nTime = int(startTime) & 0xfffffff0 coinbaseTx = create_coinbase_pos(nHeight) block = create_block(int(prevHhash, 16), coinbaseTx, nTime) # Find valid kernel hash - iterates stakeableUtxos, then block.nTime block.solve_stake(stakeableUtxos) # Check if this is a zPoS block or regular/cold stake - sign stake tx block_sig_key = CECKey() prevout = None isZPoS = is_zerocoin(block.prevoutStake) if isZPoS: _, serialHash, _ = stakeableUtxos[block.prevoutStake] raw_stake = rpc_conn.createrawzerocoinstake(serialHash) stake_tx_signed_raw_hex = raw_stake["hex"] stake_pkey = raw_stake["private-key"] block_sig_key.set_compressed(True) block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey)) else: coinstakeTx_unsigned = CTransaction() prevout = COutPoint() prevout.deserialize_uniqueness(BytesIO(block.prevoutStake)) coinstakeTx_unsigned.vin.append(CTxIn(prevout, b"", 0xffffffff)) coinstakeTx_unsigned.vout.append(CTxOut()) amount, prevScript, _ = stakeableUtxos[block.prevoutStake] outNValue = int(amount + 250 * COIN) coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript))) if privKeyWIF == "": # Use dummy key if not hasattr(self, 'DUMMY_KEY'): self.init_dummy_key() block_sig_key = self.DUMMY_KEY # replace coinstake output script coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG]) else: if privKeyWIF == None: # Use pk of the input. 
Ask sk from rpc_conn rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True) privKeyWIF = rpc_conn.dumpprivkey(rawtx["vout"][prevout.n]["scriptPubKey"]["addresses"][0]) # Use the provided privKeyWIF (cold staking). # export the corresponding private key to sign block privKey, compressed = wif_to_privkey(privKeyWIF) block_sig_key.set_compressed(compressed) block_sig_key.set_secretbytes(bytes.fromhex(privKey)) # Sign coinstake TX and add it to the block stake_tx_signed_raw_hex = rpc_conn.signrawtransaction( bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex'] # Add coinstake to the block coinstakeTx = CTransaction() coinstakeTx.from_hex(stake_tx_signed_raw_hex) block.vtx.append(coinstakeTx) # Add provided transactions to the block. # Don't add tx doublespending the coinstake input, unless fDoubleSpend=True for tx in vtx: if not fDoubleSpend: # assume txes don't double spend zPIV inputs when fDoubleSpend is false. It needs to # be checked outside until a convenient tx.spends(zerocoin) is added to the framework. 
if not isZPoS and tx.spends(prevout): continue block.vtx.append(tx) # Get correct MerkleRoot and rehash block block.hashMerkleRoot = block.calc_merkle_root() block.rehash() # sign block with block signing key and return it block.sign_block(block_sig_key) return block def stake_next_block(self, node_id, stakeableUtxos, btime=None, privKeyWIF=None, vtx=[], fDoubleSpend=False): """ Calls stake_block appending to the current tip""" assert_greater_than(len(self.nodes), node_id) nHeight = self.nodes[node_id].getblockcount() prevHhash = self.nodes[node_id].getblockhash(nHeight) return self.stake_block(node_id, nHeight+1, prevHhash, stakeableUtxos, btime, privKeyWIF, vtx, fDoubleSpend) def check_tx_in_chain(self, node_id, txid): assert_greater_than(len(self.nodes), node_id) rawTx = self.nodes[node_id].getrawtransaction(txid, 1) assert_greater_than(rawTx["confirmations"], 0) def spend_inputs(self, node_id, inputs, outputs): """ auxiliary function used by spend_utxo / spend_utxos """ assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] spendingTx = rpc_conn.createrawtransaction(inputs, outputs) spendingTx_signed = rpc_conn.signrawtransaction(spendingTx) if spendingTx_signed["complete"]: txhash = rpc_conn.sendrawtransaction(spendingTx_signed["hex"]) return txhash else: return "" def spend_utxo(self, node_id, utxo, recipient=''): """ spend amount from previously unspent output to a provided address :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo. 
utxo: (JSON) returned from listunspent used as input recipient: (string) destination address (new one if not provided) :return: txhash: (string) tx hash if successful, empty string otherwise """ assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}] out_amount = float(utxo["amount"]) - DEFAULT_FEE outputs = {} if recipient == '': recipient = rpc_conn.getnewaddress() outputs[recipient] = out_amount return self.spend_inputs(node_id, inputs, outputs) def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False): """ spend utxos to provided list of addresses or 10 new generate ones. :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo. utxo_list: (JSON list) returned from listunspent used as input recipient: (string, optional) destination address (new one if not provided) fMultiple: (boolean, optional, default=false) spend each utxo on a different tx :return: txHashes: (string list) list of hashes of completed txs """ assert_greater_than(len(self.nodes), node_id) rpc_conn = self.nodes[node_id] txHashes = [] # If no recipient is given, create a new one if recipient == '': recipient = rpc_conn.getnewaddress() # If fMultiple=True send one tx for each utxo if fMultiple: for utxo in utxo_list: txHash = self.spend_utxo(node_id, utxo, recipient) if txHash != "": txHashes.append(txHash) # Otherwise make a single tx with all the inputs else: inputs = [{"txid": x["txid"], "vout": x["vout"]} for x in utxo_list] out_amount = sum([float(x["amount"]) for x in utxo_list]) - DEFAULT_FEE outputs = {} if recipient == '': recipient = rpc_conn.getnewaddress() outputs[recipient] = out_amount txHash = self.spend_inputs(node_id, inputs, outputs) if txHash != "": txHashes.append(txHash) return txHashes def generate_pos(self, node_id, btime=None): """ stakes a block using generate on nodes[node_id]""" assert_greater_than(len(self.nodes), node_id) rpc_conn = 
self.nodes[node_id] ss = rpc_conn.getstakingstatus() assert ss["walletunlocked"] assert ss["stakeablecoins"] if btime is not None: next_btime = btime + 60 fStaked = False failures = 0 while not fStaked: try: rpc_conn.generate(1) fStaked = True except JSONRPCException as e: if ("Couldn't create new block" in str(e)): failures += 1 # couldn't generate block. check that this node can still stake (after 60 failures) if failures > 60: ss = rpc_conn.getstakingstatus() if not (ss["walletunlocked"] and ss["stakeablecoins"]): raise AssertionError("Node %d unable to stake!" % node_id) # try to stake one sec in the future if btime is not None: btime += 1 set_node_times(self.nodes, btime) else: time.sleep(1) else: raise e # block generated. adjust block time if btime is not None: btime = max(btime + 1, next_btime) set_node_times(self.nodes, btime) return btime else: return None def generate_pow(self, node_id, btime=None): """ stakes a block using generate on nodes[node_id]""" assert_greater_than(len(self.nodes), node_id) self.nodes[node_id].generate(1) if btime is not None: btime += 60 set_node_times(self.nodes, btime) return btime def set_spork(self, node_id, sporkName, value): assert_greater_than(len(self.nodes), node_id) return self.nodes[node_id].spork(sporkName, value) def get_spork(self, node_id, sporkName): assert_greater_than(len(self.nodes), node_id) return self.nodes[node_id].spork("show")[sporkName] def activate_spork(self, node_id, sporkName): return self.set_spork(node_id, sporkName, SPORK_ACTIVATION_TIME) def deactivate_spork(self, node_id, sporkName): return self.set_spork(node_id, sporkName, SPORK_DEACTIVATION_TIME) def is_spork_active(self, node_id, sporkName): assert_greater_than(len(self.nodes), node_id) return self.nodes[node_id].spork("active")[sporkName] ### ------------------------------------------------------ class ComparisonTestFramework(PivxTestFramework): """Test framework for doing p2p comparison testing Sets up some pivxd binaries: - 1 binary: 
test binary - 2 binaries: 1 test binary, 1 ref binary - n>2 binaries: 1 test binary, n-1 ref binaries""" def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("BITCOIND", "pivxd"), help="pivxd binary to test") parser.add_option("--refbinary", dest="refbinary", default=os.getenv("BITCOIND", "pivxd"), help="pivxd binary to use for reference nodes (if any)") def setup_network(self): extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args, binary=[self.options.testbinary] + [self.options.refbinary] * (self.num_nodes - 1)) self.start_nodes() class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message
Mrs-X/PIVX
test/functional/test_framework/test_framework.py
Python
mit
49,063
# Some useful functions to extract data out of emails # Copyright (C) 2002-2012 John Goerzen & contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import email from email.Parser import Parser as MailParser import time def get_message_date(content, header='Date'): """ Parses mail and returns resulting timestamp. :param header: the header to extract date from; :returns: timestamp or `None` in the case of failure. """ message = MailParser().parsestr(content, True) dateheader = message.get(header) # parsedate_tz returns a 10-tuple that can be passed to mktime_tz # Will be None if missing or not in a valid format. Note that # indexes 6, 7, and 8 of the result tuple are not usable. datetuple = email.utils.parsedate_tz(dateheader) if datetuple is None: return None return email.utils.mktime_tz(datetuple)
styk-tv/offlineimap
offlineimap/emailutil.py
Python
gpl-2.0
1,573
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains AutoCastVariable, a variable which automatically casts itself.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading from tensorflow.python.distribute import ps_values as ps_distribute_values from tensorflow.python.distribute import values as distribute_values from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.types import core # _autocast_dtype.dtype is the dtype AutoCastVariables should be cast to, or # None if AutoCastVariables should not be cast. _autocast_dtype = threading.local() def numpy_text(tensor, is_repr=False): """Human readable representation of a tensor's numpy value.""" if tensor.dtype.is_numpy_compatible: # pylint: disable=protected-access text = repr(tensor._numpy()) if is_repr else str(tensor._numpy()) # pylint: enable=protected-access else: text = '<unprintable>' if '\n' in text: text = '\n' + text return text class AutoCastVariable(variables.Variable, core.Tensor): """Variable that will cast itself to a different dtype in applicable contexts. 
This class wraps a floating-point `tf.Variable`. It emulates the variable interface and delegates to the wrapped variable, but it additionally will cast the wrapped variable under an `enable_auto_cast_variables(dtype)` context manager. For example: >>> v = tf.Variable(1.0, dtype=tf.float32) >>> v = AutoCastVariable(v) >>> tf.identity(v).dtype tf.float32 >>> with enable_auto_cast_variables(tf.float16): ... tf.identity(v).dtype tf.float16 The purpose of this class is to allow Keras layers to create variables in float32, and automatically cast them to float16 or bfloat16 when the layer is called. """ def __init__(self, variable, op=None): """Creates an AutoCastVariable instance. Args: variable: A floating-point resource variable to wrap. op: Optional operation of this variable. Raises: ValueError: If `variable` is not a floating-point resource variable """ if not isinstance(variable, variables.Variable): raise ValueError('variable must be of type tf.ResourceVariable, but got: ' '%s' % variable) if not variable.dtype.is_floating: raise ValueError('variable must be a floating point variable but has ' 'type: %s' % variable.dtype.name) self._variable = variable self._op = op def _should_cast(self): """Returns True if this variable should be casted when accessed.""" autocast_dtype = getattr(_autocast_dtype, 'dtype', None) return autocast_dtype is not None and self.dtype != autocast_dtype @property def dtype(self): """The dtype of the underlying variable, before any casts are done.""" return self._variable.dtype @property def true_dtype(self): """Deprecated alias of `dtype`.""" return self._variable.dtype @property def _cast_dtype(self): dtype = getattr(_autocast_dtype, 'dtype', None) return dtype or self._variable.dtype def value(self): val = self._variable.value() if not self._should_cast(): return val return math_ops.cast(val, self._cast_dtype) def read_value(self): val = self._variable.read_value() return math_ops.cast(val, self._cast_dtype) def sparse_read(self, 
indices, name=None): """Reads the value of this variable sparsely, using `gather`.""" val = self._variable.sparse_read(indices, name=name) return math_ops.cast(val, self._cast_dtype) def gather_nd(self, indices, name=None): """Gather slices of the variable into a Tensor.""" val = self._variable.gather_nd(indices, name=name) return math_ops.cast(val, self._cast_dtype) def __getattr__(self, name): return getattr(self._variable, name) def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts this variable to a tensor.""" if not self._should_cast(): return ops.convert_to_tensor(self._variable, dtype, name, as_ref) # TODO(reedwm): Support as_ref? assert not as_ref if dtype is not None and not dtype.is_compatible_with(self._cast_dtype): raise ValueError( 'Incompatible type conversion requested to type {!r} for ' 'AutoCastVariable which is casted to type {!r}'.format( dtype.name, self._cast_dtype.name)) val = ops.convert_to_tensor_v2_with_dispatch( self._variable, dtype=self._variable.dtype, name=name) return math_ops.cast(val, self._cast_dtype) def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass def __repr__(self): if context.executing_eagerly() and not self._in_graph_mode: repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} " 'dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}, ' 'numpy={np_repr}>') return repr_str.format( v=self, np_repr=numpy_text(self.read_value(), is_repr=True)) else: repr_str = ("<AutoCastVariable '{v.name}' shape={v.shape} " 'dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name}>') return repr_str.format(v=self) # Method delegations: We delegate the following methods to self._variable. # Each of these methods simply calls the same method on self._variable. The # base Variable raises NotImplementedError for most of these, so we must # override them. 
# # We do not define the following methods from Variable for the following # reasons: # * 'count_up_to': This method only applies to int variables, which cannot # be wrapped with an AutoCastVariable. # * 'ref': Instead we inherit the definition from Variable. # If we defined and delegated to Variable, the ref of an AutoCastVariable # would be the same as the ref of the underlying variable, which would be # strange as they are different Python objects. def set_shape(self, shape): return self._variable.set_shape(self, shape) @property def trainable(self): return self._variable.trainable @property def synchronization(self): return self._variable.synchronization @property def aggregation(self): return self._variable.aggregation def eval(self, session=None): return self._variable.eval(session) def initialized_value(self): return self._variable.initialized_value() @property def initial_value(self): return self._variable.initial_value @property def constraint(self): return self._variable.constraint def _apply_assign_update(self, update_fn, value, use_locking=None, name=None, read_value=True): if ops.executing_eagerly_outside_functions(): assign_op = update_fn(value, use_locking, name, False) if read_value: return create_autocast_variable(self._variable, op=assign_op) return assign_op # Fallback to wrapping the returned variable in graph mode if possible assign_var = update_fn(value, use_locking, name, read_value) if read_value and resource_variable_ops.is_resource_variable(assign_var): return create_autocast_variable(assign_var) return assign_var def _apply_update(self, update_fn, *args, **kwargs): update_var = update_fn(*args, **kwargs) if ops.executing_eagerly_outside_functions(): return self # Fallback to wrapping the returned variable in graph mode if possible if resource_variable_ops.is_resource_variable(update_var): return create_autocast_variable(update_var) return update_var def assign(self, value, use_locking=None, name=None, read_value=True): return 
self._apply_assign_update(self._variable.assign, value, use_locking, name, read_value) def assign_add(self, delta, use_locking=None, name=None, read_value=True): return self._apply_assign_update(self._variable.assign_add, delta, use_locking, name, read_value) def assign_sub(self, delta, use_locking=None, name=None, read_value=True): return self._apply_assign_update(self._variable.assign_sub, delta, use_locking, name, read_value) def scatter_sub(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.scatter_sub, sparse_delta, use_locking, name) def scatter_add(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.scatter_add, sparse_delta, use_locking, name) def scatter_max(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.scatter_max, sparse_delta, use_locking, name) def scatter_min(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.scatter_min, sparse_delta, use_locking, name) def scatter_mul(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.scatter_mul, sparse_delta, use_locking, name) def scatter_div(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.scatter_div, sparse_delta, use_locking, name) def scatter_update(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.scatter_update, sparse_delta, use_locking, name) def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): return self._apply_update(self._variable.batch_scatter_update, sparse_delta, use_locking, name) def scatter_nd_sub(self, indices, updates, name=None): return self._apply_update(self._variable.scatter_nd_sub, indices, updates, name) def scatter_nd_add(self, indices, updates, name=None): return self._apply_update(self._variable.scatter_nd_add, indices, updates, name) def scatter_nd_update(self, indices, 
updates, name=None): return self._apply_update(self._variable.scatter_nd_update, indices, updates, name) def load(self, value, session=None): return self._variable.load(value, session) @property def name(self): return self._variable.name @property def _shared_name(self): return self._variable._shared_name # pylint:disable=protected-access @property def initializer(self): return self._variable.initializer @property def device(self): return self._variable.device @property def op(self): if self._op is not None: return self._op return self._variable.op def _as_graph_element(self): graph_element = self._variable._as_graph_element() # pylint:disable=protected-access if graph_element is None: return self._op return graph_element @property def graph(self): return self._variable.graph @property def shape(self): return self._variable.shape def get_shape(self): return self._variable.get_shape() def _gather_saveables_for_checkpoint(self): # By delegating this method to the wrapped variable, checkpoints with # AutoCastVariables are identical to checkpoints with normal variables. # Therefore models checkpointed with AutoCastVariables can be restored on # models with normal variables, and vice versa. return self._variable._gather_saveables_for_checkpoint() # pylint:disable=protected-access def _map_resources(self, save_options): # By delegating this method to the wrapped variable, SavedModel with # AutoCastVariables are identical to SavedModel with normal variables. obj_map, resource_map = self._variable._map_resources(save_options) # pylint:disable=protected-access obj_map[self] = obj_map[self._variable] return obj_map, resource_map # TODO(reedwm): Maybe encode the fact the variable is an AutoCastVariable in # to_proto(). 
def to_proto(self, export_scope=None): return self._variable.to_proto(export_scope) def from_proto(self, variable_def, import_scope=None): return self._variable.from_proto(variable_def, import_scope) # Delegate the private attributes _handle_name and _initializer_op to # self._variable. SavedModel sets these attributes when loading a model. For # example, it sets _handle_name here: # https://github.com/tensorflow/tensorflow/blob/db26bd574fa95b5bdd53c08463dd19407cc0297e/tensorflow/python/keras/saving/saved_model/load.py#L211 # We need to expose these attributes on AutoCastVariable as well for # SavedModel to work properly. # TODO(reedwm/kathywu): Find a better way to support SavedModel. Exposing # private attributes is hacky and difficult to maintain. @property def _handle_name(self): return self._variable._handle_name # pylint: disable=protected-access @_handle_name.setter def _handle_name(self, handle_name): self._variable._handle_name = handle_name # pylint: disable=protected-access @property def _initializer_op(self): return self._variable._initializer_op # pylint: disable=protected-access @_initializer_op.setter def _initializer_op(self, initializer_op): self._variable._initializer_op = initializer_op # pylint: disable=protected-access # Operator overloads: # Note we only overload operators that support floating-point types, as # non-float variables cannot be wrapped with an AutoCastVariable. 
# Also note: We call read_value() instead of value(), because value() causes # gradients not to work properly when TPUStrategy is used: b/143380936 def __add__(self, o): return self.read_value() + o def __radd__(self, o): return o + self.read_value() def __sub__(self, o): return self.read_value() - o def __rsub__(self, o): return o - self.read_value() def __mul__(self, o): return self.read_value() * o def __rmul__(self, o): return o * self.read_value() def __truediv__(self, o): return self.read_value() / o def __rtruediv__(self, o): return o / self.read_value() def __floordiv__(self, o): return self.read_value() // o def __rfloordiv__(self, o): return o // self.read_value() def __mod__(self, o): return self.read_value() % o def __rmod__(self, o): return o % self.read_value() def __lt__(self, o): return self.read_value() < o def __le__(self, o): return self.read_value() <= o def __gt__(self, o): return self.read_value() > o def __ge__(self, o): return self.read_value() >= o def __getitem__(self, o): return self.read_value()[o] def __pow__(self, o, modulo=None): return pow(self.read_value(), o, modulo) def __rpow__(self, o): return pow(o, self.read_value()) def __neg__(self): return -self.read_value() def __abs__(self): return abs(self.read_value()) def __div__(self, o): try: return self.read_value().__div__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rdiv__(self, o): try: return self.read_value().__rdiv__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __matmul__(self, o): try: return self.read_value().__matmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rmatmul__(self, o): try: return self.read_value().__rmatmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented # 
pylint: enable=multiple-statements ops.register_tensor_conversion_function(AutoCastVariable, AutoCastVariable._dense_var_to_tensor) # pylint:disable=protected-access def create_autocast_variable(variable, op=None): """Creates an AutoCastVariable that wraps another variable. This typically just returns `AutoCastVariable(variable)`. But, if the variable is a DistributedVariable or one of its subclasses, we instead dynamically create a class that subclasses from both AutoCastVariable and variable.__class__. This is so the returned variable will still pass `isinstance(variable, variable.__class__)`, which is required for DistributedVariables and its subclasses to work properly. Args: variable: A floating-point resource variable to wrap. op: Optional operation of this variable. Returns: An AutoCastVariable that wraps the variable. """ if not isinstance(variable, (distribute_values.DistributedVariable, ps_distribute_values.AggregatingVariable)): return AutoCastVariable(variable, op=op) class AutoCastDistributedVariable(AutoCastVariable, variable.__class__): """An AutoCastVariable that also subclasses from variable.__class__. variable.__class__ is either a DistributedVariable or an AggregatingVariable. """ def __repr__(self): if issubclass(ps_distribute_values.AggregatingVariable, variable.__class__): # AggregatingVariable's __repr__ simply calls super.__repr__. So we do # the same here for consistency, which calls AutoCastVariable.__repr__. return super(AutoCastDistributedVariable, self).__repr__() # pylint: disable=missing-format-attribute return ('<AutoCastDistributedVariable dtype={v.dtype.name} ' 'dtype_to_cast_to={v._cast_dtype.name} ' 'inner_variable={v._variable}>' ).format(v=self) # pylint: enable=missing-format-attribute return AutoCastDistributedVariable(variable, op=op) class enable_auto_cast_variables(object): # pylint:disable=invalid-name """Context manager which enables the autocasting of `AutoCastVariable`s. 
Under this context manager, `AutoCastVariable`s will be cast to `dtype` if `dtype` is floating-point. Otherwise, `AutoCastVariable`s will not be cast. """ __slots__ = ['_dtype', '_prev_dtype'] def __init__(self, dtype): if dtype and not dtype.is_floating: dtype = None self._dtype = dtype def __enter__(self): self._prev_dtype = getattr(_autocast_dtype, 'dtype', None) _autocast_dtype.dtype = self._dtype def __exit__(self, type_arg, value_arg, traceback_arg): _autocast_dtype.dtype = self._prev_dtype
aam-at/tensorflow
tensorflow/python/keras/mixed_precision/autocast_variable.py
Python
apache-2.0
19,643
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import inspect import logging import hashlib import re import unicodedata from werkzeug import urls from werkzeug.exceptions import NotFound from odoo import api, fields, models, tools from odoo.addons.http_routing.models.ir_http import slugify from odoo.addons.portal.controllers.portal import pager from odoo.tools import pycompat from odoo.http import request from odoo.tools.translate import _ logger = logging.getLogger(__name__) DEFAULT_CDN_FILTERS = [ "^/[^/]+/static/", "^/web/(css|js)/", "^/web/image", "^/web/content", # retrocompatibility "^/website/image/", ] class Website(models.Model): _name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco _description = "Website" def _active_languages(self): return self.env['res.lang'].search([]).ids def _default_language(self): lang_code = self.env['ir.default'].get('res.partner', 'lang') def_lang = self.env['res.lang'].search([('code', '=', lang_code)], limit=1) return def_lang.id if def_lang else self._active_languages()[0] name = fields.Char('Website Name') domain = fields.Char('Website Domain') company_id = fields.Many2one('res.company', string="Company", default=lambda self: self.env.ref('base.main_company').id) language_ids = fields.Many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages', default=_active_languages) default_lang_id = fields.Many2one('res.lang', string="Default Language", default=_default_language, required=True) default_lang_code = fields.Char(related='default_lang_id.code', string="Default language code", store=True) social_twitter = fields.Char(related="company_id.social_twitter") social_facebook = fields.Char(related="company_id.social_facebook") social_github = fields.Char(related="company_id.social_github") social_linkedin = fields.Char(related="company_id.social_linkedin") social_youtube = 
fields.Char(related="company_id.social_youtube") social_googleplus = fields.Char(related="company_id.social_googleplus") google_analytics_key = fields.Char('Google Analytics Key') google_management_client_id = fields.Char('Google Client ID') google_management_client_secret = fields.Char('Google Client Secret') user_id = fields.Many2one('res.users', string='Public User', default=lambda self: self.env.ref('base.public_user').id) cdn_activated = fields.Boolean('Activate CDN for assets') cdn_url = fields.Char('CDN Base URL', default='') cdn_filters = fields.Text('CDN Filters', default=lambda s: '\n'.join(DEFAULT_CDN_FILTERS), help="URL matching those filters will be rewritten using the CDN Base URL") partner_id = fields.Many2one(related='user_id.partner_id', relation='res.partner', string='Public Partner') menu_id = fields.Many2one('website.menu', compute='_compute_menu', string='Main Menu') homepage_id = fields.Many2one('website.page', string='Homepage') favicon = fields.Binary(string="Website Favicon", help="This field holds the image used to display a favicon on the website.") @api.multi def _compute_menu(self): Menu = self.env['website.menu'] for website in self: website.menu_id = Menu.search([('parent_id', '=', False), ('website_id', '=', website.id)], order='id', limit=1).id # cf. 
Wizard hack in website_views.xml def noop(self, *args, **kwargs): pass @api.multi def write(self, values): self._get_languages.clear_cache(self) return super(Website, self).write(values) #---------------------------------------------------------- # Page Management #---------------------------------------------------------- @api.model def new_page(self, name=False, add_menu=False, template='website.default_page', ispage=True, namespace=None): """ Create a new website page, and assign it a xmlid based on the given one :param name : the name of the page :param template : potential xml_id of the page to create :param namespace : module part of the xml_id if none, the template module name is used """ if namespace: template_module = namespace else: template_module, _ = template.split('.') # completely arbitrary max_length page_url = '/' + slugify(name, max_length=200, path=True) page_key = self.get_unique_path(slugify(name, 50)) if not name: name = 'Home' page_key = 'home' template_record = self.env.ref(template) website_id = self._context.get('website_id') key = '%s.%s' % (template_module, page_key) view = template_record.copy({'website_id': website_id, 'key': key}) view.with_context(lang=None).write({ 'arch': template_record.arch.replace(template, key), 'name': name, }) if ispage: page = self.env['website.page'].create({ 'name': name, 'url': page_url, 'website_ids': [(6, None, [self.get_current_website().id])], 'ir_ui_view_id': view.id }) if add_menu: self.env['website.menu'].create({ 'name': name, 'url': page_url, 'parent_id': self.get_current_website().menu_id.id, 'page_id': page.id, 'website_id': self.get_current_website().id, }) return page_url def get_unique_path(self, page_url): """ Given an url, return that url suffixed by counter if it already exists :param page_url : the url to be checked for uniqueness """ website_id = self.get_current_website().id inc = 0 domain_static = ['|', ('website_ids', '=', False), ('website_ids', 'in', website_id)] page_temp = 
page_url while self.env['website.page'].with_context(active_test=False).sudo().search([('url', '=', page_temp)] + domain_static): inc += 1 page_temp = page_url + (inc and "-%s" % inc or "") return page_temp def key_to_view_id(self, view_id): return self.env['ir.ui.view'].search([ ('id', '=', view_id), '|', ('website_id', '=', self._context.get('website_id')), ('website_id', '=', False), ('type', '=', 'qweb') ]) @api.model def page_search_dependencies(self, page_id=False): """ Search dependencies just for information. It will not catch 100% of dependencies and False positive is more than possible Each module could add dependences in this dict :returns a dictionnary where key is the 'categorie' of object related to the given view, and the value is the list of text and link to the resource using given page """ dependencies = {} if not page_id: return dependencies page = self.env['website.page'].browse(int(page_id)) website_id = self._context.get('website_id') url = page.url page_key = _('Page') # search for website_page with link website_page_search_dom = [ '|', ('website_ids', 'in', website_id), ('website_ids', '=', False), ('ir_ui_view_id.arch_db', 'ilike', url) ] pages = self.env['website.page'].search(website_page_search_dom) page_view_ids = [] for page in pages: dependencies.setdefault(page_key, []) dependencies[page_key].append({ 'text': _('Page <b>%s</b> contains a link to this page') % page.url, 'link': page.url }) page_view_ids.append(page.ir_ui_view_id.id) # search for ir_ui_view (not from a website_page) with link page_search_dom = [ '|', ('website_id', '=', website_id), ('website_id', '=', False), ('arch_db', 'ilike', url), ('id', 'not in', page_view_ids) ] views = self.env['ir.ui.view'].search(page_search_dom) for view in views: dependencies.setdefault(page_key, []) dependencies[page_key].append({ 'text': _('Template <b>%s (id:%s)</b> contains a link to this page') % (view.key or view.name, view.id), 'link': '#' }) # search for menu with link 
menu_search_dom = [ '|', ('website_id', '=', website_id), ('website_id', '=', False), ('url', 'ilike', '%s' % url) ] menu_key = _('Menu') menus = self.env['website.menu'].search(menu_search_dom) for menu in menus: dependencies.setdefault(menu_key, []).append({ 'text': _('This page is in the menu <b>%s</b>') % menu.name, 'link': False }) return dependencies @api.model def page_exists(self, name, module='website'): try: name = (name or "").replace("/website.", "").replace("/", "") if not name: return False return self.env.ref('%s.%s' % module, name) except Exception: return False #---------------------------------------------------------- # Languages #---------------------------------------------------------- @api.multi def get_languages(self): self.ensure_one() return self._get_languages() @tools.cache('self.id') def _get_languages(self): return [(lg.code, lg.name) for lg in self.language_ids] @api.multi def get_alternate_languages(self, req=None): langs = [] if req is None: req = request.httprequest default = self.get_current_website().default_lang_code shorts = [] def get_url_localized(router, lang): arguments = dict(request.endpoint_arguments) for key, val in list(arguments.items()): if isinstance(val, models.BaseModel): arguments[key] = val.with_context(lang=lang) return router.build(request.endpoint, arguments) router = request.httprequest.app.get_db_router(request.db).bind('') for code, dummy in self.get_languages(): lg_path = ('/' + code) if code != default else '' lg_codes = code.split('_') shorts.append(lg_codes[0]) uri = get_url_localized(router, code) if request.endpoint else request.httprequest.path if req.query_string: uri += u'?' 
+ req.query_string.decode('utf-8') lang = { 'hreflang': ('-'.join(lg_codes)).lower(), 'short': lg_codes[0], 'href': req.url_root[0:-1] + lg_path + uri, } langs.append(lang) for lang in langs: if shorts.count(lang['short']) == 1: lang['hreflang'] = lang['short'] return langs #---------------------------------------------------------- # Utilities #---------------------------------------------------------- @api.model def get_current_website(self): domain_name = request and request.httprequest.environ.get('HTTP_HOST', '').split(':')[0] or None website_id = self._get_current_website_id(domain_name) if request: request.context = dict(request.context, website_id=website_id) return self.browse(website_id) @tools.cache('domain_name') def _get_current_website_id(self, domain_name): """ Reminder : cached method should be return record, since they will use a closed cursor. """ website = self.search([('domain', '=', domain_name)], limit=1) if not website: website = self.search([], limit=1) return website.id @api.model def is_publisher(self): return self.env['ir.model.access'].check('ir.ui.view', 'write', False) @api.model def is_user(self): return self.env['ir.model.access'].check('ir.ui.menu', 'read', False) @api.model def get_template(self, template): View = self.env['ir.ui.view'] if isinstance(template, pycompat.integer_types): view_id = template else: if '.' 
not in template: template = 'website.%s' % template view_id = View.get_view_id(template) if not view_id: raise NotFound return View.browse(view_id) @api.model def pager(self, url, total, page=1, step=30, scope=5, url_args=None): return pager(url, total, page=page, step=step, scope=scope, url_args=url_args) def rule_is_enumerable(self, rule): """ Checks that it is possible to generate sensible GET queries for a given rule (if the endpoint matches its own requirements) :type rule: werkzeug.routing.Rule :rtype: bool """ endpoint = rule.endpoint methods = endpoint.routing.get('methods') or ['GET'] converters = list(rule._converters.values()) if not ('GET' in methods and endpoint.routing['type'] == 'http' and endpoint.routing['auth'] in ('none', 'public') and endpoint.routing.get('website', False) and all(hasattr(converter, 'generate') for converter in converters) and endpoint.routing.get('website')): return False # dont't list routes without argument having no default value or converter spec = inspect.getargspec(endpoint.method.original_func) # remove self and arguments having a default value defaults_count = len(spec.defaults or []) args = spec.args[1:(-defaults_count or None)] # check that all args have a converter return all((arg in rule._converters) for arg in args) @api.multi def enumerate_pages(self, query_string=None, force=False): """ Available pages in the website/CMS. This is mostly used for links generation and can be overridden by modules setting up new HTML controllers for dynamic pages (e.g. blog). By default, returns template views marked as pages. :param str query_string: a (user-provided) string, fetches pages matching the string :returns: a list of mappings with two keys: ``name`` is the displayable name of the resource (page), ``url`` is the absolute URL of the same. 
:rtype: list({name: str, url: str}) """ router = request.httprequest.app.get_db_router(request.db) # Force enumeration to be performed as public user url_set = set() for rule in router.iter_rules(): if not self.rule_is_enumerable(rule): continue converters = rule._converters or {} if query_string and not converters and (query_string not in rule.build([{}], append_unknown=False)[1]): continue values = [{}] # converters with a domain are processed after the other ones convitems = sorted( converters.items(), key=lambda x: hasattr(x[1], 'domain') and (x[1].domain != '[]')) for (i, (name, converter)) in enumerate(convitems): newval = [] for val in values: query = i == len(convitems)-1 and query_string for value_dict in converter.generate(uid=self.env.uid, query=query, args=val): newval.append(val.copy()) value_dict[name] = value_dict['loc'] del value_dict['loc'] newval[-1].update(value_dict) values = newval for value in values: domain_part, url = rule.build(value, append_unknown=False) page = {'loc': url} for key, val in value.items(): if key.startswith('__'): page[key[2:]] = val if url in ('/sitemap.xml',): continue if url in url_set: continue url_set.add(url) yield page # '/' already has a http.route & is in the routing_map so it will already have an entry in the xml domain = [('url', '!=', '/')] if not force: domain += [('website_indexed', '=', True)] #is_visible domain += [('website_published', '=', True), '|', ('date_publish', '!=', False), ('date_publish', '>', fields.Datetime.now())] if query_string: domain += [('url', 'like', query_string)] pages = self.get_website_pages(domain) for page in pages: record = {'loc': page['url'], 'id': page['id'], 'name': page['name']} if page.ir_ui_view_id and page.ir_ui_view_id.priority != 16: record['__priority'] = min(round(page.ir_ui_view_id.priority / 32.0, 1), 1) if page['write_date']: record['__lastmod'] = page['write_date'][:10] yield record @api.multi def get_website_pages(self, domain=[], order='name', limit=None): 
domain += ['|', ('website_ids', 'in', self.get_current_website().id), ('website_ids', '=', False)] pages = request.env['website.page'].search(domain, order='name', limit=limit) return pages @api.multi def search_pages(self, needle=None, limit=None): name = slugify(needle, max_length=50, path=True) res = [] for page in self.enumerate_pages(query_string=name, force=True): res.append(page) if len(res) == limit: break return res @api.model def image_url(self, record, field, size=None): """ Returns a local url that points to the image field of a given browse record. """ sudo_record = record.sudo() sha = hashlib.sha1(getattr(sudo_record, '__last_update').encode('utf-8')).hexdigest()[0:7] size = '' if size is None else '/%s' % size return '/web/image/%s/%s/%s%s?unique=%s' % (record._name, record.id, field, size, sha) @api.model def get_cdn_url(self, uri): # Currently only usable in a website_enable request context if request and request.website and not request.debug and request.website.user_id.id == request.uid: cdn_url = request.website.cdn_url cdn_filters = (request.website.cdn_filters or '').splitlines() for flt in cdn_filters: if flt and re.match(flt, uri): return urls.url_join(cdn_url, uri) return uri @api.model def action_dashboard_redirect(self): if self.env.user.has_group('base.group_system') or self.env.user.has_group('website.group_website_designer'): return self.env.ref('website.backend_dashboard').read()[0] return self.env.ref('website.action_website').read()[0] class SeoMetadata(models.AbstractModel): _name = 'website.seo.metadata' _description = 'SEO metadata' website_meta_title = fields.Char("Website meta title", translate=True) website_meta_description = fields.Text("Website meta description", translate=True) website_meta_keywords = fields.Char("Website meta keywords", translate=True) class WebsitePublishedMixin(models.AbstractModel): _name = "website.published.mixin" website_published = fields.Boolean('Visible in Website', copy=False) website_url = 
fields.Char('Website URL', compute='_compute_website_url', help='The full URL to access the document through the website.') @api.multi def _compute_website_url(self): for record in self: record.website_url = '#' @api.multi def website_publish_button(self): self.ensure_one() if self.env.user.has_group('website.group_website_publisher') and self.website_url != '#': return self.open_website_url() return self.write({'website_published': not self.website_published}) def open_website_url(self): return { 'type': 'ir.actions.act_url', 'url': self.website_url, 'target': 'self', } class Page(models.Model): _name = 'website.page' _inherits = {'ir.ui.view': 'ir_ui_view_id'} _inherit = 'website.published.mixin' _description = 'Page' name = fields.Char('Page Name') url = fields.Char('Page URL') website_ids = fields.Many2many('website', string='Websites') ir_ui_view_id = fields.Many2one('ir.ui.view', string='View', required=True, ondelete="cascade") website_indexed = fields.Boolean('Page Indexed', default=True) date_publish = fields.Datetime('Publishing Date') # This is needed to be able to display if page is a menu in /website/page_management menu_ids = fields.One2many('website.menu', 'page_id', 'Related Menus') is_homepage = fields.Boolean(compute='_compute_homepage', string='Homepage') is_visible = fields.Boolean(compute='_compute_visible', string='Is Visible') @api.one def _compute_homepage(self): self.is_homepage = self == self.env['website'].get_current_website().homepage_id @api.one def _compute_visible(self): self.is_visible = self.website_published and (not self.date_publish or self.date_publish < fields.Datetime.now()) @api.model def get_page_info(self, id, website_id): domain = ['|', ('website_ids', 'in', website_id), ('website_ids', '=', False), ('id', '=', id)] item = self.search_read(domain, fields=['id', 'name', 'url', 'website_published', 'website_indexed', 'date_publish', 'menu_ids', 'is_homepage'], limit=1) return item @api.model def save_page_info(self, 
website_id, data): website = self.env['website'].browse(website_id) if data['is_homepage'] and website.homepage_id.id != int(data['id']): # If page is set as the new homepage, set it on website (only page can be set as homepage) website.write({'homepage_id': data['id']}) else: if not data['is_homepage'] and website.homepage_id.id == int(data['id']): # If the page is not a homepage, check if it was the homepage website.write({'homepage_id': None}) #If URL has been edited, slug it page = self.browse(int(data['id'])) original_url = page.url url = data['url'] if page.url != url: url = slugify(url, max_length=200, path=True) menu = self.env['website.menu'].search([('page_id', '=', int(data['id']))]) if not data['is_menu']: #If the page is no longer in menu, we should remove its website_menu if menu: menu.unlink() else: #The page is now a menu, check if has already one if menu: menu.write({'url': url}) else: self.env['website.menu'].create({ 'name': data['name'], 'url': url, 'page_id': data['id'], 'parent_id': website.menu_id.id, 'website_id': website.id, }) page.write({ 'name': data['name'], 'url': url, 'website_published': data['website_published'], 'website_indexed': data['website_indexed'], 'date_publish': data['date_publish'] or None }) # Create redirect if needed if data['create_redirect']: self.env['website.redirect'].create({ 'type': data['redirect_type'], 'url_from': original_url, 'url_to': url, 'website_id': website.id, }) return True @api.multi def copy(self, default=None): view = self.env['ir.ui.view'].browse(self.ir_ui_view_id.id) new_view = view.copy() default = { 'name': self.name + ' (copy)', 'url': self.env['website'].get_unique_path(self.url), 'ir_ui_view_id': new_view.id, } return super(Page, self).copy(default=default) @api.model def clone_page(self, page_id, clone_menu=True): """ Clone a page, given its identifier :param page_id : website.page identifier """ page = self.browse(int(page_id)) new_page = page.copy() if clone_menu: menu = 
self.env['website.menu'].search([('page_id', '=', page_id)], limit=1) if menu: # If the page being cloned has a menu, clone it too new_menu = menu.copy() new_menu.write({'url': new_page.url, 'name': menu.name + ' (copy)', 'page_id': new_page.id}) return new_page.url + '?enable_editor=1' @api.multi def unlink(self): """ When a website_page is deleted, the ORM does not delete its ir_ui_view. So we got to delete it ourself, but only if the ir_ui_view is not used by another website_page. """ # Handle it's ir_ui_view for page in self: # Other pages linked to the ir_ui_view of the page being deleted (will it even be possible?) pages_linked_to_iruiview = self.env['website.page'].search( [('ir_ui_view_id', '=', self.ir_ui_view_id.id), ('id', '!=', self.id)] ) if len(pages_linked_to_iruiview) == 0: # If there is no other pages linked to that ir_ui_view, we can delete the ir_ui_view self.env['ir.ui.view'].search([('id', '=', self.ir_ui_view_id.id)]).unlink() # And then delete the website_page itself return super(Page, self).unlink() @api.model def delete_page(self, page_id): """ Delete a page or a link, given its identifier :param object_id : object identifier eg: menu-5 """ # If we are deleting a page (that could possibly be a menu with a page) page = self.env['website.page'].browse(int(page_id)) if page: # Check if it is a menu with a page and also delete menu if so menu = self.env['website.menu'].search([('page_id', '=', page.id)], limit=1) if menu: menu.unlink() page.unlink() @api.multi def write(self, vals): self.ensure_one() if 'url' in vals and not vals['url'].startswith('/'): vals['url'] = '/' + vals['url'] result = super(Page, self).write(vals) return result class Menu(models.Model): _name = "website.menu" _description = "Website Menu" _parent_store = True _parent_order = 'sequence' _order = "sequence" def _default_sequence(self): menu = self.search([], limit=1, order="sequence DESC") return menu.sequence or 0 name = fields.Char('Menu', required=True, 
translate=True) url = fields.Char('Url', default='') page_id = fields.Many2one('website.page', 'Related Page') new_window = fields.Boolean('New Window') sequence = fields.Integer(default=_default_sequence) website_id = fields.Many2one('website', 'Website') # TODO: support multiwebsite once done for ir.ui.views parent_id = fields.Many2one('website.menu', 'Parent Menu', index=True, ondelete="cascade") child_id = fields.One2many('website.menu', 'parent_id', string='Child Menus') parent_left = fields.Integer('Parent Left', index=True) parent_right = fields.Integer('Parent Rigth', index=True) @api.model def clean_url(self): # clean the url with heuristic if self.page_id: url = self.page_id.url else: url = self.url if not self.url.startswith('/'): if '@' in self.url and not self.url.startswith('mailto'): url = 'mailto:%s' % self.url elif not self.url.startswith('http'): url = '/%s' % self.url return url # would be better to take a menu_id as argument @api.model def get_tree(self, website_id, menu_id=None): def make_tree(node): page_id = node.page_id.id if node.page_id else None is_homepage = page_id and self.env['website'].browse(website_id).homepage_id.id == page_id menu_node = dict( id=node.id, name=node.name, url=node.page_id.url if page_id else node.url, new_window=node.new_window, sequence=node.sequence, parent_id=node.parent_id.id, children=[], is_homepage=is_homepage, ) for child in node.child_id: menu_node['children'].append(make_tree(child)) return menu_node if menu_id: menu = self.browse(menu_id) else: menu = self.env['website'].browse(website_id).menu_id return make_tree(menu) @api.model def save(self, website_id, data): def replace_id(old_id, new_id): for menu in data['data']: if menu['id'] == old_id: menu['id'] = new_id if menu['parent_id'] == old_id: menu['parent_id'] = new_id to_delete = data['to_delete'] if to_delete: self.browse(to_delete).unlink() for menu in data['data']: mid = menu['id'] # new menu are prefixed by new- if isinstance(mid, 
pycompat.string_types): new_menu = self.create({'name': menu['name']}) replace_id(mid, new_menu.id) for menu in data['data']: # if the url match a website.page, set the m2o relation page = self.env['website.page'].search([('url', '=', menu['url'])], limit=1) if page: menu['page_id'] = page.id self.browse(menu['id']).write(menu) return True class WebsiteRedirect(models.Model): _name = "website.redirect" _description = "Website Redirect" _order = "sequence, id" _rec_name = 'url_from' type = fields.Selection([('301', 'Moved permanently'), ('302', 'Moved temporarily')], string='Redirection Type') url_from = fields.Char('Redirect From') url_to = fields.Char('Redirect To') website_id = fields.Many2one('website', 'Website') active = fields.Boolean(default=True) sequence = fields.Integer(default=0)
richard-willowit/odoo
addons/website/models/website.py
Python
gpl-3.0
30,818
# -*- coding: utf-8 -*- from __future__ import unicode_literals import unittest from nose.plugins.attrib import attr from nose.tools import * # PEP8 asserts from textblob.translate import Translator from textblob.compat import unicode @attr('requires_internet') class TestTranslator(unittest.TestCase): def setUp(self): self.translator = Translator() self.sentence = "This is a sentence." def test_translate(self): t = self.translator.translate(self.sentence, to_lang="es") assert_equal(t, "Esta es una frase.") def test_detect(self): lang = self.translator.detect(self.sentence) assert_equal(lang, "en") lang2 = self.translator.detect("Hola") assert_equal(lang2, "es") lang3 = self.translator.detect("Kumusta ka na?") assert_equal(lang3, "tl") lang4 = self.translator.detect("Programmiersprache") assert_equal(lang4, 'de') def test_detect_non_ascii(self): lang = self.translator.detect(unicode("关于中文维基百科")) assert_equal(lang, 'zh-CN') lang2 = self.translator.detect(unicode("известен още с псевдонимите")) assert_equal(lang2, "bg") lang3 = self.translator.detect(unicode("Избранная статья")) assert_equal(lang3, "ru") def test_get_language_from_json5(self): json5 = '[[["This is a sentence.","This is a sentence.","",""]],,"en",,,,,,[["en"]],0]' lang = self.translator._get_language_from_json5(json5) assert_equal(lang, "en") if __name__ == '__main__': unittest.main()
bbengfort/TextBlob
tests/test_translate.py
Python
mit
1,627
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 Jacob Mendt

Created on 05.08.15

@author: mendt
'''
import uuid
import os

from georeference.settings import ELASTICSEARCH_INDEX
from georeference.settings import GEOREFERENCE_PERSITENT_TARGETDIR
from georeference.settings import OAI_ID_PATTERN
from georeference.settings import TMP_DIR
from georeference.utils.parser.georeferenceparser import parseGcps
from georeference.utils.process.georeferencer import addOverviews
from georeference.utils.process.georeferencer import createClipShapefile
from georeference.utils.process.georeferencer import rectifyTps
from georeference.utils.process.georeferencer import rectifyPolynom
from georeference.utils.process.tools import convertPostgisStringToList
from georeference.utils.process.tools import stripSRIDFromEPSG
from georeference.persistent.elastic.datamodel import createSearchRecord
from georeference.persistent.elastic.elasticsearch import pushRecordToEs
from georeference.persistent.elastic.elasticsearch import deleteRecordFromEsById

def processGeorefImage(mapObj, georefObj, dbsession, logger):
    """ Function process a persistent georeference image.

    Rectifies the map's original image with the ground control points and
    transformation algorithm stored on the georeference process record,
    optionally clipped to the process' clip polygon, and finally attaches
    overview levels to the produced image.

    :type georeference.models.vkdb.map.Map: mapObj
    :type georeference.models.vkdb.georeferenzierungsprozess.Georeferenzierungsprozess: georefObj
    :type sqlalchemy.orm.session.Session: dbsession
    :type logging.Logger: logger
    :raise ValueError: in case of an unsupported transformation algorithm
    :return: str Path of the produced image
    """
    gcps = parseGcps(georefObj.georefparams['gcps'])
    georefTargetSRS = stripSRIDFromEPSG(georefObj.georefparams['target'])
    # Persistent target file: <target dir>/<maptype>/<apsdateiname>.tif
    targetPath = os.path.join(
        GEOREFERENCE_PERSITENT_TARGETDIR,
        os.path.join(str(mapObj.maptype).lower(), mapObj.apsdateiname + '.tif'))
    transformationAlgorithm = georefObj.georefparams['algorithm'] if 'algorithm' in georefObj.georefparams else 'affine'

    # create clip shapefile if a clip polygon is registered for this process
    # NOTE(review): the temporary shapefile created in TMP_DIR is never
    # removed - consider cleaning it up after rectification.
    clipShpPath = None
    if georefObj.clip is not None:
        clipShpPath = os.path.join(TMP_DIR, '%s' % uuid.uuid4())
        clipShpPath = createClipShapefile(
            convertPostgisStringToList(georefObj.clip), clipShpPath,
            georefObj.getSRIDClip(dbsession))

    logger.debug('Process georeference result ...')
    if transformationAlgorithm == 'affine':
        # affine == first order polynomial transformation
        destPath = rectifyPolynom(mapObj.originalimage, targetPath, [], gcps, georefTargetSRS,
                                  logger, TMP_DIR, clipShpPath, order=1)
    elif transformationAlgorithm == 'polynom':
        destPath = rectifyPolynom(mapObj.originalimage, targetPath, [], gcps, georefTargetSRS,
                                  logger, TMP_DIR, clipShpPath)
    elif transformationAlgorithm == 'tps':
        destPath = rectifyTps(mapObj.originalimage, targetPath, [], gcps, georefTargetSRS,
                              logger, TMP_DIR, clipShpPath)
    else:
        # Previously an unknown algorithm fell through with destPath left as
        # None and crashed later inside addOverviews; fail fast with a clear
        # message instead.
        raise ValueError('Unsupported transformation algorithm "%s"' % transformationAlgorithm)

    logger.debug('Add overviews to the image ...')
    addOverviews(destPath, '2 4 8 16 32', logger)
    return destPath

def pushRecordToSearchIndex(mapObj, dbsession, logger, georefObj=None):
    """ Push the metadata for a given mapObj to the search index (actual ElasticSearch).

    :type georeference.models.vkdb.map.Map: mapObj
    :type sqlalchemy.orm.session.Session: dbsession
    :type logging.Logger: logger
    :type georefObj: georeference.models.vkdb.georeferenzierungsprozess.Georeferenzierungsprozess|None
    :return: str RecordId of the ElasticSearch record
    """
    datarecord = createSearchRecord(mapObj, dbsession, logger, georefObj)
    return pushRecordToEs(datarecord, ELASTICSEARCH_INDEX, logger)

def removeRecordFromSearchIndex(mapObj):
    """ Removes the equivalent record from the search index. Instead georeference is set to false

    :deprecated:
    :type georeference.models.vkdb.map.Map: mapObj
    :return:
    """
    # The ElasticSearch document id is derived from the map id via the
    # OAI id pattern.
    key = OAI_ID_PATTERN % mapObj.id
    deleteRecordFromEsById(key, ELASTICSEARCH_INDEX)
slub/vk2-georeference
georeference/persistent/jobs/genericjobs.py
Python
gpl-3.0
3,721
#!/usr/bin/python # # Copyright 2011 Ytai Ben-Tsvi. All rights reserved. # # # Redistribution and use in source and binary forms, with or without modification, are # permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of # conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list # of conditions and the following disclaimer in the documentation and/or other materials # provided with the distribution. # # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARSHAN POURSOHI OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are those of the # authors and should not be interpreted as representing official policies, either expressed # or implied. 
# import sys import select import io import fcntl import os def main(pipe_name): pipe = io.open(pipe_name, 'r+b') fl = fcntl.fcntl(pipe.fileno(), fcntl.F_GETFL) fcntl.fcntl(pipe.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK) while True: rlist, _, _ = select.select((pipe, sys.stdin), (), ()) if pipe in rlist: got = pipe.read(1) while got is not None: if not got: print "Bridget disconnected" return print "%.2x" % ord(got), sys.stdout.flush() got = pipe.read(1) if sys.stdin in rlist: line = sys.stdin.readline() if not line: print "Got EOF" return first = None for c in line: if c not in "0123456789abcdef": first = None continue if first is None: first = c continue i = int(first + c, 16) pipe.write(chr(i)) pipe.flush() first = None if __name__ == '__main__': main(sys.argv[1])
ytai/ioio
tools/hex_communicator.py
Python
apache-2.0
2,547
# -*- coding: utf-8 -*- # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Evidence processor to mount local images or disks.""" from __future__ import unicode_literals import logging import os import subprocess import tempfile import time from prometheus_client import Gauge from turbinia import config from turbinia import TurbiniaException log = logging.getLogger('turbinia') RETRY_MAX = 10 turbinia_failed_loop_device_detach = Gauge( 'turbinia_failed_loop_device_detach', 'Total number of loop devices failed to detach') def GetDiskSize(source_path): """Gets the size of disk evidence in bytes. Tries using blockdev to query the size of block devices, and falls back on filesize for image files. Args: source_path(str): the source path of the disk. Returns: int: the size of the disk in bytes. 
""" size = None if not os.path.exists(source_path): log.error( 'Cannot check disk size for non-existing source_path {0!s}'.format( source_path)) return None cmd = ['blockdev', '--getsize64', source_path] log.info('Running {0!s}'.format(cmd)) # Run blockdev first, this will fail if evidence is not a block device try: cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).split() size = int(cmd_output[0].decode('utf-8')) except subprocess.CalledProcessError: log.debug('blockdev failed, attempting to get file size') except ValueError: log.debug( 'Unexpected output from blockdev: {0:s}'.format( cmd_output[0].decode('utf-8'))) if size is None: # evidence is not a block device, check image file size cmd = ['ls', '-s', source_path] try: cmd_output = subprocess.check_output(cmd).split() size = int(cmd_output[0].decode('utf-8')) except subprocess.CalledProcessError as e: log.warning('Checking disk size failed: {0!s}'.format(e)) return size def PreprocessBitLocker(source_path, partition_offset=None, credentials=None): """Uses libbde on a target block device or image file. Creates a decrypted virtual device of the encrypted volume. Args: source_path(str): the source path to run bdemount on. partition_offset(int): offset of volume in bytes. credentials(list[(str, str)]): decryption credentials set in evidence setup Raises: TurbiniaException: if source_path doesn't exist or if the bdemount command failed to create a virtual device. 
Returns: str: the path to the decrypted virtual block device """ config.LoadConfig() mount_prefix = config.MOUNT_DIR_PREFIX decrypted_device = None if not os.path.exists(source_path): raise TurbiniaException( ('Cannot create virtual device for non-existing source_path ' '{0!s}').format(source_path)) if os.path.exists(mount_prefix) and not os.path.isdir(mount_prefix): raise TurbiniaException( 'Mount dir {0:s} exists, but is not a directory'.format(mount_prefix)) if not os.path.exists(mount_prefix): log.info('Creating local mount parent directory {0:s}'.format(mount_prefix)) try: os.makedirs(mount_prefix) except OSError as e: raise TurbiniaException( 'Could not create mount directory {0:s}: {1!s}'.format( mount_prefix, e)) mount_path = tempfile.mkdtemp(prefix='turbinia', dir=mount_prefix) for credential_type, credential_data in credentials: libbde_command = ['sudo', 'bdemount', '-o', str(partition_offset)] if credential_type == 'password': libbde_command.extend(['-p', credential_data]) elif credential_type == 'recovery_password': libbde_command.extend(['-r', credential_data]) else: # Unsupported credential type, try the next log.warning('Unsupported credential type: {0!s}'.format(credential_type)) continue libbde_command.extend(['-X', 'allow_other', source_path, mount_path]) # Not logging command since it will contain credentials try: subprocess.check_call(libbde_command) except subprocess.CalledProcessError as e: # Decryption failed with these credentials, try the next continue # Decrypted volume was mounted decrypted_device = os.path.join(mount_path, 'bde1') if not os.path.exists(decrypted_device): raise TurbiniaException( 'Cannot attach decrypted device: {0!s}'.format(decrypted_device)) else: log.info('Decrypted device attached: {0!s}'.format(decrypted_device)) return decrypted_device def PreprocessLosetup( source_path, partition_offset=None, partition_size=None, lv_uuid=None): """Runs Losetup on a target block device or image file. 
  Args:
    source_path(str): the source path to run losetup on.
    partition_offset(int): offset of volume in bytes.
    partition_size(int): size of volume in bytes.
    lv_uuid(str): LVM Logical Volume UUID.

  Raises:
    TurbiniaException: if source_path doesn't exist or if the losetup command
        failed to run in anyway.

  Returns:
    str: the path to the 'disk' block device
  """
  losetup_device = None
  if lv_uuid:
    # LVM
    lvdisplay_command = [
        'sudo', 'lvdisplay', '--colon', '--select',
        'lv_uuid={0:s}'.format(lv_uuid)
    ]
    log.info('Running: {0:s}'.format(' '.join(lvdisplay_command)))
    try:
      # Take the last non-empty line of the colon-separated lvdisplay output.
      lvdetails = subprocess.check_output(
          lvdisplay_command, universal_newlines=True).split('\n')[-2].strip()
    except subprocess.CalledProcessError as e:
      raise TurbiniaException(
          'Could not determine logical volume device {0!s}'.format(e))
    # Colon-separated record: field 0 is used below as the LV device path,
    # field 1 as the volume group name.
    lvdetails = lvdetails.split(':')
    volume_group = lvdetails[1]
    # Activate the volume group so the LV block device appears.
    vgchange_command = ['sudo', 'vgchange', '-a', 'y', volume_group]
    log.info('Running: {0:s}'.format(' '.join(vgchange_command)))
    try:
      subprocess.check_call(vgchange_command)
    except subprocess.CalledProcessError as e:
      raise TurbiniaException('Could not activate volume group {0!s}'.format(e))
    losetup_device = lvdetails[0]
  else:
    if not os.path.exists(source_path):
      raise TurbiniaException((
          'Cannot create loopback device for non-existing source_path '
          '{0!s}').format(source_path))
    # TODO(aarontp): Remove hard-coded sudo in commands:
    # https://github.com/google/turbinia/issues/73
    # -r attaches the loop device read-only; --show prints the chosen device.
    losetup_command = ['sudo', 'losetup', '--show', '--find', '-r']
    if partition_size:
      # Evidence is DiskPartition
      losetup_command.extend(['-o', str(partition_offset)])
      losetup_command.extend(['--sizelimit', str(partition_size)])
    losetup_command.append(source_path)
    log.info('Running command {0:s}'.format(' '.join(losetup_command)))
    try:
      losetup_device = subprocess.check_output(
          losetup_command, universal_newlines=True).strip()
    except subprocess.CalledProcessError as e:
      raise TurbiniaException('Could not set losetup devices {0!s}'.format(e))

  return losetup_device


def PreprocessMountDisk(partition_paths, partition_number):
  """Locally mounts disk in an instance.

  Args:
    partition_paths(list(str)): A list of paths to partition block devices;
    partition_number(int): the number of the partition to mount. Remember these
        are 1-indexed (first partition is 1).

  Raises:
    TurbiniaException: if the mount command failed to run.

  Returns:
    str: the path to the mounted filesystem.
  """
  config.LoadConfig()
  mount_prefix = config.MOUNT_DIR_PREFIX

  if partition_number > len(partition_paths):
    raise TurbiniaException(
        'Can not mount partition {0:d}: found only {1:d} partitions in '
        'Evidence.'.format(partition_number, len(partition_paths)))

  # Partitions are 1-indexed for the user and the system
  if partition_number < 1:
    raise TurbiniaException(
        'Can not mount partition {0:d}: partition numbering starts at 1'.format(
            partition_number))

  partition_path = partition_paths[partition_number - 1]

  if not os.path.exists(partition_path):
    raise TurbiniaException(
        'Could not mount partition {0:s}, the path does not exist'.format(
            partition_path))

  if os.path.exists(mount_prefix) and not os.path.isdir(mount_prefix):
    raise TurbiniaException(
        'Mount dir {0:s} exists, but is not a directory'.format(mount_prefix))
  if not os.path.exists(mount_prefix):
    log.info('Creating local mount parent directory {0:s}'.format(mount_prefix))
    try:
      os.makedirs(mount_prefix)
    except OSError as e:
      raise TurbiniaException(
          'Could not create mount directory {0:s}: {1!s}'.format(
              mount_prefix, e))

  mount_path = tempfile.mkdtemp(prefix='turbinia', dir=mount_prefix)
  mount_cmd = ['sudo', 'mount', '-o', 'ro']
  fstype = GetFilesystem(partition_path)
  if fstype in ['ext3', 'ext4']:
    # This is in case the underlying filesystem is dirty, as we want to mount
    # everything read-only.
    mount_cmd.extend(['-o', 'noload'])
  mount_cmd.extend([partition_path, mount_path])

  log.info('Running: {0:s}'.format(' '.join(mount_cmd)))
  try:
    subprocess.check_call(mount_cmd)
  except subprocess.CalledProcessError as e:
    raise TurbiniaException('Could not mount directory {0!s}'.format(e))

  return mount_path


def PreprocessMountPartition(partition_path, filesystem_type):
  """Locally mounts disk partition in an instance.

  Args:
    partition_path(str): A path to a partition block device
    filesystem_type(str): Filesystem of the partition to be mounted

  Raises:
    TurbiniaException: if the mount command failed to run.

  Returns:
    str: the path to the mounted filesystem.
  """
  config.LoadConfig()
  mount_prefix = config.MOUNT_DIR_PREFIX

  if not os.path.exists(partition_path):
    raise TurbiniaException(
        'Could not mount partition {0:s}, the path does not exist'.format(
            partition_path))

  if os.path.exists(mount_prefix) and not os.path.isdir(mount_prefix):
    raise TurbiniaException(
        'Mount dir {0:s} exists, but is not a directory'.format(mount_prefix))
  if not os.path.exists(mount_prefix):
    log.info('Creating local mount parent directory {0:s}'.format(mount_prefix))
    try:
      os.makedirs(mount_prefix)
    except OSError as e:
      raise TurbiniaException(
          'Could not create mount directory {0:s}: {1!s}'.format(
              mount_prefix, e))

  mount_path = tempfile.mkdtemp(prefix='turbinia', dir=mount_prefix)
  mount_cmd = ['sudo', 'mount', '-o', 'ro']
  if filesystem_type == 'EXT':
    # This is in case the underlying filesystem is dirty, as we want to mount
    # everything read-only.
    mount_cmd.extend(['-o', 'noload'])
  elif filesystem_type == 'XFS':
    mount_cmd.extend(['-o', 'norecovery'])
  mount_cmd.extend([partition_path, mount_path])

  log.info('Running: {0:s}'.format(' '.join(mount_cmd)))
  try:
    subprocess.check_call(mount_cmd)
  except subprocess.CalledProcessError as e:
    raise TurbiniaException('Could not mount directory {0!s}'.format(e))

  return mount_path


def GetFilesystem(path):
  """Uses the sleuthkit to detect the filesystem of a partition block device.

  Args:
    path(str): the full path to the block device.

  Returns:
    str: the filesystem detected (for example: 'ext4')
  """
  cmd = ['fsstat', '-t', path]
  log.info('Running {0!s}'.format(cmd))
  for retry in range(RETRY_MAX):
    fstype = subprocess.check_output(cmd).split()
    if fstype:
      break
    else:
      log.debug(
          'Filesystem type for {0:s} not found, retry {1:d} of {2:d}'.format(
              path, retry, RETRY_MAX))
      time.sleep(1)

  # NOTE(review): the error message mentions 'partition', but the condition
  # actually checks that fsstat reported exactly one filesystem type token.
  if len(fstype) != 1:
    raise TurbiniaException(
        '{0:s} should contain exactly one partition, found {1:d}'.format(
            path, len(fstype)))
  fstype = fstype[0].decode('utf-8').strip()
  log.info('Found filesystem type {0:s} for path {1:s}'.format(fstype, path))
  return fstype


def PostprocessDeleteLosetup(device_path, lv_uuid=None):
  """Removes a loop device.

  Args:
    device_path(str): the path to the block device to remove (ie: /dev/loopX).
    lv_uuid(str): LVM Logical Volume UUID.

  Raises:
    TurbiniaException: if the losetup command failed to run.
  """
  if lv_uuid:
    # LVM
    # Rather than detaching a loopback device, we need to deactivate the volume
    # group.
    lvdisplay_command = [
        'sudo', 'lvdisplay', '--colon', '--select',
        'lv_uuid={0:s}'.format(lv_uuid)
    ]
    log.info('Running: {0:s}'.format(' '.join(lvdisplay_command)))
    try:
      lvdetails = subprocess.check_output(
          lvdisplay_command, universal_newlines=True).split('\n')[-2].strip()
    except subprocess.CalledProcessError as e:
      raise TurbiniaException(
          'Could not determine volume group {0!s}'.format(e))
    lvdetails = lvdetails.split(':')
    volume_group = lvdetails[1]

    vgchange_command = ['sudo', 'vgchange', '-a', 'n', volume_group]
    log.info('Running: {0:s}'.format(' '.join(vgchange_command)))
    try:
      subprocess.check_call(vgchange_command)
    except subprocess.CalledProcessError as e:
      raise TurbiniaException(
          'Could not deactivate volume group {0!s}'.format(e))
  else:
    # TODO(aarontp): Remove hard-coded sudo in commands:
    # https://github.com/google/turbinia/issues/73
    losetup_cmd = ['sudo', 'losetup', '-d', device_path]
    log.info('Running: {0:s}'.format(' '.join(losetup_cmd)))
    try:
      subprocess.check_call(losetup_cmd)
    except subprocess.CalledProcessError as e:
      turbinia_failed_loop_device_detach.inc()
      raise TurbiniaException('Could not delete losetup device {0!s}'.format(e))

    # Check that the device was actually removed
    losetup_cmd = ['sudo', 'losetup', '-a']
    log.info('Running: {0:s}'.format(' '.join(losetup_cmd)))
    try:
      output = subprocess.check_output(losetup_cmd)
    except subprocess.CalledProcessError as e:
      raise TurbiniaException(
          'Could not check losetup device status {0!s}'.format(e))
    # losetup -a output is bytes; search for the device path within it.
    if output.find(device_path.encode('utf-8')) != -1:
      turbinia_failed_loop_device_detach.inc()
      raise TurbiniaException(
          'Could not delete losetup device {0!s}'.format(device_path))

  log.info('losetup device [{0!s}] deleted.'.format(device_path))


def PostprocessUnmountPath(mount_path):
  """Unmounts a local disk.

  Args:
    mount_path(str): The path to the mount point to unmount.

  Raises:
    TurbiniaException: if the umount command failed to run.
  """
  # TODO(aarontp): Remove hard-coded sudo in commands:
  # https://github.com/google/turbinia/issues/73
  umount_cmd = ['sudo', 'umount', mount_path]
  log.info('Running: {0:s}'.format(' '.join(umount_cmd)))
  try:
    subprocess.check_call(umount_cmd)
  except subprocess.CalledProcessError as e:
    raise TurbiniaException('Could not unmount directory {0!s}'.format(e))

  log.info('Removing mount path {0:s}'.format(mount_path))
  try:
    os.rmdir(mount_path)
  except OSError as e:
    raise TurbiniaException(
        'Could not remove mount path directory {0:s}: {1!s}'.format(
            mount_path, e))
google/turbinia
turbinia/processors/mount_local.py
Python
apache-2.0
15,621
import re
import os.path
import datetime
import base64

import aql

# ==============================================================================

info = aql.get_aql_info()

# License header stamped onto the generated, single-file aql module.
HEADER = """#!/usr/bin/env python
#
# THIS FILE WAS AUTO-GENERATED. DO NOT EDIT!
#
# Copyright (c) 2011-{year} of the {name} project, site: {url}
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""".format(year=datetime.date.today().year, name=info.name, url=info.url)

# ==============================================================================

# Statement appended to the generated module recording the build date.
AQL_DATE = '_AQL_VERSION_INFO.date = "{date}"'.format(
    date=datetime.date.today().isoformat())

# ==============================================================================

# Entry-point template appended to the standalone script; it registers the
# script itself as the 'aql' module before running main().
MAIN = """
if __name__ == '__main__':
    aql_module_globals = globals().copy()
    aql_module_name = "aql"
    aql_module = imp.new_module(aql_module_name)

    aql_module_globals.update( aql_module.__dict__)
    aql_module.__dict__.update(aql_module_globals)

    sys.modules[aql_module_name] = aql_module

{embedded_tools}
    sys.exit(main())
"""

# ==============================================================================

# Template wrapping a base64 blob of zipped tools into the standalone script.
EMBEDDED_TOOLS = '\n    _EMBEDDED_TOOLS.append(b"""\n%s""")\n'

# ==============================================================================


class AqlPreprocess (aql.FileBuilder):
    """Strips comments/imports from one aql source file and records its
    stdlib imports and intra-package dependencies for later linking."""

    split = aql.FileBuilder.split_single

    # ----------------------------------------------------------

    def get_trace_name(self, source_entities, brief):
        return "Preprocess file"

    # ----------------------------------------------------------

    def get_trace_targets(self, target_entities, brief):
        return None

    # -----------------------------------------------------------

    def build(self, source_entities, targets):
        src_file = source_entities[0].get()

        empty_re = re.compile(r'^\s*\r*\n', re.MULTILINE)
        slash_re = re.compile(r'\\\r*\n', re.MULTILINE)
        comments_re = re.compile(r"^\s*#.*$", re.MULTILINE)
        all_stmt_re = re.compile(
            r"^__all__\s*=\s*\(.+?\)", re.MULTILINE | re.DOTALL)

        content = aql.read_text_file(src_file)

        # Join backslash-continued lines, then drop comments and __all__.
        content = slash_re.sub("", content)
        content = comments_re.sub("", content)
        content = all_stmt_re.sub("", content)

        # -----------------------------------------------------------
        # Collect and remove plain 'import X' statements (stdlib modules);
        # they are re-emitted once at the top of the linked module.

        import_re = re.compile(r"^import\s+(.+)$", re.MULTILINE)
        std_imports = set()

        def import_handler(match, _std_imports=std_imports):
            module_name = match.group(1)
            _std_imports.add(module_name)
            return ""

        content = import_re.sub(import_handler, content)

        # -----------------------------------------------------------
        # Collect and remove intra-package 'from aql... import ...' lines,
        # converting each module name into a path fragment used later to
        # match dependency files.

        aql_import_re = re.compile(r"^\s*from\s+(\.?aql.+)\s+import\s+.+$",
                                   re.MULTILINE)
        aql_imports = set()

        def aql_import_handler(match, _aql_imports=aql_imports):
            module_name = match.group(1)
            if module_name.startswith('.'):
                module_name = os.sep + module_name[1:] + '.py'
            else:
                module_name = os.sep + \
                    module_name.replace('.', os.sep) + os.sep
            _aql_imports.add(module_name)
            return ""

        content = aql_import_re.sub(aql_import_handler, content)

        # -----------------------------------------------------------

        content = empty_re.sub("", content)

        target = aql.SimpleEntity(
            name=src_file, data=(std_imports, aql_imports, content))
        targets.add_target_entity(target)

# ==============================================================================


class AqlLinkCore (aql.FileBuilder):
    """Concatenates the preprocessed aql sources into one module file,
    ordered so that every file appears after the files it depends on."""

    def __init__(self, options, target):
        self.target = self.get_target_path(target, ext='.py')

    def get_trace_name(self, source_entities, brief):
        return "Link AQL Module"

    # ----------------------------------------------------------

    def get_target_entities(self, source_entities):
        return self.target

    # ----------------------------------------------------------

    def get_trace_sources(self, source_entities, brief):
        return (os.path.basename(src.name) for src in source_entities)

    # -----------------------------------------------------------

    def replace(self, options, source_entities):
        # Expand the source dirs into .py files and preprocess each one.
        finder = aql.FindFilesBuilder(options, mask='*.py',
                                      exclude_mask="__init__.py")
        core_files = aql.Node(finder, source_entities)
        return aql.Node(AqlPreprocess(options), core_files)

    # -----------------------------------------------------------

    @staticmethod
    def _mod_to_files(file2deps, modules):
        # Map each module path fragment to the set of files whose path
        # contains it (substring match).
        mod2files = {}
        for mod in modules:
            files = set()
            for file in file2deps:
                if file.find(mod) != -1:
                    files.add(file)
            mod2files[mod] = files
        return mod2files

    # -----------------------------------------------------------

    @staticmethod
    def _get_dep_to_files(file2deps, mod2files):
        # Build the reverse map (dependency file -> dependent files) and a
        # file -> dependency-files map resolved from module fragments.
        dep2files = {}
        tmp_file2deps = {}

        for file, mods in file2deps.items():
            for mod in mods:
                files = mod2files[mod]
                tmp_file2deps.setdefault(file, set()).update(files)
                for f in files:
                    dep2files.setdefault(f, set()).add(file)

        return dep2files, tmp_file2deps

    # -----------------------------------------------------------

    @staticmethod
    def _get_content(files_content, dep2files, file2deps, tails):
        # Topological emission: repeatedly emit files with no remaining
        # dependencies ('tails') and release their dependents.
        content = ""
        while tails:
            tail = tails.pop(0)
            content += files_content[tail]

            files = dep2files.pop(tail, [])
            for file in files:
                deps = file2deps[file]
                deps.remove(tail)
                if not deps:
                    tails.append(file)
                    del file2deps[file]

        return content

    # -----------------------------------------------------------

    def build(self, source_entities, targets):
        file2deps = {}
        files_content = {}
        modules = set()
        tails = []
        std_modules = set()

        for entity in source_entities:
            file_name = entity.name

            mod_std_imports, mod_deps, mod_content = entity.data
            if not mod_content:
                continue

            if not mod_deps:
                tails.append(file_name)

            files_content[file_name] = mod_content
            file2deps[file_name] = mod_deps
            std_modules.update(mod_std_imports)
            modules.update(mod_deps)

        mod2files = self._mod_to_files(file2deps, modules)
        dep2files, file2deps = self._get_dep_to_files(file2deps, mod2files)

        content = self._get_content(files_content, dep2files, file2deps, tails)

        imports_content = '\n'.join(
            "import %s" % module for module in sorted(std_modules))

        content = '\n'.join([HEADER, imports_content, content, AQL_DATE])

        aql.write_text_file(self.target, data=content)
        targets.add_target_files(self.target)

# ==============================================================================


class AqlPackTools (aql.FileBuilder):
    """Zips the tool directories and base64-encodes the archives so they can
    be embedded into the standalone script."""

    NAME_ATTRS = ['target']

    def __init__(self, options, target):
        self.target = target
        self.build_target = self.get_target_path(target, ext='.b64')

    # ----------------------------------------------------------

    def get_trace_name(self, source_entities, brief):
        return "Pack Tools"

    # ----------------------------------------------------------

    def get_target_entities(self, source_values):
        return self.build_target

    # ----------------------------------------------------------

    def replace(self, options, source_entities):
        tools_path = [source.get() for source in source_entities]
        if not tools_path:
            return None

        finder = aql.FindFilesBuilder(options, '*.py')
        zipper = aql.ZipFilesBuilder(options, target=self.target,
                                     basedir=tools_path)

        tool_files = aql.Node(finder, source_entities)
        zip = aql.Node(zipper, tool_files)
        return zip

    # -----------------------------------------------------------

    def build(self, source_entities, targets):
        target = self.build_target
        with aql.open_file(target, write=True, binary=True,
                           truncate=True) as output:
            for source in source_entities:
                zip_file = source.get()
                with aql.open_file(zip_file, read=True, binary=True) as input:
                    # NOTE(review): base64.encode() (stream form) was removed
                    # in Python 3.9 -- migrate to base64.encodebytes or a
                    # manual read/encode loop before upgrading.
                    base64.encode(input, output)

        targets.add_target_files(target, tags="embedded_tools")

# ==============================================================================


class AqlLinkStandalone (aql.FileBuilder):
    """Concatenates the linked module and packed tools into the final
    self-contained executable script."""

    def __init__(self, options, target):
        self.target = self.get_target_path(target)

    # -----------------------------------------------------------

    def get_trace_name(self, source_entities, brief):
        return "Link AQL standalone script"

    # ----------------------------------------------------------

    def get_target_entities(self, source_values):
        return self.target

    # ----------------------------------------------------------

    def build(self, source_entities, targets):
        content = []
        embedded_tools = ""
        for source in source_entities:
            data = aql.read_text_file(source.get())
            if not data:
                continue

            # Sources tagged "embedded_tools" (by AqlPackTools.build) are the
            # base64 blob; everything else is script body.
            if "embedded_tools" in source.tags:
                embedded_tools = EMBEDDED_TOOLS % data
            else:
                content.append(data)

        content.append(MAIN.format(embedded_tools=embedded_tools))
        content = '\n'.join(content)

        aql.write_text_file(self.target, content)
        targets.add_target_files(self.target)

# ==============================================================================


class AqlBuildTool(aql.Tool):
    """aql tool facade exposing the three linker builders."""

    def pack_tools(self, options, target):
        return AqlPackTools(options, target)

    def link_module(self, options, target):
        return AqlLinkCore(options, target)

    def link_standalone(self, options, target):
        return AqlLinkStandalone(options, target)

    # CamelCase aliases kept for backwards compatibility.
    PackTools = pack_tools
    LinkModule = link_module
    LinkStandalone = link_standalone
aqualid/aqualid
make/aql_linker.py
Python
mit
11,736
# -- coding: utf-8 --
"""
pysteps.verification.lifetime
=============================

Estimation of precipitation lifetime from a decaying verification score
function (e.g. autocorrelation function).

.. autosummary::
    :toctree: ../generated/

    lifetime
    lifetime_init
    lifetime_accum
    lifetime_compute
"""

from math import exp

import numpy as np

# Compatibility shims: scipy.integrate.simps was removed in SciPy 1.14
# (renamed to ``simpson`` in 1.6), and numpy.trapz was removed in NumPy 2.0
# (renamed to ``trapezoid``). Fall back to the old names on old versions.
try:
    from scipy.integrate import simpson as _simpson
except ImportError:  # SciPy < 1.6
    from scipy.integrate import simps as _simpson

try:
    _trapezoid = np.trapezoid
except AttributeError:  # NumPy < 2.0
    _trapezoid = np.trapz


def lifetime(X_s, X_t, rule="1/e"):
    """
    Compute the average lifetime by integrating the correlation function
    as a function of lead time. When not using the 1/e rule, the correlation
    function must be long enough to converge to 0, otherwise the lifetime is
    underestimated. The correlation function can be either empirical or
    theoretical, e.g. derived using the function 'ar_acf' in
    timeseries/autoregression.py.

    Parameters
    ----------
    X_s: array-like
        Array with the correlation function.
        Works also with other decaying scores that are defined in the range
        [0,1]=[min_skill,max_skill].
    X_t: array-like
        Array with the forecast lead times in the desired unit,
        e.g. [min, hour].
    rule: str {'1/e', 'trapz', 'simpson'}, optional
        Name of the method to integrate the correlation curve. \n
        '1/e' uses the 1/e rule and assumes an exponential decay. It linearly
        interpolates the time when the correlation goes below the value 1/e.
        When all values are > 1/e it returns the max lead time. When all
        values are < 1/e it returns the min lead time. \n
        'trapz' uses the trapezoidal rule for integration.\n
        'simpson' uses the Simpson's rule for integration.

    Returns
    -------
    lf: float
        Estimated lifetime with same units of X_t.
    """
    # Copy the inputs so accumulation never mutates caller data.
    X_s = X_s.copy()
    X_t = X_t.copy()

    life = lifetime_init(rule)
    lifetime_accum(life, X_s, X_t)
    return lifetime_compute(life)


def lifetime_init(rule="1/e"):
    """
    Initialize a lifetime object.

    Parameters
    ----------
    rule: str {'1/e', 'trapz', 'simpson'}, optional
        Name of the method to integrate the correlation curve. \n
        '1/e' uses the 1/e rule and assumes an exponential decay.
        It linearly interpolates the time when the correlation goes below
        the value 1/e. When all values are > 1/e it returns the max lead time.
        When all values are < 1/e it returns the min lead time.\n
        'trapz' uses the trapezoidal rule for integration.\n
        'simpson' uses the Simpson's rule for integration.

    Returns
    -------
    out: dict
      The lifetime object.
    """
    list_rules = ["trapz", "simpson", "1/e"]
    if rule not in list_rules:
        raise ValueError(
            "Unknown rule %s for integration.\n" % rule
            + "The available methods are: " + str(list_rules)
        )

    lifetime = {}
    lifetime["lifetime_sum"] = 0.0
    lifetime["n"] = 0.0
    lifetime["rule"] = rule

    return lifetime


def lifetime_accum(lifetime, X_s, X_t):
    """
    Compute the lifetime by integrating the correlation function
    and accumulate the result into the given lifetime object.

    Parameters
    ----------
    lifetime: dict
        A lifetime object created with lifetime_init; updated in place.
    X_s: array-like
        Array with the correlation function.
        Works also with other decaying scores that are defined in the range
        [0,1]=[min_skill,max_skill].
    X_t: array-like
        Array with the forecast lead times in the desired unit,
        e.g. [min, hour].
    """
    if lifetime["rule"] == "trapz":
        lf = _trapezoid(X_s, x=X_t)
    elif lifetime["rule"] == "simpson":
        lf = _simpson(X_s, x=X_t)
    elif lifetime["rule"] == "1/e":
        euler_number = 1.0 / exp(1.0)
        X_s_ = np.array(X_s)
        is_euler_reached = np.sum(X_s_ <= euler_number) > 0
        if is_euler_reached:
            # First index where the score drops to 1/e or below.
            idx_b = np.argmax(X_s_ <= euler_number)
            if idx_b > 0:
                # Linearly interpolate the crossing time between the last
                # point above 1/e (idx_a) and the first at/below it (idx_b).
                idx_a = idx_b - 1
                fraction_score = (
                    (euler_number - X_s[idx_b])
                    * (X_t[idx_a] - X_t[idx_b])
                    / (X_s[idx_a] - X_s[idx_b])
                )
                lf = X_t[idx_b] + fraction_score
            else:
                # if all values are below the 1/e value, return min lead time
                lf = np.min(X_t)
        else:
            # if all values are above the 1/e value, return max lead time
            lf = np.max(X_t)

    lifetime["lifetime_sum"] += lf
    lifetime["n"] += 1


def lifetime_compute(lifetime):
    """
    Compute the average value from the lifetime object.

    Parameters
    ----------
    lifetime: dict
        A lifetime object created with lifetime_init.

    Returns
    -------
    out: float
        The computed lifetime.
    """
    return 1.0 * lifetime["lifetime_sum"] / lifetime["n"]
pySTEPS/pysteps
pysteps/verification/lifetime.py
Python
bsd-3-clause
4,985
import ir_report import py3o_report import py3o_template import py3o_server import py3o_fusion_filetype
xcgd/report_py3o
__init__.py
Python
agpl-3.0
104
#!/usr/bin/python3

from ABE_ExpanderPi import IO
import time

"""
# ================================================
# ABElectronics Expander Pi |  - IO Interrupts Demo
# Version 1.0 Created 21/08/2014
# Version 1.1 Updated 11/06/2017 updated to include changes to Expander Pi library
#
# Requires python smbus to be installed with: sudo apt-get install python3-smbus
# run with: sudo python3 demo-iointerrupts.py
# ================================================

# This example shows how to use the interrupt methods on the Expander Pi IO port.
# The interrupts will be enabled and set so that a voltage applied to pins 1 and 16 will trigger INT A and B respectively.
# using the read_interrupt_capture or read_port methods will reset the
# interrupts.

# Initialise the IOPi and create an instance called io.
"""

io = IO()

# Set all pins on the IO bus to be inputs with internal pull-ups disabled.
io.set_port_pullups(0, 0x00)
io.set_port_pullups(1, 0x00)
io.set_port_direction(0, 0xFF)
io.set_port_direction(1, 0xFF)

# Set the interrupt polarity to be active high and mirroring disabled, so
# pins 1 to 8 trigger INT A and pins 9 to 16 trigger INT B
io.set_interrupt_polarity(1)
io.mirror_interrupts(0)

# Set the interrupts default value to trigger when 5V is applied to pins 1
# and 16.
# Pin 1 is bit 0x01 on port 0; pin 16 is bit 0x80 on port 1.
# (Bug fix: the second call previously targeted port 0 again, which
# configured pin 8 instead of pin 16 and clobbered pin 1's default.)
io.set_interrupt_defaults(0, 0x01)
io.set_interrupt_defaults(1, 0x80)

# Set the interrupt type to be 1 for ports A and B so an interrupt is
# fired when the pin matches the default value
io.set_interrupt_type(0, 1)
io.set_interrupt_type(1, 1)

# Enable interrupts for pins 1 and 16
io.set_interrupt_on_pin(1, 1)
io.set_interrupt_on_pin(16, 1)


while True:
    # read the port value from the last capture for ports 0 and 1. This will
    # reset the interrupts
    print (io.read_interrupt_capture(0))
    print (io.read_interrupt_capture(1))
    time.sleep(2)
abelectronicsuk/ABElectronics_Python3_Libraries
ExpanderPi/demo-iointerrupts.py
Python
mit
1,862
# -*- coding: utf-8 -*- from django.db import models from django_orm.postgresql.fields.arrays import ArrayField from django_orm.postgresql.fields.interval import IntervalField from django_orm.postgresql.fields.bytea import ByteaField from django_orm.manager import Manager class IntModel(models.Model): lista = ArrayField(dbtype='int') objects = Manager() class TextModel(models.Model): lista = ArrayField(dbtype='text') objects = Manager() class DoubleModel(models.Model): lista = ArrayField(dbtype='double precision') objects = Manager() class VarcharModel(models.Model): lista = ArrayField(dbtype='varchar(40)') objects = Manager() class IntervalModel(models.Model): iv = IntervalField() objects = Manager() class ByteaModel(models.Model): bb = ByteaField() objects = Manager() from django_orm.postgresql.geometric.fields import PointField, CircleField from django_orm.postgresql.geometric.fields import LsegField, BoxField from django_orm.postgresql.geometric.fields import PathField, PolygonField class GeomModel(models.Model): pt = PointField() pl = PolygonField() ln = LsegField() bx = BoxField() cr = CircleField() ph = PathField() objects = Manager() from .composite_types import Person, Account from django_orm.postgresql.composite import CompositeModelField class Foo2Model(models.Model): account = CompositeModelField(type=Account(), null=True) objects = Manager() class FooModel(models.Model): person = CompositeModelField(type=Person(), null=True) objects = Manager() class FooBigModel(models.Model): foo = models.ForeignKey(FooModel, related_name='big') objects = Manager()
cr8ivecodesmith/django-orm-extensions-save22
tests/old_test_apps/old_pg_complex/models.py
Python
bsd-3-clause
1,710
# -*- coding: utf-8 -*- from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('projects', '0010_migrate_domain_data'), ] operations = [ migrations.RemoveField( model_name='domain', name='url', ), ]
rtfd/readthedocs.org
readthedocs/projects/migrations/0011_delete-url.py
Python
mit
302
#################################################################
# MET v2 Metadate Explorer Tool
#
# This Software is Open Source. See License: https://github.com/TERENA/met/blob/master/LICENSE.md
# Copyright (c) 2012, TERENA All rights reserved.
#
# This Software is based on MET v1 developed for TERENA by Yaco Sistemas, http://www.yaco.es/
# MET v2 was developed for TERENA by Tamim Ziai, DAASI International GmbH, http://www.daasi.de
# Current version of MET has been revised for performance improvements by Andrea Biancini,
# Consortium GARR, http://www.garr.it
##########################################################################

import csv
from xml.dom.minidom import Document

from django.http import HttpResponse, HttpResponseBadRequest
from django.template.defaultfilters import slugify

import simplejson as json


# Taken from http://djangosnippets.org/snippets/790/
def export_csv(model, filename, fields):
    """Serialize *model* rows to a CSV download with *fields* as header."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = ('attachment; filename=%s.csv'
                                       % slugify(filename))
    writer = csv.writer(response)

    # Write headers to CSV file
    writer.writerow(fields)

    # Write data to CSV file
    for obj in model:
        row = []
        for field in fields:
            row.append("%s" % obj[field])
        writer.writerow(row)

    # Return CSV file to browser as download
    return response


def export_json(model, filename, fields):
    """Serialize *model* rows to a JSON download containing *fields*."""
    objs = []
    for obj in model:
        item = {}
        for field in fields:
            # Sets are not JSON-serializable; emit them as lists.
            if isinstance(obj[field], set):
                item[field] = list(obj[field])
            else:
                item[field] = obj[field]
        objs.append(item)

    # Return JS file to browser as download
    serialized = json.dumps(objs)
    response = HttpResponse(serialized, content_type='application/json')
    response['Content-Disposition'] = ('attachment; filename=%s.json'
                                       % slugify(filename))
    return response


def _parse_xml_element(xml, father, structure):
    """Recursively render *structure* as child nodes of *father*.

    Dicts become nested elements keyed by their keys; tuples/lists/sets
    repeat the parent tag once per element; everything else becomes text.
    """
    if isinstance(structure, dict):
        for key in structure:
            tag = xml.createElement(key)
            father.appendChild(tag)
            _parse_xml_element(xml, tag, structure[key])
    elif isinstance(structure, (tuple, list, set)):
        # These three cases were previously handled by identical
        # copy-pasted branches; they all behave the same way.
        tag_name = father.tagName
        for element in structure:
            tag = xml.createElement(tag_name)
            _parse_xml_element(xml, tag, element)
            father.appendChild(tag)
    else:
        # Leaf value. NOTE: this module targets Python 2 (`unicode`);
        # non-ASCII characters are emitted as XML character references.
        if type(structure) == unicode:
            data = structure.encode("ascii", errors="xmlcharrefreplace")
        else:
            data = str(structure)
        tag = xml.createTextNode(data)
        father.appendChild(tag)


def export_xml(model, filename, fields=None):
    """Serialize *model* rows to an XML download rooted at *filename*."""
    xml = Document()
    root = xml.createElement(filename)
    for obj in model:
        elem = xml.createElement("entity")
        _parse_xml_element(xml, elem, obj)
        root.appendChild(elem)
    xml.appendChild(root)

    # Return xml file to browser as download
    response = HttpResponse(xml.toxml(), content_type='application/xml')
    response['Content-Disposition'] = ('attachment; filename=%s.xml'
                                       % slugify(filename))
    return response


export_modes = {
    'csv': export_csv,
    'json': export_json,
    'xml': export_xml,
}


def export_query_set(mode, qs, filename, fields=None):
    """Dispatch to the exporter for *mode*; HTTP 400 on unknown formats."""
    if mode in export_modes:
        return export_modes[mode](qs, filename, fields)
    else:
        content = "Error 400, Format %s is not supported" % mode
        return HttpResponseBadRequest(content)
TERENA/met
met/metadataparser/query_export.py
Python
bsd-2-clause
4,096
import pygame

import screens.sound as sound


class Mine:
    """A sea mine occupying one tile of the playing field."""

    def __init__(self, session, tile):
        self.tile = tile
        self.x = tile.x
        self.y = tile.y
        self.session = session
        self.image = pygame.image.load('resources/templates/Sea_mine.png')
        self.rect = pygame.rect.Rect(self.x * self.tile.width,
                                     self.y * self.tile.height,
                                     self.tile.width, self.tile.height)
        self.tile.set_mine(self)

    # Computes and returns a collection of coordinates that surround this mine.
    def get_surrounding_pos(self, delta):
        """Return all (x, y) coordinates within `delta` tiles of this mine,
        including the mine's own tile.

        Bug fix: the `range` stop is now inclusive (`+ delta + 1`); the
        previous exclusive stop dropped the right/bottom edge, making the
        "surrounding" area asymmetric around the mine.
        """
        positions = []
        for y in range(self.y - delta, self.y + delta + 1):
            for x in range(self.x - delta, self.x + delta + 1):
                positions.append((x, y))
        return positions

    # Clears this mine from the field.
    def clear(self):
        self.tile.set_mine(None)

    # Explodes this mine, damaging all of the specified ships that are in the
    # vicinity.
    def explode(self, ships):
        for ship in ships:
            ship.health -= 2
            if ship.health <= 0:
                sound.Plopperdeplop.tune(self, 'explosion_ship')
                ship.wreck()
                ship.health = 0
            # NOTE(review): this plays the explosion sound for every damaged
            # ship, so wrecked ships trigger it twice -- confirm whether the
            # double tune is intentional.
            sound.Plopperdeplop.tune(self, 'explosion_ship')
        self.clear()

    # Updates the state of this mine per frame
    def update(self):
        pass

    # Draws this mine on the given surface
    def draw(self, surface):
        surface.blit(self.image, self.rect)
sinoz/boat-wars
src/play/mine.py
Python
apache-2.0
1,469
import tests.model_control.test_ozone_custom_models_enabled as testmod


# Smoke-test a single model combination on the ozone dataset:
# transformation=RelativeDifference, trend=MovingMedian, cycle=BestCycle,
# autoregressive component=AR.
testmod.build_model( ['RelativeDifference'] , ['MovingMedian'] , ['BestCycle'] , ['AR'] );
antoinecarme/pyaf
tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_MovingMedian_BestCycle_AR.py
Python
bsd-3-clause
163
#!/usr/bin/python
# Nathan Harmon
# https://github.com/nharmon/bogie-five
#
# Turn program
#
from Motion import *
import sys

if __name__ == '__main__':
    if len(sys.argv) < 2:
        # Fixed: the previous message ("Must specify target image file") was
        # copy-pasted from an image script; this program takes a direction.
        exit("Must specify direction change")

    # Parse the argument first so a bad number is reported as such, instead
    # of a bare `except:` masking hardware/Drive failures behind the same
    # parse-error message.
    try:
        direction = float(sys.argv[1])
    except ValueError:
        exit("Specify direction change")

    drive = Drive()
    drive.turn(direction)
nharmon/bogie-five
src/turn.py
Python
gpl-3.0
356
import os
import tempfile

from findex_gui.main import python_env


class CronController:
    """Manages the findex scheduler entry in the current user's crontab."""

    @staticmethod
    def has_cronjob():
        """Return True if the generated findex cronjob is installed.

        Returns False otherwise (the previous version fell through to a
        bare ``return``, yielding None and an inconsistent return type).
        """
        crontab = os.popen("crontab -l").read()
        cronjob = CronController.generate_cronjob()

        if cronjob in crontab:
            return True
        return False

    @staticmethod
    def insert_cronjob(job, default_time="*/1 * * * *"):
        """
        Insert a command into cronjobs
        :param job: the command
        :param default_time: every 1 min is default
        :return:
        """
        # mkstemp returns (fd, path); close the descriptor so it is not
        # leaked (the old code discarded it while still open).
        fd, tmp_file = tempfile.mkstemp("_fincron")
        os.close(fd)

        crontab = os.popen("crontab -l").read()
        if "no crontab for" in crontab.lower():
            # `crontab -l` printed an error instead of a table: the user
            # has no crontab yet, so start from an empty one. (The old
            # code skipped the append in this case and installed the
            # error message itself as the crontab, so the job was never
            # added for first-time users.)
            crontab = ""
        crontab += "\n%s %s\n" % (default_time, job)

        with open(tmp_file, "w") as f:
            f.write(crontab)

        os.popen("crontab %s" % tmp_file).read()
        os.remove(tmp_file)

    @staticmethod
    def remove_cronjob():
        """Remove the findex cronjob (if present) from the user's crontab."""
        crontab = os.popen("crontab -l").read()
        cronjob = CronController.generate_cronjob()

        # Keep every line that does not contain the generated job.
        new_lines = []
        for line in crontab.split("\n"):
            if cronjob not in line and line != cronjob:
                new_lines.append(line)
        new_crontab = "\n".join(new_lines)

        # `crontab` expects the file to end with a newline.
        if not new_crontab.endswith("\n"):
            new_crontab += "\n"

        fd, tmp_file = tempfile.mkstemp("_fincron")
        os.close(fd)  # avoid leaking the open descriptor

        with open(tmp_file, "w") as f:
            f.write(new_crontab)

        os.popen("crontab %s" % tmp_file).read()
        os.remove(tmp_file)

    @staticmethod
    def generate_cronjob():
        """Build the shell command line that is installed as the cron entry."""
        return " ".join([
            "cd %s &&" % python_env["project_root"],
            python_env["interpreter"],
            "%s/findex" % os.path.dirname(python_env["interpreter"]),
            "scheduler"
        ])
skftn/findex-gui
findex_gui/controllers/admin/scheduler/cron.py
Python
mit
1,807
# -*- coding: utf-8 -*-
"""
This Python module is part of the mataraclari package, which provides some
mathematical shortcuts for Python.

Copyright (C) 2011 Yaşar Arabacı

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from itertools import combinations as _combinations

from _mathGerecleri import primes as _primes


def TekilAsalCarpanlar(sayi):
    """Return the distinct prime factors of `sayi`, sorted ascending."""
    return sorted(set(asalCarpanlar(sayi)))


def asalCarpanlar(sayi):
    """Return the prime factorization of `sayi` as a list (with repeats)."""
    # Candidate primes up to sqrt(sayi); at most one prime factor can
    # exceed the square root.
    muhtemel_asal_carpanlar = _primes(int(sayi ** 0.5) + 1)
    current = sayi
    asal_carpanlar = []
    for i in muhtemel_asal_carpanlar:
        if current == 1:
            break
        # Divide out this prime as many times as it fits. Floor division
        # (`//`) keeps `current` an integer on Python 3 as well; the
        # original true division (`/`) silently produced floats there
        # and corrupted the factor list.
        while current % i == 0:
            asal_carpanlar.append(i)
            current = current // i
    if current != 1:
        # One prime larger than sqrt(sayi) remains.
        asal_carpanlar.append(current)
    return asal_carpanlar


def carpanlar(sayi):
    """Return all positive divisors of `sayi`, sorted ascending."""
    asal_carpanlar = asalCarpanlar(sayi)

    if len(asal_carpanlar) == 1:
        # `sayi` is prime: its only divisors are 1 and itself.
        return [1] + [sayi]

    sonuc = [1]
    for i in range(1, len(asal_carpanlar)):
        # Every product of i prime factors (with multiplicity) is a
        # divisor; duplicates are removed by the final set().
        for carpilcaklar in _combinations(asal_carpanlar, i):
            carpim = carpilcaklar[0]
            for j in range(1, i):
                carpim *= carpilcaklar[j]
            sonuc.append(carpim)
    sonuc.append(sayi)
    return sorted(tuple(set(sonuc)))
yasar11732/PyMathGerec
mataraclari/_carpanlar.py
Python
gpl-3.0
2,114
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Conversion from legacy (pre-1.1) configuration to Confit/YAML
configuration.
"""
# NOTE(review): this module is Python 2 only (ConfigParser,
# itertools.izip_longest, u''-literals).
import os
import ConfigParser
import codecs
import yaml
import logging
import time
import itertools
import re

import beets
from beets import util
from beets import ui
from beets.util import confit

# Environment variable that overrides the legacy config location.
CONFIG_PATH_VAR = 'BEETSCONFIG'
DEFAULT_CONFIG_FILENAME_UNIX = '.beetsconfig'
DEFAULT_CONFIG_FILENAME_WINDOWS = 'beetsconfig.ini'
DEFAULT_LIBRARY_FILENAME_UNIX = '.beetsmusic.blb'
DEFAULT_LIBRARY_FILENAME_WINDOWS = 'beetsmusic.blb'
# NOTE(review): appears unused in this module — verify before removing.
WINDOWS_BASEDIR = os.environ.get('APPDATA') or '~'

OLD_CONFIG_SUFFIX = '.old'
# Plugins renamed between 1.0 and 1.1 (old name -> new name).
PLUGIN_NAMES = {
    'rdm': 'random',
    'fuzzy_search': 'fuzzy',
}
# Per-plugin option names that were all standardized to plain "auto".
AUTO_KEYS = ('automatic', 'autofetch', 'autoembed', 'autoscrub')
IMPORTFEEDS_PREFIX = 'feeds_'
CONFIG_MIGRATED_MESSAGE = u"""
You appear to be upgrading from beets 1.0 (or earlier) to 1.1. Your
configuration file has been migrated automatically to:
{newconfig}
Edit this file to configure beets. You might want to remove your
old-style ".beetsconfig" file now. See the documentation for more
details on the new configuration system:
http://beets.readthedocs.org/page/reference/config.html
""".strip()
DB_MIGRATED_MESSAGE = u'Your database file has also been copied to:\n{newdb}'
YAML_COMMENT = '# Automatically migrated from legacy .beetsconfig.\n\n'

log = logging.getLogger('beets')


# An itertools recipe.
def grouper(n, iterable):
    # Yield n-tuples from `iterable`; the last tuple is padded with None
    # when the length is not a multiple of n.
    args = [iter(iterable)] * n
    return itertools.izip_longest(*args)


def _displace(fn):
    """Move a file aside using a timestamp suffix so a new file can be
    put in its place.
    """
    util.move(
        fn,
        u'{0}.old.{1}'.format(fn, int(time.time())),
        True
    )


def default_paths():
    """Produces the appropriate default config and library database
    paths for the current system. On Unix, this is always in ~. On
    Windows, tries ~ first and then $APPDATA for the config and library
    files (for backwards compatibility).
    """
    windows = os.path.__name__ == 'ntpath'
    if windows:
        windata = os.environ.get('APPDATA') or '~'

    # Shorthand for joining paths.
    def exp(*vals):
        return os.path.expanduser(os.path.join(*vals))

    config = exp('~', DEFAULT_CONFIG_FILENAME_UNIX)
    if windows and not os.path.exists(config):
        config = exp(windata, DEFAULT_CONFIG_FILENAME_WINDOWS)

    libpath = exp('~', DEFAULT_LIBRARY_FILENAME_UNIX)
    if windows and not os.path.exists(libpath):
        libpath = exp(windata, DEFAULT_LIBRARY_FILENAME_WINDOWS)

    return config, libpath


def get_config():
    """Using the same logic as beets 1.0, locate and read the
    .beetsconfig file. Return a ConfigParser instance or None if no
    config is found.
    """
    default_config, default_libpath = default_paths()
    if CONFIG_PATH_VAR in os.environ:
        configpath = os.path.expanduser(os.environ[CONFIG_PATH_VAR])
    else:
        configpath = default_config

    config = ConfigParser.SafeConfigParser()
    if os.path.exists(util.syspath(configpath)):
        with codecs.open(configpath, 'r', encoding='utf-8') as f:
            config.readfp(f)
        return config, configpath
    else:
        return None, configpath


def flatten_config(config):
    """Given a ConfigParser, flatten the values into a dict-of-dicts
    representation where each section gets its own dictionary of values.
    """
    out = confit.OrderedDict()
    for section in config.sections():
        sec_dict = out[section] = confit.OrderedDict()
        for option in config.options(section):
            # Third positional argument is `raw`: skip %-interpolation.
            sec_dict[option] = config.get(section, option, True)
    return out


def transform_value(value):
    """Given a string read as the value of a config option, return a
    massaged version of that value (possibly with a different type).
    """
    # Booleans.
    if value.lower() in ('false', 'no', 'off'):
        return False
    elif value.lower() in ('true', 'yes', 'on'):
        return True

    # Integers.
    try:
        return int(value)
    except ValueError:
        pass

    # Floats.
    try:
        return float(value)
    except ValueError:
        pass

    return value


def transform_data(data):
    """Given a dict-of-dicts representation of legacy config data, tweak
    the data into a new form. This new form is suitable for dumping as
    YAML.
    """
    out = confit.OrderedDict()
    for section, pairs in data.items():
        if section == 'beets':
            # The "main" section. In the new config system, these values
            # are in the "root": no section at all.
            for key, value in pairs.items():
                value = transform_value(value)

                if key.startswith('import_'):
                    # Importer config is now under an "import:" key.
                    if 'import' not in out:
                        out['import'] = confit.OrderedDict()
                    out['import'][key[7:]] = value

                elif key == 'plugins':
                    # Renamed plugins.
                    plugins = value.split()
                    new_plugins = [PLUGIN_NAMES.get(p, p) for p in plugins]
                    out['plugins'] = ' '.join(new_plugins)

                elif key == 'replace':
                    # YAMLy representation for character replacements.
                    replacements = confit.OrderedDict()
                    for pat, repl in grouper(2, value.split()):
                        if repl == '<strip>':
                            repl = ''
                        replacements[pat] = repl
                    out['replace'] = replacements

                elif key == 'pluginpath':
                    # Used to be a colon-separated string. Now a list.
                    out['pluginpath'] = value.split(':')

                else:
                    out[key] = value

        elif pairs:
            # Other sections (plugins, etc).
            sec_out = out[section] = confit.OrderedDict()
            for key, value in pairs.items():
                # Standardized "auto" option.
                if key in AUTO_KEYS:
                    key = 'auto'

                # Unnecessary : hack in queries.
                if section == 'paths':
                    key = key.replace('_', ':')

                # Changed option names for importfeeds plugin.
                if section == 'importfeeds':
                    if key.startswith(IMPORTFEEDS_PREFIX):
                        key = key[len(IMPORTFEEDS_PREFIX):]

                sec_out[key] = transform_value(value)

    return out


class Dumper(yaml.SafeDumper):
    """A PyYAML Dumper that represents OrderedDicts as ordinary mappings
    (in order, of course).
    """
    # From http://pyyaml.org/attachment/ticket/161/use_ordered_dict.py
    def represent_mapping(self, tag, mapping, flow_style=None):
        value = []
        node = yaml.MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = list(mapping.items())
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, yaml.ScalarNode) and \
                    not node_key.style):
                best_style = False
            if not (isinstance(node_value, yaml.ScalarNode) and \
                    not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

# Route OrderedDicts through the ordinary dict representer so they dump
# as plain mappings.
Dumper.add_representer(confit.OrderedDict, Dumper.represent_dict)


def migrate_config(replace=False):
    """Migrate a legacy beetsconfig file to a new-style config.yaml file
    in an appropriate place. If `replace` is enabled, then any existing
    config.yaml will be moved aside. Otherwise, the process is aborted
    when the file exists.
    """
    # Load legacy configuration data, if any.
    config, configpath = get_config()
    if not config:
        log.debug(u'no config file found at {0}'.format(
            util.displayable_path(configpath)
        ))
        return

    # Get the new configuration file path and possibly move it out of
    # the way.
    destfn = os.path.join(beets.config.config_dir(), confit.CONFIG_FILENAME)
    if os.path.exists(destfn):
        if replace:
            log.debug(u'moving old config aside: {0}'.format(
                util.displayable_path(destfn)
            ))
            _displace(destfn)
        else:
            # File exists and we won't replace it. We're done.
            return

    log.debug(u'migrating config file {0}'.format(
        util.displayable_path(configpath)
    ))

    # Convert the configuration to a data structure ready to be dumped
    # as the new Confit file.
    data = transform_data(flatten_config(config))

    # Encode result as YAML.
    yaml_out = yaml.dump(
        data,
        Dumper=Dumper,
        default_flow_style=False,
        indent=4,
        width=1000,
    )

    # A ridiculous little hack to add some whitespace between "sections"
    # in the YAML output. I hope this doesn't break any YAML syntax.
    yaml_out = re.sub(r'(\n\w+:\n    [^-\s])', '\n\\1', yaml_out)
    yaml_out = YAML_COMMENT + yaml_out

    # Write the data to the new config destination.
    log.debug(u'writing migrated config to {0}'.format(
        util.displayable_path(destfn)
    ))
    with open(destfn, 'w') as f:
        f.write(yaml_out)

    return destfn


def migrate_db(replace=False):
    """Copy the beets library database file to the new location (e.g.,
    from ~/.beetsmusic.blb to ~/.config/beets/library.db).
    """
    _, srcfn = default_paths()
    destfn = beets.config['library'].as_filename()

    if not os.path.exists(srcfn) or srcfn == destfn:
        # Old DB does not exist or we're configured to point to the same
        # database. Do nothing.
        return

    if os.path.exists(destfn):
        if replace:
            log.debug(u'moving old database aside: {0}'.format(
                util.displayable_path(destfn)
            ))
            _displace(destfn)
        else:
            return

    log.debug(u'copying database from {0} to {1}'.format(
        util.displayable_path(srcfn), util.displayable_path(destfn)
    ))
    util.copy(srcfn, destfn)
    return destfn


def migrate_state(replace=False):
    """Copy the beets runtime state file from the old path (i.e.,
    ~/.beetsstate) to the new path (i.e.,
    ~/.config/beets/state.pickle).
    """
    srcfn = os.path.expanduser(os.path.join('~', '.beetsstate'))
    if not os.path.exists(srcfn):
        return

    destfn = beets.config['statefile'].as_filename()
    if os.path.exists(destfn):
        if replace:
            _displace(destfn)
        else:
            return

    log.debug(u'copying state file from {0} to {1}'.format(
        util.displayable_path(srcfn), util.displayable_path(destfn)
    ))
    util.copy(srcfn, destfn)
    return destfn


# Automatic migration when beets starts.

def automigrate():
    """Migrate the configuration, database, and state files. If any
    migration occurs, print out a notice with some helpful next steps.
    """
    config_fn = migrate_config()
    db_fn = migrate_db()
    migrate_state()

    if config_fn:
        ui.print_(ui.colorize('fuchsia', u'MIGRATED CONFIGURATION'))

        ui.print_(CONFIG_MIGRATED_MESSAGE.format(
            newconfig=util.displayable_path(config_fn))
        )
        if db_fn:
            ui.print_(DB_MIGRATED_MESSAGE.format(
                newdb=util.displayable_path(db_fn)
            ))

        ui.input_(ui.colorize('fuchsia', u'Press ENTER to continue:'))
        ui.print_()


# CLI command for explicit migration.

migrate_cmd = ui.Subcommand('migrate', help='convert legacy config')
def migrate_func(lib, opts, args):
    """Explicit command for migrating files. Existing files in each
    destination are moved aside.
    """
    config_fn = migrate_config(replace=True)
    if config_fn:
        log.info(u'Migrated configuration to: {0}'.format(
            util.displayable_path(config_fn)
        ))
    db_fn = migrate_db(replace=True)
    if db_fn:
        log.info(u'Migrated library database to: {0}'.format(
            util.displayable_path(db_fn)
        ))
    state_fn = migrate_state(replace=True)
    if state_fn:
        log.info(u'Migrated state file to: {0}'.format(
            util.displayable_path(state_fn)
        ))
migrate_cmd.func = migrate_func
dpendl00/headphones
lib/beets/ui/migrate.py
Python
gpl-3.0
13,453