| repo_name | ref | path | copies | content |
|---|---|---|---|---|
xiechuanj/containerops
|
refs/heads/master
|
pilotage/examples/python/Reliability/HTTPServerDemo.py
|
49
|
#encoding=utf-8
'''
HTTP server demo: echoes the body of PUT and POST requests.
'''
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import io,shutil
import urllib,time
import getopt,string
class MyRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.process(2)
def do_POST(self):
self.process(1)
def do_PUT(self):
self.process(1)
def process(self,type):
content =""
if type==1: #put, post method
datas = self.rfile.read(int(self.headers['content-length']))
datas = urllib.unquote(datas).decode("utf-8", 'ignore')
print datas
# datas = eval(data)
content = str(datas)+"\r\n"
enc="UTF-8"
content = content.encode(enc)
f = io.BytesIO()
f.write(content)
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(content)))
self.end_headers()
shutil.copyfileobj(f,self.wfile)
def transDicts(params):
dicts={}
if len(params)==0:
return dicts
params = params.split('&')
for param in params:
dicts[param.split('=')[0]]=param.split('=')[1]
return dicts
if __name__=='__main__':
try:
server = HTTPServer(('', 8001), MyRequestHandler)
print 'started httpserver...'
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
pass
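# --- Illustrative usage (not part of the original file; a minimal sketch) ---
# Assuming the server above is running locally on port 8001, a Python 2
# client could exercise the POST echo path like this:
#
#   import urllib2
#   req = urllib2.Request('http://localhost:8001/', data='a=1&b=2')
#   print urllib2.urlopen(req).read()   # prints the echoed, url-decoded body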
|
suyashphadtare/vestasi-erp-1
|
refs/heads/develop
|
erpnext/selling/doctype/campaign/test_campaign.py
|
40
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
test_records = frappe.get_test_records('Campaign')
|
sander76/home-assistant
|
refs/heads/dev
|
homeassistant/components/axis/const.py
|
8
|
"""Constants for the Axis component."""
import logging
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
LOGGER = logging.getLogger(__package__)
DOMAIN = "axis"
ATTR_MANUFACTURER = "Axis Communications AB"
CONF_EVENTS = "events"
CONF_MODEL = "model"
CONF_STREAM_PROFILE = "stream_profile"
CONF_VIDEO_SOURCE = "video_source"
DEFAULT_EVENTS = True
DEFAULT_STREAM_PROFILE = "No stream profile"
DEFAULT_TRIGGER_TIME = 0
DEFAULT_VIDEO_SOURCE = "No video source"
PLATFORMS = [BINARY_SENSOR_DOMAIN, CAMERA_DOMAIN, LIGHT_DOMAIN, SWITCH_DOMAIN]
|
leopittelli/Django-on-App-Engine-Example
|
refs/heads/master
|
django/template/loaders/cached.py
|
114
|
"""
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin
from django.utils.encoding import force_bytes
class Loader(BaseLoader):
is_usable = True
def __init__(self, loaders):
self.template_cache = {}
self._loaders = loaders
self._cached_loaders = []
@property
def loaders(self):
# Resolve loaders on demand to avoid circular imports
if not self._cached_loaders:
# Set self._cached_loaders atomically. Otherwise, another thread
# could see an incomplete list. See #17303.
cached_loaders = []
for loader in self._loaders:
cached_loaders.append(find_template_loader(loader))
self._cached_loaders = cached_loaders
return self._cached_loaders
def find_template(self, name, dirs=None):
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
return (template, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
key = template_name
if template_dirs:
# If template directories were specified, use a hash to differentiate
key = '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()])
try:
template = self.template_cache[key]
except KeyError:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = get_template_from_string(template, origin, template_name)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
return template, origin
self.template_cache[key] = template
return template, None
def reset(self):
"Empty the template cache."
self.template_cache.clear()
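# --- Illustrative configuration (not part of the original file) ---
# A minimal settings sketch for the Django versions that ship this module,
# wrapping the standard filesystem and app_directories loaders in the cached
# loader above (setting name and loader paths follow that era's convention):
#
#   TEMPLATE_LOADERS = (
#       ('django.template.loaders.cached.Loader', (
#           'django.template.loaders.filesystem.Loader',
#           'django.template.loaders.app_directories.Loader',
#       )),
#   )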
|
tengpeng/spark
|
refs/heads/master
|
python/pyspark/sql/context.py
|
8
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since, _NoValue
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.udf import UDFRegistration
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM; instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as a new session, with separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return self.sparkSession.udf
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with a single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@since(1.2)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self.sparkSession.udf.register(name, f, returnType)
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
DeprecationWarning)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to a row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation (e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` instances active on this context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM; instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple Derby instances and running into incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
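# --- Illustrative usage (not part of the original file; a minimal sketch) ---
# Assuming an existing local SparkContext `sc`, the deprecated SQLContext
# entry point documented above is typically exercised like this:
#
#   sqlContext = SQLContext.getOrCreate(sc)
#   df = sqlContext.createDataFrame([('Alice', 1)], ['name', 'age'])
#   df.createOrReplaceTempView('people')
#   sqlContext.sql('SELECT name FROM people WHERE age >= 1').show()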
|
ptoraskar/django
|
refs/heads/master
|
tests/forms_tests/tests/test_regressions.py
|
155
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import (
CharField, ChoiceField, Form, HiddenInput, IntegerField, ModelForm,
ModelMultipleChoiceField, MultipleChoiceField, RadioSelect, Select,
TextInput,
)
from django.test import TestCase, ignore_warnings
from django.utils import translation
from django.utils.translation import gettext_lazy, ugettext_lazy
from ..models import Cheese
class FormsRegressionsTestCase(TestCase):
def test_class(self):
# Tests to guard against regressions of earlier bugs.
extra_attrs = {'class': 'special'}
class TestForm(Form):
f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
f2 = CharField(widget=TextInput(attrs=extra_attrs))
self.assertHTMLEqual(TestForm(auto_id=False).as_p(), '<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n<p>F2: <input type="text" class="special" name="f2" /></p>')
def test_regression_3600(self):
# Tests for form i18n #
# There were some problems with form translations in #3600
class SomeForm(Form):
username = CharField(max_length=10, label=ugettext_lazy('username'))
f = SomeForm()
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
# Translations are done at rendering time, so multi-lingual apps can define forms.
with translation.override('de'):
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Benutzername:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
with translation.override('pl'):
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">u\u017cytkownik:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
def test_regression_5216(self):
# There were some problems with form translations in #5216
class SomeForm(Form):
field_1 = CharField(max_length=10, label=ugettext_lazy('field_1'))
field_2 = CharField(max_length=10, label=ugettext_lazy('field_2'), widget=TextInput(attrs={'id': 'field_2_id'}))
f = SomeForm()
self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1:</label>')
self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2:</label>')
# Unicode decoding problems...
GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen'))
class SomeForm(Form):
somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf')
f = SomeForm()
self.assertHTMLEqual(f.as_p(), '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')
# Translated error messages used to be buggy.
with translation.override('ru'):
f = SomeForm({})
self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')
# Deep copying translated text shouldn't raise an error.
class CopyForm(Form):
degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),)))
f = CopyForm()
@ignore_warnings(category=UnicodeWarning)
def test_regression_5216_b(self):
# Testing choice validation with UTF-8 bytestrings as input (these are the
# Russian abbreviations "мес." and "шт.").
UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'),
(b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.'))
f = ChoiceField(choices=UNITS)
self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.')
self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.')
def test_misc(self):
# There once was a problem with Form fields called "data". Let's make sure that
# doesn't come back.
class DataForm(Form):
data = CharField(max_length=10)
f = DataForm({'data': 'xyzzy'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {'data': 'xyzzy'})
# A form with *only* hidden fields that has errors is going to be very unusual.
class HiddenForm(Form):
data = IntegerField(widget=HiddenInput)
f = HiddenForm({})
self.assertHTMLEqual(f.as_p(), '<ul class="errorlist nonfield"><li>(Hidden field data) This field is required.</li></ul>\n<p> <input type="hidden" name="data" id="id_data" /></p>')
self.assertHTMLEqual(f.as_table(), '<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field data) This field is required.</li></ul><input type="hidden" name="data" id="id_data" /></td></tr>')
def test_xss_error_messages(self):
###################################################
# Tests for XSS vulnerabilities in error messages #
###################################################
# The forms layer doesn't escape input values directly because error messages
# might be presented in non-HTML contexts. Instead, the message is just marked
# for escaping by the template engine. So we'll need to construct a little
# silly template to trigger the escaping.
from django.template import Template, Context
t = Template('{{ form.errors }}')
class SomeForm(Form):
field = ChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': '<script>'})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')
class SomeForm(Form):
field = MultipleChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')
from forms_tests.models import ChoiceModel
class SomeForm(Form):
field = ModelMultipleChoiceField(ChoiceModel.objects.all())
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>"<script>" is not a valid value for a primary key.</li></ul></li></ul>')
def test_regression_14234(self):
"""
Re-cleaning an instance that was added via a ModelForm should not raise
a pk uniqueness error.
"""
class CheeseForm(ModelForm):
class Meta:
model = Cheese
fields = '__all__'
form = CheeseForm({
'name': 'Brie',
})
self.assertTrue(form.is_valid())
obj = form.save()
obj.name = 'Camembert'
obj.full_clean()
|
eloquence/unisubs
|
refs/heads/staging
|
apps/subtitles/migrations/0004_auto__add_field_subtitlelanguage_official_signoff_count__add_field_sub.py
|
5
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SubtitleLanguage.official_signoff_count'
db.add_column('subtitles_subtitlelanguage', 'official_signoff_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
# Adding field 'SubtitleLanguage.unofficial_signoff_count'
db.add_column('subtitles_subtitlelanguage', 'unofficial_signoff_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
# Adding field 'SubtitleLanguage.pending_signoff_expired_count'
db.add_column('subtitles_subtitlelanguage', 'pending_signoff_expired_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
# Adding field 'SubtitleLanguage.pending_signoff_count'
db.add_column('subtitles_subtitlelanguage', 'pending_signoff_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
# Adding field 'SubtitleLanguage.pending_signoff_unexpired_count'
db.add_column('subtitles_subtitlelanguage', 'pending_signoff_unexpired_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'SubtitleLanguage.official_signoff_count'
db.delete_column('subtitles_subtitlelanguage', 'official_signoff_count')
# Deleting field 'SubtitleLanguage.unofficial_signoff_count'
db.delete_column('subtitles_subtitlelanguage', 'unofficial_signoff_count')
# Deleting field 'SubtitleLanguage.pending_signoff_expired_count'
db.delete_column('subtitles_subtitlelanguage', 'pending_signoff_expired_count')
# Deleting field 'SubtitleLanguage.pending_signoff_count'
db.delete_column('subtitles_subtitlelanguage', 'pending_signoff_count')
# Deleting field 'SubtitleLanguage.pending_signoff_unexpired_count'
db.delete_column('subtitles_subtitlelanguage', 'pending_signoff_unexpired_count')
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'subtitles.collaborator': {
'Meta': {'unique_together': "(('user', 'subtitle_language'),)", 'object_name': 'Collaborator'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'expiration_start': ('django.db.models.fields.DateTimeField', [], {}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'signoff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'signoff_is_official': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleLanguage']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'subtitles.subtitlelanguage': {
'Meta': {'unique_together': "[('video', 'language_code')]", 'object_name': 'SubtitleLanguage'},
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'collab_newlanguages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_newlanguages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'official_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_expired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_unexpired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'unofficial_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitlelanguage_set'", 'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'writelocked_newlanguages'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'subtitles.subtitleversion': {
'Meta': {'unique_together': "[('video', 'language_code', 'version_number')]", 'object_name': 'SubtitleVersion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitleversion_set'", 'to': "orm['auth.CustomUser']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['subtitles.SubtitleVersion']", 'symmetrical': 'False', 'blank': 'True'}),
'serialized_lineage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'serialized_subtitles': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleLanguage']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'version_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '10'}),
'visibility_override': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.partner': {
'Meta': {'object_name': 'Partner'},
'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tseams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['subtitles']
|
AyoubZahid/odoo
|
refs/heads/9.0
|
addons/base_import/test_models.py
|
399
|
from openerp.osv import orm, fields
def name(n): return 'base_import.tests.models.%s' % n
class char(orm.Model):
_name = name('char')
_columns = {
'value': fields.char('unknown')
}
class char_required(orm.Model):
_name = name('char.required')
_columns = {
'value': fields.char('unknown', required=True)
}
class char_readonly(orm.Model):
_name = name('char.readonly')
_columns = {
'value': fields.char('unknown', readonly=True)
}
class char_states(orm.Model):
_name = name('char.states')
_columns = {
'value': fields.char('unknown', readonly=True, states={'draft': [('readonly', False)]})
}
class char_noreadonly(orm.Model):
_name = name('char.noreadonly')
_columns = {
'value': fields.char('unknown', readonly=True, states={'draft': [('invisible', True)]})
}
class char_stillreadonly(orm.Model):
_name = name('char.stillreadonly')
_columns = {
'value': fields.char('unknown', readonly=True, states={'draft': [('readonly', True)]})
}
# TODO: complex field (m2m, o2m, m2o)
class m2o(orm.Model):
_name = name('m2o')
_columns = {
'value': fields.many2one(name('m2o.related'))
}
class m2o_related(orm.Model):
_name = name('m2o.related')
_columns = {
'value': fields.integer()
}
_defaults = {
'value': 42
}
class m2o_required(orm.Model):
_name = name('m2o.required')
_columns = {
'value': fields.many2one(name('m2o.required.related'), required=True)
}
class m2o_required_related(orm.Model):
_name = name('m2o.required.related')
_columns = {
'value': fields.integer()
}
_defaults = {
'value': 42
}
class o2m(orm.Model):
_name = name('o2m')
_columns = {
'value': fields.one2many(name('o2m.child'), 'parent_id')
}
class o2m_child(orm.Model):
_name = name('o2m.child')
_columns = {
'parent_id': fields.many2one(name('o2m')),
'value': fields.integer()
}
class preview_model(orm.Model):
_name = name('preview')
_columns = {
'name': fields.char('Name'),
'somevalue': fields.integer('Some Value', required=True),
'othervalue': fields.integer('Other Variable'),
}
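# --- Illustrative note (not part of the original file) ---
# The name() helper above only namespaces the test models, e.g.:
#   name('char')        -> 'base_import.tests.models.char'
#   name('m2o.related') -> 'base_import.tests.models.m2o.related'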
|
modulexcite/boxstarter
|
refs/heads/master
|
BuildPackages/example/sublime/Packages/AAAPackageDev/tests/sublime.py
|
12133432
| |
350dotorg/Django
|
refs/heads/master
|
tests/regressiontests/admin_scripts/management/commands/__init__.py
|
12133432
| |
PeterWangIntel/blink-crosswalk
|
refs/heads/master
|
LayoutTests/http/tests/websocket/workers/resources/simple_wsh.py
|
66
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
msgutil.send_message(request, 'Hello from Simple WSH.')
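# Note (added for clarity; not part of the original test resource): pywebsocket
# discovers these two module-level hooks by name -- web_socket_do_extra_handshake
# accepts or rejects the opening handshake, and web_socket_transfer_data runs
# once the connection is established, here sending a single greeting message.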
|
jheld/pylint
|
refs/heads/master
|
test/utils.py
|
2
|
"""some pylint test utilities
"""
import sys
from glob import glob
from os.path import join, abspath, dirname, basename, exists, splitext
from cStringIO import StringIO
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
PREFIX = abspath(dirname(__file__))
def fix_path():
sys.path.insert(0, PREFIX)
SYS_VERS_STR = '%d%d' % sys.version_info[:2]
def get_tests_info(prefix, suffix):
"""get python input examples and output messages
We use following conventions for input files and messages:
for different inputs:
don't test for python < x.y -> input = <name>_pyxy.py
don't test for python >= x.y -> input = <name>_py_xy.py
for one input and different messages:
message for python <= x.y -> message = <name>_pyxy.txt
higher versions -> message with highest num
"""
result = []
for fname in glob(join(PREFIX, 'input', prefix + '*' + suffix)):
infile = basename(fname)
fbase = splitext(infile)[0]
# filter input files :
pyrestr = fbase.rsplit('_py', 1)[-1] # like _26 or 26
if pyrestr.isdigit(): # '24', '25'...
if SYS_VERS_STR < pyrestr:
continue
if pyrestr.startswith('_') and pyrestr[1:].isdigit():
# skip test for higher python versions
if SYS_VERS_STR >= pyrestr[1:]:
continue
messages = glob(join(PREFIX, 'messages', fbase + '*.txt'))
# the last one will be without ext, i.e. for all or upper versions:
if messages:
for outfile in sorted(messages, reverse=True):
py_rest = outfile.rsplit('_py', 1)[-1][:-4]
if py_rest.isdigit() and SYS_VERS_STR >= py_rest:
break
else:
outfile = None
result.append((infile, outfile))
return result
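# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original pylint test
# suite). It mirrors how get_tests_info() interprets the _pyXY / _py_XY
# filename convention described in its docstring, using hypothetical names.
def _demo_version_filter(fbase, sys_vers_str=SYS_VERS_STR):
    """Return True if a hypothetical input file `fbase` applies to this Python."""
    pyrestr = fbase.rsplit('_py', 1)[-1]
    if pyrestr.isdigit() and sys_vers_str < pyrestr:
        return False   # file requires a newer interpreter (<name>_pyxy.py)
    if pyrestr.startswith('_') and pyrestr[1:].isdigit() \
       and sys_vers_str >= pyrestr[1:]:
        return False   # file reserved for older interpreters (<name>_py_xy.py)
    return True
# e.g. _demo_version_filter('func_foo_py26', '27') -> True
# ---------------------------------------------------------------------------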
TITLE_UNDERLINES = ['', '=', '-', '.']
class TestReporter(BaseReporter):
""" store plain text messages
"""
    __implements__ = IReporter
def __init__(self):
self.message_ids = {}
self.reset()
def reset(self):
self.out = StringIO()
self.messages = []
def add_message(self, msg_id, location, msg):
"""manage message of different type and in the context of path """
fpath, module, object, line, _ = location
self.message_ids[msg_id] = 1
if object:
object = ':%s' % object
sigle = msg_id[0]
self.messages.append('%s:%3s%s: %s' % (sigle, line, object, msg))
def finalize(self):
self.messages.sort()
for msg in self.messages:
print >>self.out, msg
result = self.out.getvalue()
self.reset()
return result
def display_results(self, layout):
"""ignore layouts"""
# # # # # pyreverse unittest utilities # # # # # #
from logilab.common.testlib import TestCase
import os
import sys
from os.path import join
from logilab.astng import MANAGER
def _astng_wrapper(func, modname):
return func(modname)
def _sorted_file(path):
lines = [line.strip() for line in open(path).readlines()
if (line.find('squeleton generated by ') == -1 and
not line.startswith('__revision__ = "$Id:'))]
lines = [line for line in lines if line]
lines.sort()
return '\n'.join(lines)
def get_project(module, name=None):
"""return a astng project representation
"""
manager = MANAGER
# flush cache
manager._modules_by_name = {}
return manager.project_from_files([module], _astng_wrapper,
project_name=name)
DEFAULTS = {'all_ancestors': None, 'show_associated': None,
'module_names': None,
'output_format': 'dot', 'diadefs_file': None, 'quiet': 0,
'show_ancestors': None, 'classes': (), 'all_associated': None,
'mode': 'PUB_ONLY', 'show_builtin': False, 'only_classnames': False}
class Config(object):
"""config object for tests"""
def __init__(self):
for attr, value in DEFAULTS.items():
setattr(self, attr, value)
class FileTC(TestCase):
"""base test case for testing file output"""
generated_files = ()
def setUp(self):
self.expected_files = [join(dirname(abspath(__file__)), 'data', file)
for file in self.generated_files]
def tearDown(self):
for fname in self.generated_files:
try:
os.remove(fname)
except:
continue
def _test_same_file(self, index):
generated_file = self.generated_files[index]
expected_file = self.expected_files[index]
generated = _sorted_file(generated_file)
expected = _sorted_file(expected_file)
from difflib import unified_diff
files = "\n *** expected : %s, generated : %s \n" % (
expected_file, generated_file)
self.assertEqual(expected, generated, '%s%s' % (
files, '\n'.join(line for line in unified_diff(
expected.splitlines(), generated.splitlines() ))) )
os.remove(generated_file)
def build_file_case(filetc):
for i in range(len(filetc.generated_files)):
setattr(filetc, 'test_same_file_%s' %i,
lambda self, index=i: self._test_same_file(index))
|
SRabbelier/Melange
|
refs/heads/master
|
thirdparty/google_appengine/lib/graphy/graphy/line_chart.py
|
28
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to line charts."""
import copy
import warnings
from graphy import common
class LineStyle(object):
"""Represents the style for a line on a line chart. Also provides some
convenient presets.
Object attributes (Passed directly to the Google Chart API. Check there for
details):
width: Width of the line
on: Length of a line segment (for dashed/dotted lines)
off: Length of a break (for dashed/dotted lines)
color: Color of the line. A hex string, like 'ff0000' for red. Optional,
AutoColor will fill this in for you automatically if empty.
Some common styles, such as LineStyle.dashed, are available:
solid
dashed
dotted
thick_solid
thick_dashed
thick_dotted
"""
# Widths
THIN = 1
THICK = 2
# Patterns
# ((on, off) tuples, as passed to LineChart.AddLine)
SOLID = (1, 0)
DASHED = (8, 4)
DOTTED = (2, 4)
def __init__(self, width, on, off, color=None):
"""Construct a LineStyle. See class docstring for details on args."""
self.width = width
self.on = on
self.off = off
self.color = color
LineStyle.solid = LineStyle(1, 1, 0)
LineStyle.dashed = LineStyle(1, 8, 4)
LineStyle.dotted = LineStyle(1, 2, 4)
LineStyle.thick_solid = LineStyle(2, 1, 0)
LineStyle.thick_dashed = LineStyle(2, 8, 4)
LineStyle.thick_dotted = LineStyle(2, 2, 4)
class LineChart(common.BaseChart):
"""Represents a line chart."""
def __init__(self, points=None):
super(LineChart, self).__init__()
if points is not None:
self.AddLine(points)
def AddLine(self, points, label=None, color=None,
pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None):
"""Add a new line to the chart.
This is a convenience method which constructs the DataSeries and appends it
for you. It returns the new series.
points: List of equally-spaced y-values for the line
label: Name of the line (used for the legend)
color: Hex string, like 'ff0000' for red
pattern: Tuple for (length of segment, length of gap). i.e.
LineStyle.DASHED
width: Width of the line (i.e. LineStyle.THIN)
markers: List of Marker objects to attach to this line (see DataSeries
for more info)
"""
if color is not None and isinstance(color[0], common.Marker):
warnings.warn('Your code may be broken! '
'You passed a list of Markers instead of a color. The '
'old argument order (markers before color) is deprecated.',
DeprecationWarning, stacklevel=2)
style = LineStyle(width, pattern[0], pattern[1], color=color)
series = common.DataSeries(points, label=label, style=style,
markers=markers)
self.data.append(series)
return series
def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None,
label=None):
"""DEPRECATED"""
warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. ',
DeprecationWarning, stacklevel=2)
return self.AddLine(points, color=color, width=style.width,
pattern=(style.on, style.off), markers=markers,
label=label)
class Sparkline(LineChart):
"""Represent a sparkline. These behave like LineCharts,
mostly, but come without axes.
"""
|
Permutatrix/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/mathml/tools/fractions.py
|
92
|
#!/usr/bin/python
from utils import mathfont
import fontforge
v1 = 7 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-axisheight%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = v1
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 5 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-denominatordisplaystylegapmin%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = v1
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 6 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-denominatordisplaystyleshiftdown%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = v1
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 4 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-denominatorgapmin%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = v1
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 3 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-denominatorshiftdown%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = v1
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 8 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-numeratordisplaystylegapmin%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = v1
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 2 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-numeratordisplaystyleshiftup%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = v1
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 9 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-numeratorgapmin%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = v1
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 11 * mathfont.em
v2 = 1 * mathfont.em
f = mathfont.create("fraction-numeratorshiftup%d-rulethickness%d" % (v1, v2))
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = v1
f.math.FractionRuleThickness = v2
mathfont.save(f)
v1 = 10 * mathfont.em
f = mathfont.create("fraction-rulethickness%d" % v1)
f.math.AxisHeight = 0
f.math.FractionDenominatorDisplayStyleGapMin = 0
f.math.FractionDenominatorDisplayStyleShiftDown = 0
f.math.FractionDenominatorGapMin = 0
f.math.FractionDenominatorShiftDown = 0
f.math.FractionNumeratorDisplayStyleGapMin = 0
f.math.FractionNumeratorDisplayStyleShiftUp = 0
f.math.FractionNumeratorGapMin = 0
f.math.FractionNumeratorShiftUp = 0
f.math.FractionRuleThickness = v1
mathfont.save(f)
|
insomnia-lab/calibre
|
refs/heads/master
|
src/calibre/db/write.py
|
5
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from functools import partial
from datetime import datetime
from future_builtins import zip
from calibre.constants import preferred_encoding, ispy3
from calibre.ebooks.metadata import author_to_author_sort, title_sort
from calibre.utils.date import (
parse_only_date, parse_date, UNDEFINED_DATE, isoformat, is_date_undefined)
from calibre.utils.localization import canonicalize_lang
from calibre.utils.icu import strcmp
if ispy3:
unicode = str
# Convert data into values suitable for the db {{{
def sqlite_datetime(x):
return isoformat(x, sep=' ') if isinstance(x, datetime) else x
def single_text(x):
if x is None:
return x
if not isinstance(x, unicode):
x = x.decode(preferred_encoding, 'replace')
x = x.strip()
return x if x else None
series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
def get_series_values(val):
if not val:
return (val, None)
match = series_index_pat.match(val.strip())
if match is not None:
idx = match.group(2)
try:
idx = float(idx)
return (match.group(1).strip(), idx)
except:
pass
return (val, None)
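# Illustrative note (added for clarity; not part of the original calibre
# source): get_series_values() splits a trailing bracketed index off a series
# string, e.g. get_series_values('Foundation [3]') == ('Foundation', 3.0),
# while get_series_values('Foundation') == ('Foundation', None).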
def multiple_text(sep, ui_sep, x):
if not x:
return ()
if isinstance(x, bytes):
        x = x.decode(preferred_encoding, 'replace')
if isinstance(x, unicode):
x = x.split(sep)
else:
x = (y.decode(preferred_encoding, 'replace') if isinstance(y, bytes)
else y for y in x)
ui_sep = ui_sep.strip()
repsep = ',' if ui_sep == ';' else ';'
x = (y.strip().replace(ui_sep, repsep) for y in x if y.strip())
return tuple(' '.join(y.split()) for y in x if y)
def adapt_datetime(x):
if isinstance(x, (unicode, bytes)):
x = parse_date(x, assume_utc=False, as_utc=False)
if x and is_date_undefined(x):
x = UNDEFINED_DATE
return x
def adapt_date(x):
if isinstance(x, (unicode, bytes)):
x = parse_only_date(x)
if x is None or is_date_undefined(x):
x = UNDEFINED_DATE
return x
def adapt_number(typ, x):
if x is None:
return None
if isinstance(x, (unicode, bytes)):
if not x or x.lower() == 'none':
return None
return typ(x)
def adapt_bool(x):
if isinstance(x, (unicode, bytes)):
x = x.lower()
if x == 'true':
x = True
elif x == 'false':
x = False
elif x == 'none' or x == '':
x = None
else:
x = bool(int(x))
return x if x is None else bool(x)
def adapt_languages(to_tuple, x):
ans = []
for lang in to_tuple(x):
lc = canonicalize_lang(lang)
if not lc or lc in ans or lc in ('und', 'zxx', 'mis', 'mul'):
continue
ans.append(lc)
return tuple(ans)
def clean_identifier(typ, val):
typ = icu_lower(typ or '').strip().replace(':', '').replace(',', '')
val = (val or '').strip().replace(',', '|')
return typ, val
def adapt_identifiers(to_tuple, x):
if not isinstance(x, dict):
x = {k:v for k, v in (y.partition(':')[0::2] for y in to_tuple(x))}
ans = {}
for k, v in x.iteritems():
k, v = clean_identifier(k, v)
if k and v:
ans[k] = v
return ans
def get_adapter(name, metadata):
dt = metadata['datatype']
if dt == 'text':
if metadata['is_multiple']:
m = metadata['is_multiple']
ans = partial(multiple_text, m['ui_to_list'], m['list_to_ui'])
else:
ans = single_text
elif dt == 'series':
ans = single_text
elif dt == 'datetime':
ans = adapt_date if name == 'pubdate' else adapt_datetime
elif dt == 'int':
ans = partial(adapt_number, int)
elif dt == 'float':
ans = partial(adapt_number, float)
elif dt == 'bool':
ans = adapt_bool
elif dt == 'comments':
ans = single_text
elif dt == 'rating':
ans = lambda x: None if x in {None, 0} else min(10, max(0, adapt_number(int, x)))
elif dt == 'enumeration':
ans = single_text
elif dt == 'composite':
ans = lambda x: x
if name == 'title':
return lambda x: ans(x) or _('Unknown')
if name == 'author_sort':
return lambda x: ans(x) or ''
if name == 'authors':
return lambda x: tuple(y.replace('|', ',') for y in ans(x)) or (_('Unknown'),)
if name in {'timestamp', 'last_modified'}:
return lambda x: ans(x) or UNDEFINED_DATE
if name == 'series_index':
return lambda x: 1.0 if ans(x) is None else ans(x)
if name == 'languages':
return partial(adapt_languages, ans)
if name == 'identifiers':
return partial(adapt_identifiers, ans)
return ans
# }}}
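# Illustrative sketch (added for clarity; not part of the original calibre
# source). get_adapter() returns a callable that normalises raw input for one
# field before it is written; the metadata dict below is a hypothetical
# minimal example, not a real calibre field definition.
def _demo_bool_adapter():
    adapt = get_adapter('somebool', {'datatype': 'bool', 'is_multiple': False})
    # Strings and numbers are coerced to True/False/None before hitting the db.
    return [adapt(x) for x in ('true', 'false', 'none', 0)]  # [True, False, None, False]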
# One-One fields {{{
def one_one_in_books(book_id_val_map, db, field, *args):
'Set a one-one field in the books table'
if book_id_val_map:
sequence = ((sqlite_datetime(v), k) for k, v in book_id_val_map.iteritems())
db.executemany(
'UPDATE books SET %s=? WHERE id=?'%field.metadata['column'], sequence)
field.table.book_col_map.update(book_id_val_map)
return set(book_id_val_map)
def set_uuid(book_id_val_map, db, field, *args):
field.table.update_uuid_cache(book_id_val_map)
return one_one_in_books(book_id_val_map, db, field, *args)
def set_title(book_id_val_map, db, field, *args):
ans = one_one_in_books(book_id_val_map, db, field, *args)
# Set the title sort field
field.title_sort_field.writer.set_books(
{k:title_sort(v) for k, v in book_id_val_map.iteritems()}, db)
return ans
def one_one_in_other(book_id_val_map, db, field, *args):
'Set a one-one field in the non-books table, like comments'
deleted = tuple((k,) for k, v in book_id_val_map.iteritems() if v is None)
if deleted:
db.executemany('DELETE FROM %s WHERE book=?'%field.metadata['table'],
deleted)
for book_id in deleted:
field.table.book_col_map.pop(book_id[0], None)
updated = {k:v for k, v in book_id_val_map.iteritems() if v is not None}
if updated:
db.executemany('INSERT OR REPLACE INTO %s(book,%s) VALUES (?,?)'%(
field.metadata['table'], field.metadata['column']),
((k, sqlite_datetime(v)) for k, v in updated.iteritems()))
field.table.book_col_map.update(updated)
return set(book_id_val_map)
def custom_series_index(book_id_val_map, db, field, *args):
series_field = field.series_field
sequence = []
for book_id, sidx in book_id_val_map.iteritems():
if sidx is None:
sidx = 1.0
ids = series_field.ids_for_book(book_id)
if ids:
sequence.append((sidx, book_id, ids[0]))
field.table.book_col_map[book_id] = sidx
if sequence:
db.executemany('UPDATE %s SET %s=? WHERE book=? AND value=?'%(
field.metadata['table'], field.metadata['column']), sequence)
return {s[1] for s in sequence}
# }}}
# Many-One fields {{{
def safe_lower(x):
try:
return icu_lower(x)
except (TypeError, ValueError, KeyError, AttributeError):
return x
def get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map, is_authors=False):
''' Get the db id for the value val. If val does not exist in the db it is
inserted into the db. '''
kval = kmap(val)
item_id = rid_map.get(kval, None)
if item_id is None:
if is_authors:
aus = author_to_author_sort(val)
db.execute('INSERT INTO authors(name,sort) VALUES (?,?)',
(val.replace(',', '|'), aus))
else:
db.execute('INSERT INTO %s(%s) VALUES (?)'%(
m['table'], m['column']), (val,))
item_id = rid_map[kval] = db.last_insert_rowid()
table.id_map[item_id] = val
table.col_book_map[item_id] = set()
if is_authors:
table.asort_map[item_id] = aus
table.alink_map[item_id] = ''
elif allow_case_change and val != table.id_map[item_id]:
case_changes[item_id] = val
val_map[val] = item_id
def change_case(case_changes, dirtied, db, table, m, is_authors=False):
if is_authors:
vals = ((val.replace(',', '|'), item_id) for item_id, val in
case_changes.iteritems())
else:
vals = ((val, item_id) for item_id, val in case_changes.iteritems())
db.executemany(
'UPDATE %s SET %s=? WHERE id=?'%(m['table'], m['column']), vals)
for item_id, val in case_changes.iteritems():
table.id_map[item_id] = val
dirtied.update(table.col_book_map[item_id])
if is_authors:
table.asort_map[item_id] = author_to_author_sort(val)
def many_one(book_id_val_map, db, field, allow_case_change, *args):
dirtied = set()
m = field.metadata
table = field.table
dt = m['datatype']
is_custom_series = dt == 'series' and table.name.startswith('#')
# Map values to db ids, including any new values
kmap = safe_lower if dt in {'text', 'series'} else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
val_map = {None:None}
case_changes = {}
for val in book_id_val_map.itervalues():
if val is not None:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map)
if case_changes:
change_case(case_changes, dirtied, db, table, m)
book_id_item_id_map = {k:val_map[v] for k, v in book_id_val_map.iteritems()}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_id in book_id_item_id_map.iteritems():
old_item_id = table.book_col_map.get(book_id, None)
if old_item_id is not None:
table.col_book_map[old_item_id].discard(book_id)
if item_id is None:
table.book_col_map.pop(book_id, None)
deleted.add(book_id)
else:
table.book_col_map[book_id] = item_id
table.col_book_map[item_id].add(book_id)
updated[book_id] = item_id
# Update the db link table
if deleted:
db.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
((k,) for k in deleted))
if updated:
sql = (
'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1},extra) VALUES(?, ?, 1.0)'
if is_custom_series else
'DELETE FROM {0} WHERE book=?; INSERT INTO {0}(book,{1}) VALUES(?, ?)'
)
db.executemany(sql.format(table.link_table, m['link_column']),
((book_id, book_id, item_id) for book_id, item_id in
updated.iteritems()))
# Remove no longer used items
remove = {item_id for item_id in table.id_map if not
table.col_book_map.get(item_id, False)}
if remove:
db.executemany('DELETE FROM %s WHERE id=?'%m['table'],
((item_id,) for item_id in remove))
for item_id in remove:
del table.id_map[item_id]
table.col_book_map.pop(item_id, None)
return dirtied
# }}}
# Many-Many fields {{{
def uniq(vals, kmap=lambda x:x):
''' Remove all duplicates from vals, while preserving order. kmap must be a
callable that returns a hashable value for every item in vals '''
vals = vals or ()
lvals = (kmap(x) for x in vals)
seen = set()
seen_add = seen.add
return tuple(x for x, k in zip(vals, lvals) if k not in seen and not seen_add(k))
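# Illustrative note (added for clarity; not part of the original calibre
# source): uniq() keeps the first occurrence of each key while preserving input
# order, e.g. uniq(['A', 'a', 'B'], kmap=lambda x: x.lower()) returns ('A', 'B').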
def many_many(book_id_val_map, db, field, allow_case_change, *args):
dirtied = set()
m = field.metadata
table = field.table
dt = m['datatype']
is_authors = field.name == 'authors'
# Map values to db ids, including any new values
kmap = safe_lower if dt == 'text' else lambda x:x
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
if len(rid_map) != len(table.id_map):
# table has some entries that differ only in case, fix it
table.fix_case_duplicates(db)
rid_map = {kmap(item):item_id for item_id, item in table.id_map.iteritems()}
val_map = {}
case_changes = {}
book_id_val_map = {k:uniq(vals, kmap) for k, vals in book_id_val_map.iteritems()}
for vals in book_id_val_map.itervalues():
for val in vals:
get_db_id(val, db, m, table, kmap, rid_map, allow_case_change,
case_changes, val_map, is_authors=is_authors)
if case_changes:
change_case(case_changes, dirtied, db, table, m, is_authors=is_authors)
if is_authors:
for item_id, val in case_changes.iteritems():
for book_id in table.col_book_map[item_id]:
current_sort = field.db_author_sort_for_book(book_id)
new_sort = field.author_sort_for_book(book_id)
if strcmp(current_sort, new_sort) == 0:
# The sort strings differ only by case, update the db
# sort
field.author_sort_field.writer.set_books({book_id:new_sort}, db)
book_id_item_id_map = {k:tuple(val_map[v] for v in vals)
for k, vals in book_id_val_map.iteritems()}
# Ignore those items whose value is the same as the current value
book_id_item_id_map = {k:v for k, v in book_id_item_id_map.iteritems()
if v != table.book_col_map.get(k, None)}
dirtied |= set(book_id_item_id_map)
# Update the book->col and col->book maps
deleted = set()
updated = {}
for book_id, item_ids in book_id_item_id_map.iteritems():
old_item_ids = table.book_col_map.get(book_id, None)
if old_item_ids:
for old_item_id in old_item_ids:
table.col_book_map[old_item_id].discard(book_id)
if item_ids:
table.book_col_map[book_id] = item_ids
for item_id in item_ids:
table.col_book_map[item_id].add(book_id)
updated[book_id] = item_ids
else:
table.book_col_map.pop(book_id, None)
deleted.add(book_id)
# Update the db link table
if deleted:
db.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
((k,) for k in deleted))
if updated:
vals = (
(book_id, val) for book_id, vals in updated.iteritems()
for val in vals
)
db.executemany('DELETE FROM %s WHERE book=?'%table.link_table,
((k,) for k in updated))
db.executemany('INSERT INTO {0}(book,{1}) VALUES(?, ?)'.format(
table.link_table, m['link_column']), vals)
if is_authors:
aus_map = {book_id:field.author_sort_for_book(book_id) for book_id
in updated}
field.author_sort_field.writer.set_books(aus_map, db)
# Remove no longer used items
remove = {item_id for item_id in table.id_map if not
table.col_book_map.get(item_id, False)}
if remove:
db.executemany('DELETE FROM %s WHERE id=?'%m['table'],
((item_id,) for item_id in remove))
for item_id in remove:
del table.id_map[item_id]
table.col_book_map.pop(item_id, None)
if is_authors:
table.asort_map.pop(item_id, None)
table.alink_map.pop(item_id, None)
return dirtied
# }}}
def identifiers(book_id_val_map, db, field, *args): # {{{
table = field.table
updates = set()
for book_id, identifiers in book_id_val_map.iteritems():
if book_id not in table.book_col_map:
table.book_col_map[book_id] = {}
current_ids = table.book_col_map[book_id]
remove_keys = set(current_ids) - set(identifiers)
for key in remove_keys:
table.col_book_map.get(key, set()).discard(book_id)
current_ids.pop(key, None)
current_ids.update(identifiers)
for key, val in identifiers.iteritems():
if key not in table.col_book_map:
table.col_book_map[key] = set()
table.col_book_map[key].add(book_id)
updates.add((book_id, key, val))
db.executemany('DELETE FROM identifiers WHERE book=?',
((x,) for x in book_id_val_map))
if updates:
db.executemany('INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
tuple(updates))
return set(book_id_val_map)
# }}}
def dummy(book_id_val_map, *args):
return set()
class Writer(object):
def __init__(self, field):
self.adapter = get_adapter(field.name, field.metadata)
self.name = field.name
self.field = field
dt = field.metadata['datatype']
self.accept_vals = lambda x: True
if dt == 'composite' or field.name in {
'id', 'size', 'path', 'formats', 'news'}:
self.set_books_func = dummy
elif self.name[0] == '#' and self.name.endswith('_index'):
self.set_books_func = custom_series_index
elif self.name == 'identifiers':
self.set_books_func = identifiers
elif self.name == 'uuid':
self.set_books_func = set_uuid
elif self.name == 'title':
self.set_books_func = set_title
elif field.is_many_many:
self.set_books_func = many_many
elif field.is_many:
self.set_books_func = (self.set_books_for_enum if dt ==
'enumeration' else many_one)
else:
self.set_books_func = (one_one_in_books if field.metadata['table']
== 'books' else one_one_in_other)
if self.name in {'timestamp', 'uuid', 'sort'}:
self.accept_vals = bool
def set_books(self, book_id_val_map, db, allow_case_change=True):
book_id_val_map = {k:self.adapter(v) for k, v in
book_id_val_map.iteritems() if self.accept_vals(v)}
if not book_id_val_map:
return set()
dirtied = self.set_books_func(book_id_val_map, db, self.field,
allow_case_change)
return dirtied
def set_books_for_enum(self, book_id_val_map, db, field,
allow_case_change):
allowed = set(field.metadata['display']['enum_values'])
book_id_val_map = {k:v for k, v in book_id_val_map.iteritems() if v is
None or v in allowed}
if not book_id_val_map:
return set()
return many_one(book_id_val_map, db, field, False)
|
akarki15/mozillians
|
refs/heads/master
|
vendor-local/lib/python/celery/tests/test_app/test_beat.py
|
14
|
from __future__ import absolute_import
import logging
from datetime import datetime, timedelta
from nose import SkipTest
from celery import beat
from celery import registry
from celery.result import AsyncResult
from celery.schedules import schedule
from celery.task.base import Task
from celery.utils import uuid
from celery.tests.utils import Case
class Object(object):
pass
class MockShelve(dict):
closed = False
synced = False
def close(self):
self.closed = True
def sync(self):
self.synced = True
class MockService(object):
started = False
stopped = False
def __init__(self, *args, **kwargs):
pass
def start(self, **kwargs):
self.started = True
def stop(self, **kwargs):
self.stopped = True
class test_ScheduleEntry(Case):
Entry = beat.ScheduleEntry
def create_entry(self, **kwargs):
entry = dict(name="celery.unittest.add",
schedule=schedule(timedelta(seconds=10)),
args=(2, 2),
options={"routing_key": "cpu"})
return self.Entry(**dict(entry, **kwargs))
def test_next(self):
entry = self.create_entry(schedule=10)
self.assertTrue(entry.last_run_at)
self.assertIsInstance(entry.last_run_at, datetime)
self.assertEqual(entry.total_run_count, 0)
next_run_at = entry.last_run_at + timedelta(seconds=10)
next = entry.next(next_run_at)
self.assertGreaterEqual(next.last_run_at, next_run_at)
self.assertEqual(next.total_run_count, 1)
def test_is_due(self):
entry = self.create_entry(schedule=timedelta(seconds=10))
due1, next_time_to_run1 = entry.is_due()
self.assertFalse(due1)
self.assertGreater(next_time_to_run1, 9)
next_run_at = entry.last_run_at - timedelta(seconds=10)
next = entry.next(next_run_at)
due2, next_time_to_run2 = next.is_due()
self.assertTrue(due2)
self.assertGreater(next_time_to_run2, 9)
def test_repr(self):
entry = self.create_entry()
self.assertIn("<Entry:", repr(entry))
def test_update(self):
entry = self.create_entry()
self.assertEqual(entry.schedule, timedelta(seconds=10))
self.assertTupleEqual(entry.args, (2, 2))
self.assertDictEqual(entry.kwargs, {})
self.assertDictEqual(entry.options, {"routing_key": "cpu"})
entry2 = self.create_entry(schedule=timedelta(minutes=20),
args=(16, 16),
kwargs={"callback": "foo.bar.baz"},
options={"routing_key": "urgent"})
entry.update(entry2)
self.assertEqual(entry.schedule, schedule(timedelta(minutes=20)))
self.assertTupleEqual(entry.args, (16, 16))
self.assertDictEqual(entry.kwargs, {"callback": "foo.bar.baz"})
self.assertDictEqual(entry.options, {"routing_key": "urgent"})
class MockLogger(logging.Logger):
def __init__(self, *args, **kwargs):
self.logged = []
logging.Logger.__init__(self, *args, **kwargs)
def _log(self, level, msg, args, **kwargs):
self.logged.append((level, msg, args, kwargs))
class mScheduler(beat.Scheduler):
def __init__(self, *args, **kwargs):
self.sent = []
beat.Scheduler.__init__(self, *args, **kwargs)
self.logger = MockLogger("celery.beat", logging.ERROR)
def send_task(self, name=None, args=None, kwargs=None, **options):
self.sent.append({"name": name,
"args": args,
"kwargs": kwargs,
"options": options})
return AsyncResult(uuid())
class mSchedulerSchedulingError(mScheduler):
def send_task(self, *args, **kwargs):
raise beat.SchedulingError("Could not apply task")
class mSchedulerRuntimeError(mScheduler):
def maybe_due(self, *args, **kwargs):
raise RuntimeError("dict modified while itervalues")
class mocked_schedule(schedule):
def __init__(self, is_due, next_run_at):
self._is_due = is_due
self._next_run_at = next_run_at
self.run_every = timedelta(seconds=1)
def is_due(self, last_run_at):
return self._is_due, self._next_run_at
always_due = mocked_schedule(True, 1)
always_pending = mocked_schedule(False, 1)
class test_Scheduler(Case):
def test_custom_schedule_dict(self):
custom = {"foo": "bar"}
scheduler = mScheduler(schedule=custom, lazy=True)
self.assertIs(scheduler.data, custom)
def test_apply_async_uses_registered_task_instances(self):
through_task = [False]
class MockTask(Task):
@classmethod
def apply_async(cls, *args, **kwargs):
through_task[0] = True
assert MockTask.name in registry.tasks
scheduler = mScheduler()
scheduler.apply_async(scheduler.Entry(task=MockTask.name))
self.assertTrue(through_task[0])
def test_info(self):
scheduler = mScheduler()
self.assertIsInstance(scheduler.info, basestring)
def test_due_tick(self):
scheduler = mScheduler()
scheduler.add(name="test_due_tick",
schedule=always_due,
args=(1, 2),
kwargs={"foo": "bar"})
self.assertEqual(scheduler.tick(), 1)
def test_due_tick_SchedulingError(self):
scheduler = mSchedulerSchedulingError()
scheduler.add(name="test_due_tick_SchedulingError",
schedule=always_due)
self.assertEqual(scheduler.tick(), 1)
self.assertTrue(scheduler.logger.logged[0])
level, msg, args, kwargs = scheduler.logger.logged[0]
self.assertEqual(level, logging.ERROR)
self.assertIn("Couldn't apply scheduled task",
repr(args[0].args[0]))
def test_due_tick_RuntimeError(self):
scheduler = mSchedulerRuntimeError()
scheduler.add(name="test_due_tick_RuntimeError",
schedule=always_due)
self.assertEqual(scheduler.tick(), scheduler.max_interval)
def test_pending_tick(self):
scheduler = mScheduler()
scheduler.add(name="test_pending_tick",
schedule=always_pending)
self.assertEqual(scheduler.tick(), 1)
def test_honors_max_interval(self):
scheduler = mScheduler()
maxi = scheduler.max_interval
scheduler.add(name="test_honors_max_interval",
schedule=mocked_schedule(False, maxi * 4))
self.assertEqual(scheduler.tick(), maxi)
def test_ticks(self):
scheduler = mScheduler()
nums = [600, 300, 650, 120, 250, 36]
s = dict(("test_ticks%s" % i,
{"schedule": mocked_schedule(False, j)})
for i, j in enumerate(nums))
scheduler.update_from_dict(s)
self.assertEqual(scheduler.tick(), min(nums))
def test_schedule_no_remain(self):
scheduler = mScheduler()
scheduler.add(name="test_schedule_no_remain",
schedule=mocked_schedule(False, None))
self.assertEqual(scheduler.tick(), scheduler.max_interval)
def test_interface(self):
scheduler = mScheduler()
scheduler.sync()
scheduler.setup_schedule()
scheduler.close()
def test_merge_inplace(self):
a = mScheduler()
b = mScheduler()
a.update_from_dict({"foo": {"schedule": mocked_schedule(True, 10)},
"bar": {"schedule": mocked_schedule(True, 20)}})
b.update_from_dict({"bar": {"schedule": mocked_schedule(True, 40)},
"baz": {"schedule": mocked_schedule(True, 10)}})
a.merge_inplace(b.schedule)
self.assertNotIn("foo", a.schedule)
self.assertIn("baz", a.schedule)
self.assertEqual(a.schedule["bar"].schedule._next_run_at, 40)
class test_Service(Case):
def get_service(self):
sh = MockShelve()
class PersistentScheduler(beat.PersistentScheduler):
persistence = Object()
persistence.open = lambda *a, **kw: sh
tick_raises_exit = False
shutdown_service = None
def tick(self):
if self.tick_raises_exit:
raise SystemExit()
if self.shutdown_service:
self.shutdown_service._is_shutdown.set()
return 0.0
return beat.Service(scheduler_cls=PersistentScheduler), sh
def test_start(self):
s, sh = self.get_service()
schedule = s.scheduler.schedule
self.assertIsInstance(schedule, dict)
self.assertIsInstance(s.scheduler, beat.Scheduler)
scheduled = schedule.keys()
for task_name in sh["entries"].keys():
self.assertIn(task_name, scheduled)
s.sync()
self.assertTrue(sh.closed)
self.assertTrue(sh.synced)
self.assertTrue(s._is_stopped.isSet())
s.sync()
s.stop(wait=False)
self.assertTrue(s._is_shutdown.isSet())
s.stop(wait=True)
self.assertTrue(s._is_shutdown.isSet())
p = s.scheduler._store
s.scheduler._store = None
try:
s.scheduler.sync()
finally:
s.scheduler._store = p
def test_start_embedded_process(self):
s, sh = self.get_service()
s._is_shutdown.set()
s.start(embedded_process=True)
def test_start_thread(self):
s, sh = self.get_service()
s._is_shutdown.set()
s.start(embedded_process=False)
def test_start_tick_raises_exit_error(self):
s, sh = self.get_service()
s.scheduler.tick_raises_exit = True
s.start()
self.assertTrue(s._is_shutdown.isSet())
def test_start_manages_one_tick_before_shutdown(self):
s, sh = self.get_service()
s.scheduler.shutdown_service = s
s.start()
self.assertTrue(s._is_shutdown.isSet())
class test_EmbeddedService(Case):
def test_start_stop_process(self):
try:
from multiprocessing import Process
except ImportError:
raise SkipTest("multiprocessing not available")
s = beat.EmbeddedService()
self.assertIsInstance(s, Process)
self.assertIsInstance(s.service, beat.Service)
s.service = MockService()
class _Popen(object):
terminated = False
def terminate(self):
self.terminated = True
s.run()
self.assertTrue(s.service.started)
s._popen = _Popen()
s.stop()
self.assertTrue(s.service.stopped)
self.assertTrue(s._popen.terminated)
def test_start_stop_threaded(self):
s = beat.EmbeddedService(thread=True)
from threading import Thread
self.assertIsInstance(s, Thread)
self.assertIsInstance(s.service, beat.Service)
s.service = MockService()
s.run()
self.assertTrue(s.service.started)
s.stop()
self.assertTrue(s.service.stopped)
|
lduarte1991/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/contentserver/migrations/0001_initial.py
|
62
|
# -*- coding: utf-8 -*-
#pylint: skip-file
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseAssetCacheTtlConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('cache_ttl', models.PositiveIntegerField(default=0, help_text=b'The time, in seconds, to report that a course asset is allowed to be cached for.')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
),
]
|
yfdyh000/kuma
|
refs/heads/master
|
kuma/wiki/tests/test_helpers.py
|
4
|
# -*- coding: utf-8 -*-
import mock
from django.contrib.sites.models import Site
from kuma.core.cache import memcache
from kuma.core.tests import eq_
from kuma.users.tests import UserTestCase
from . import WikiTestCase, document, revision
from ..models import DocumentZone
from ..templatetags.jinja_helpers import (absolutify,
document_zone_management_links,
revisions_unified_diff,
selector_content_find, tojson)
class HelpTests(WikiTestCase):
def test_tojson(self):
        eq_(tojson({'title': '<script>alert("Hi!")</script>'}),
            '{"title": "&lt;script&gt;alert(\\"Hi!\\")&lt;/script&gt;"}')
@mock.patch.object(Site.objects, 'get_current')
def test_absolutify(self, get_current):
get_current.return_value.domain = 'testserver'
eq_(absolutify(''), 'https://testserver/')
eq_(absolutify('/'), 'https://testserver/')
eq_(absolutify('//'), 'https://testserver/')
eq_(absolutify('/foo/bar'), 'https://testserver/foo/bar')
eq_(absolutify('http://domain.com'), 'http://domain.com')
site = Site(domain='otherserver')
eq_(absolutify('/woo', site), 'https://otherserver/woo')
eq_(absolutify('/woo?var=value'), 'https://testserver/woo?var=value')
eq_(absolutify('/woo?var=value#fragment'),
'https://testserver/woo?var=value#fragment')
class RevisionsUnifiedDiffTests(UserTestCase, WikiTestCase):
def test_from_revision_none(self):
rev = revision()
try:
diff = revisions_unified_diff(None, rev)
except AttributeError:
self.fail("Should not throw AttributeError")
eq_("Diff is unavailable.", diff)
def test_from_revision_non_ascii(self):
doc1 = document(title=u'Gänsefüßchen', save=True)
rev1 = revision(document=doc1, content=u'spam', save=True)
doc2 = document(title=u'Außendienstüberwachlösung', save=True)
rev2 = revision(document=doc2, content=u'eggs', save=True)
try:
revisions_unified_diff(rev1, rev2)
except UnicodeEncodeError:
self.fail("Should not throw UnicodeEncodeError")
class DocumentZoneTests(UserTestCase, WikiTestCase):
"""Tests for DocumentZone helpers"""
def setUp(self):
super(DocumentZoneTests, self).setUp()
self.root_links_content = """
<p>Links content</p>
"""
self.root_content = """
<h4 id="links">Links</h4>
%s
""" % (self.root_links_content)
root_rev = revision(title='ZoneRoot',
slug='ZoneRoot',
content=self.root_content,
is_approved=True,
save=True)
self.root_doc = root_rev.document
self.root_doc.rendered_html = self.root_content
self.root_doc.save()
self.root_zone = DocumentZone(document=self.root_doc)
self.root_zone.save()
sub_rev = revision(title='SubPage',
slug='SubPage',
content='This is a subpage',
is_approved=True,
save=True)
self.sub_doc = sub_rev.document
self.sub_doc.parent_topic = self.root_doc
self.sub_doc.rendered_html = sub_rev.content
self.sub_doc.save()
self.sub_sub_links_content = """
<p>Sub-page links content</p>
"""
self.sub_sub_content = """
<h4 id="links">Links</h4>
%s
""" % (self.sub_sub_links_content)
sub_sub_rev = revision(title='SubSubPage',
slug='SubSubPage',
content='This is a subpage',
is_approved=True,
save=True)
self.sub_sub_doc = sub_sub_rev.document
self.sub_sub_doc.parent_topic = self.sub_doc
self.sub_sub_doc.rendered_html = self.sub_sub_content
self.sub_sub_doc.save()
other_rev = revision(title='otherPage',
slug='otherPage',
content='This is an other page',
is_approved=True,
save=True)
self.other_doc = other_rev.document
self.other_doc.save()
memcache.clear()
def test_document_zone_links(self):
admin = self.user_model.objects.filter(is_superuser=True)[0]
random = self.user_model.objects.filter(is_superuser=False)[0]
cases = [
(admin, self.root_doc, False, True),
(random, self.root_doc, False, False),
(admin, self.sub_doc, True, True),
(random, self.sub_doc, False, False),
(admin, self.other_doc, True, False),
(random, self.other_doc, False, False),
]
for (user, doc, add, change) in cases:
result_links = document_zone_management_links(user, doc)
eq_(add, result_links['add'] is not None, (user, doc))
eq_(change, result_links['change'] is not None)
class SelectorContentFindTests(UserTestCase, WikiTestCase):
def test_selector_not_found_returns_empty_string(self):
doc_content = u'<div id="not-summary">Not the summary</div>'
doc1 = document(title=u'Test Missing Selector', save=True)
doc1.rendered_html = doc_content
doc1.save()
revision(document=doc1, content=doc_content, save=True)
content = selector_content_find(doc1, 'summary')
assert content == ''
def test_pyquery_bad_selector_syntax_returns_empty_string(self):
doc_content = u'<div id="not-suNot the summary</span'
doc1 = document(title=u'Test Missing Selector', save=True)
doc1.rendered_html = doc_content
doc1.save()
revision(document=doc1, content=doc_content, save=True)
content = selector_content_find(doc1, '.')
assert content == ''
|
kogotko/carburetor
|
refs/heads/master
|
openstack_dashboard/dashboards/project/instances/tables.py
|
1
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse
from django import shortcuts
from django import template
from django.template.defaultfilters import title
from django.utils.http import urlencode
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.floating_ips import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
SHELVE_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
POWER_STATES = {
0: "NO STATE",
1: "RUNNING",
2: "BLOCKED",
3: "PAUSED",
4: "SHUTDOWN",
5: "SHUTOFF",
6: "CRASHED",
7: "SUSPENDED",
8: "FAILED",
9: "BUILDING",
}
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
SHELVE = 0
UNSHELVE = 1
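# Note (added for clarity; not part of the original module): the paired
# constants above (PAUSE/UNPAUSE, SUSPEND/RESUME, SHELVE/UNSHELVE) are indexes
# into the two-element tuples returned by action_present()/action_past() in the
# Toggle* actions below; allowed()/action() set current_present_action and
# current_past_action so the table shows the verb matching the instance state.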
def is_deleting(instance):
task_state = getattr(instance, "OS-EXT-STS:task_state", None)
if not task_state:
return False
return task_state.lower() == "deleting"
class DeleteInstance(policy.PolicyTargetMixin, tables.DeleteAction):
policy_rules = (("compute", "os_compute_api:servers:delete"),)
help_text = _("Deleted instances are not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Instance",
u"Delete Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Instance",
u"Scheduled deletion of Instances",
count
)
def allowed(self, request, instance=None):
"""Allow delete action if instance is in error state or not currently
being deleted.
"""
error_state = False
if instance:
error_state = (instance.status == 'ERROR')
return error_state or not is_deleting(instance)
def action(self, request, obj_id):
api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "reboot"
classes = ('btn-reboot',)
policy_rules = (("compute", "os_compute_api:servers:reboot"),)
help_text = _("Restarted instances will lose any data"
" not saved in persistent storage.")
action_type = "danger"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Hard Reboot Instance",
u"Hard Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Hard Rebooted Instance",
u"Hard Rebooted Instances",
count
)
def allowed(self, request, instance=None):
if instance is not None:
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
else:
return True
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
name = "soft_reboot"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Soft Reboot Instance",
u"Soft Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Soft Rebooted Instance",
u"Soft Rebooted Instances",
count
)
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=True)
def allowed(self, request, instance=None):
if instance is not None:
return instance.status in ACTIVE_STATES
else:
return True
class TogglePause(tables.BatchAction):
name = "pause"
icon = "pause"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Pause Instance",
u"Pause Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Paused Instance",
u"Paused Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.paused = instance.status == "PAUSED"
if self.paused:
self.current_present_action = UNPAUSE
policy_rules = (
("compute", "os_compute_api:os-pause-server:unpause"),)
else:
self.current_present_action = PAUSE
policy_rules = (
("compute", "os_compute_api:os-pause-server:pause"),)
has_permission = policy.check(
policy_rules, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.paused)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.paused:
api.nova.server_unpause(request, obj_id)
self.current_past_action = UNPAUSE
else:
api.nova.server_pause(request, obj_id)
self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
name = "suspend"
classes = ("btn-suspend",)
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Suspend Instance",
u"Suspend Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Suspended Instance",
u"Suspended Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.suspended = instance.status == "SUSPENDED"
if self.suspended:
self.current_present_action = RESUME
policy_rules = (
("compute", "os_compute_api:os-rescue"),)
else:
self.current_present_action = SUSPEND
policy_rules = (
("compute", "os_compute_api:os-suspend-server:suspend"),)
has_permission = policy.check(
policy_rules, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.suspended)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.suspended:
api.nova.server_resume(request, obj_id)
self.current_past_action = RESUME
else:
api.nova.server_suspend(request, obj_id)
self.current_past_action = SUSPEND
class ToggleShelve(tables.BatchAction):
name = "shelve"
icon = "shelve"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Shelve Instance",
u"Shelve Instances",
count
),
ungettext_lazy(
u"Unshelve Instance",
u"Unshelve Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Shelved Instance",
u"Shelved Instances",
count
),
ungettext_lazy(
u"Unshelved Instance",
u"Unshelved Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('Shelve', request):
return False
if not instance:
return False
if not request.user.is_superuser and getattr(
instance, 'locked', False):
return False
self.shelved = instance.status == "SHELVED_OFFLOADED"
if self.shelved:
self.current_present_action = UNSHELVE
policy_rules = (("compute", "os_compute_api:os-shelve:unshelve"),)
else:
self.current_present_action = SHELVE
policy_rules = (("compute", "os_compute_api:os-shelve:shelve"),)
has_permission = policy.check(
policy_rules, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in SHELVE_READY_STATES or self.shelved)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.shelved:
api.nova.server_unshelve(request, obj_id)
self.current_past_action = UNSHELVE
else:
api.nova.server_shelve(request, obj_id)
self.current_past_action = SHELVE
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "os_compute_api:servers:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(LaunchLink, self).__init__(attrs, **kwargs)
def allowed(self, request, datum):
try:
limits = api.nova.tenant_absolute_limits(request, reserved=True)
instances_available = limits['maxTotalInstances'] \
- limits['totalInstancesUsed']
cores_available = limits['maxTotalCores'] \
- limits['totalCoresUsed']
ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
if instances_available <= 0 or cores_available <= 0 \
or ram_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Launch Instance")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
except Exception:
LOG.exception("Failed to retrieve quota information")
# If we can't get the quota information, leave it to the
# API to check when launching
return True # The action should always be displayed
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render(is_table_action=True))
class LaunchLinkNG(LaunchLink):
name = "launch-ng"
url = "horizon:project:instances:index"
ajax = False
classes = ("btn-launch", )
def get_default_attrs(self):
url = urlresolvers.reverse(self.url)
ngclick = "modal.openLaunchInstanceWizard(" \
"{ successUrl: '%s' })" % url
self.attrs.update({
'ng-controller': 'LaunchInstanceModalController as modal',
'ng-click': ngclick
})
return super(LaunchLinkNG, self).get_default_attrs()
def get_link_url(self, datum=None):
return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Instance")
url = "horizon:project:instances:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("compute", "os_compute_api:servers:update"),)
def get_link_url(self, project):
return self._get_link_url(project, 'instance_info')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
update_instance.UpdateInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
name = "edit_secgroups"
verbose_name = _("Edit Security Groups")
def get_link_url(self, project):
return self._get_link_url(project, 'update_security_groups')
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
not is_deleting(instance) and
request.user.tenant_id == instance.tenant_id)
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
name = "snapshot"
verbose_name = _("Create Snapshot")
url = "horizon:project:images:snapshots:create"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("compute", "os_compute_api:snapshot"),)
def allowed(self, request, instance=None):
return instance.status in SNAPSHOT_READY_STATES \
and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "console"
verbose_name = _("Console")
url = "horizon:project:instances:detail"
classes = ("btn-console",)
policy_rules = (("compute", "os_compute_api:os-consoles:index"),)
def allowed(self, request, instance=None):
# We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
# not set at all, or if it's set to any value other than None or False.
return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \
instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(ConsoleLink, self).get_link_url(datum)
tab_query_string = tabs.ConsoleTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "log"
verbose_name = _("View Log")
url = "horizon:project:instances:detail"
classes = ("btn-log",)
policy_rules = (("compute", "os_compute_api:os-console-output"),)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(LogLink, self).get_link_url(datum)
tab_query_string = tabs.LogTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "resize"
verbose_name = _("Resize Instance")
url = "horizon:project:instances:resize"
classes = ("ajax-modal", "btn-resize")
policy_rules = (("compute", "os_compute_api:servers:resize"),)
def get_link_url(self, project):
return self._get_link_url(project, 'flavor_choice')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
resize_instance.ResizeInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
name = "confirm"
verbose_name = _("Confirm Resize/Migrate")
classes = ("btn-confirm", "btn-action-required")
policy_rules = (("compute", "os_compute_api:servers:confirm_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
name = "revert"
verbose_name = _("Revert Resize/Migrate")
classes = ("btn-revert", "btn-action-required")
policy_rules = (("compute", "os_compute_api:servers:revert_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "rebuild"
verbose_name = _("Rebuild Instance")
classes = ("btn-rebuild", "ajax-modal")
url = "horizon:project:instances:rebuild"
policy_rules = (("compute", "os_compute_api:servers:rebuild"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
name = "decryptpassword"
verbose_name = _("Retrieve Password")
classes = ("btn-decrypt", "ajax-modal")
url = "horizon:project:instances:decryptpassword"
def allowed(self, request, instance):
enable = getattr(settings,
'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
False)
return (enable
and (instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and get_keyname(instance) is not None)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
keypair_name = get_keyname(datum)
return urlresolvers.reverse(self.url, args=[instance_id,
keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
    # Nova doesn't support a floating IP actions policy; update this
    # when bug #1610520 is resolved
policy_rules = (("compute", "os_compute_api:os-floating-ips"),)
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return False
return not is_deleting(instance)
def get_link_url(self, datum):
base_url = urlresolvers.reverse(self.url)
next_url = self.table.get_full_url()
params = {
"instance_id": self.table.get_object_id(datum),
workflows.IPAssociationWorkflow.redirect_param_name: next_url}
params = urlencode(params)
return "?".join([base_url, params])
class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action):
name = "associate-simple"
verbose_name = _("Associate Floating IP")
icon = "link"
    # Nova doesn't support a floating IP actions policy; update this
    # when bug #1610520 is resolved
policy_rules = (("compute", "os_compute_api:os-floating-ips"),)
def allowed(self, request, instance):
if not api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
return not is_deleting(instance)
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
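            # e.g. a hypothetical wrapper value 'aaaa-bbbb-cccc_10.0.0.5'
            # would yield the port id 'aaaa-bbbb-cccc' after the split below.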
target_id = api.network.floating_ip_target_get_by_instance(
request, instance_id).split('_')[0]
fip = api.network.tenant_floating_ip_allocate(request)
api.network.floating_ip_associate(request, fip.id, target_id)
messages.success(request,
_("Successfully associated floating IP: %s")
% fip.ip)
except Exception:
exceptions.handle(request,
_("Unable to associate floating IP."))
return shortcuts.redirect(request.get_full_path())
class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action):
name = "disassociate"
verbose_name = _("Disassociate Floating IP")
classes = ("btn-disassociate",)
    # Nova doesn't support a floating IP actions policy; update this
    # when bug #1610520 is resolved
policy_rules = (("compute", "os_compute_api:os-floating-ips"),)
action_type = "danger"
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if not conf.HORIZON_CONFIG["simple_ip_management"]:
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return not is_deleting(instance)
return False
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
targets = api.network.floating_ip_target_list_by_instance(
request, instance_id)
target_ids = [t.split('_')[0] for t in targets]
fips = [fip for fip in api.network.tenant_floating_ip_list(request)
if fip.port_id in target_ids]
# Removing multiple floating IPs at once doesn't work, so this pops
# off the first one.
if fips:
fip = fips.pop()
api.network.floating_ip_disassociate(request, fip.id)
messages.success(request,
_("Successfully disassociated "
"floating IP: %s") % fip.ip)
else:
messages.info(request, _("No floating IPs to disassociate."))
except Exception:
exceptions.handle(request,
_("Unable to disassociate floating IP."))
return shortcuts.redirect(request.get_full_path())
class UpdateMetadata(policy.PolicyTargetMixin, tables.LinkAction):
name = "update_metadata"
verbose_name = _("Update Metadata")
ajax = False
icon = "pencil"
attrs = {"ng-controller": "MetadataModalHelperController as modal"}
policy_rules = (("compute", "os_compute_api:server-metadata:update"),)
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(UpdateMetadata, self).__init__(attrs, **kwargs)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
self.attrs['ng-click'] = (
"modal.openMetadataModal('instance', '%s', true, 'metadata')"
% instance_id)
return "javascript:void(0);"
def allowed(self, request, instance=None):
return (instance and
instance.status.lower() != 'error')
def instance_fault_to_friendly_message(instance):
fault = getattr(instance, 'fault', {})
message = fault.get('message', _("Unknown"))
default_message = _("Please try again later [Error: %s].") % message
fault_map = {
'NoValidHost': _("There is not enough capacity for this "
"flavor in the selected availability zone. "
"Try again later or select a different availability "
"zone.")
}
return fault_map.get(message, default_message)
def get_instance_error(instance):
if instance.status.lower() != 'error':
return None
message = instance_fault_to_friendly_message(instance)
    preamble = _('Failed to perform requested operation on instance "%s", the '
                 'instance has an error status') % (instance.name or instance.id)
message = string_concat(preamble, ': ', message)
return message
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.nova.server_get(request, instance_id)
try:
instance.full_flavor = api.nova.flavor_get(request,
instance.flavor["id"])
except Exception:
exceptions.handle(request,
_('Unable to retrieve flavor information '
'for instance "%s".') % instance_id,
ignore=True)
try:
api.network.servers_update_addresses(request, [instance])
except Exception:
exceptions.handle(request,
_('Unable to retrieve Network information '
'for instance "%s".') % instance_id,
ignore=True)
error = get_instance_error(instance)
if error:
messages.error(request, error)
return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "start"
classes = ('btn-confirm',)
policy_rules = (("compute", "os_compute_api:servers:start"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Start Instance",
u"Start Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Started Instance",
u"Started Instances",
count
)
def allowed(self, request, instance):
return ((instance is None) or
(instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))
def action(self, request, obj_id):
api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "stop"
policy_rules = (("compute", "os_compute_api:servers:stop"),)
help_text = _("The instance(s) will be shut off.")
action_type = "danger"
@staticmethod
def action_present(count):
return npgettext_lazy(
"Action to perform (the instance is currently running)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
@staticmethod
def action_past(count):
return npgettext_lazy(
"Past action (the instance is currently already Shut Off)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
def allowed(self, request, instance):
return ((instance is None)
or ((get_power_state(instance) in ("RUNNING", "SUSPENDED"))
and not is_deleting(instance)))
def action(self, request, obj_id):
api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "lock"
policy_rules = (("compute", "os_compute_api:os-lock-server:lock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Lock Instance",
u"Lock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Locked Instance",
u"Locked Instances",
count
)
# to only allow unlocked instances to be locked
def allowed(self, request, instance):
if getattr(instance, 'locked', False):
return False
if not api.nova.extension_supported('AdminActions', request):
return False
if not api.nova.is_feature_available(request, "locked_attribute"):
return False
return True
def action(self, request, obj_id):
api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "unlock"
policy_rules = (("compute", "os_compute_api:os-lock-server:unlock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Unlock Instance",
u"Unlock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Unlocked Instance",
u"Unlocked Instances",
count
)
# to only allow locked instances to be unlocked
def allowed(self, request, instance):
if not getattr(instance, 'locked', True):
return False
if not api.nova.extension_supported('AdminActions', request):
return False
if not api.nova.is_feature_available(request, "locked_attribute"):
return False
return True
def action(self, request, obj_id):
api.nova.server_unlock(request, obj_id)
class AttachVolume(tables.LinkAction):
name = "attach_volume"
verbose_name = _("Attach Volume")
url = "horizon:project:instances:attach_volume"
classes = ("ajax-modal",)
policy_rules = (("compute", "os_compute_api:servers:attach_volume"),)
# This action should be disabled if the instance
# is not active, or the instance is being deleted
def allowed(self, request, instance=None):
        return instance.status in ("ACTIVE",) \
            and not is_deleting(instance)
class DetachVolume(AttachVolume):
name = "detach_volume"
verbose_name = _("Detach Volume")
url = "horizon:project:instances:detach_volume"
policy_rules = (("compute", "os_compute_api:servers:detach_volume"),)
# This action should be disabled if the instance
# is not active, or the instance is being deleted
def allowed(self, request, instance=None):
        return instance.status in ("ACTIVE",) \
            and not is_deleting(instance)
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "attach_interface"
verbose_name = _("Attach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:attach_interface"
policy_rules = (("compute", "os_compute_api:os-attach-interfaces"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
# TODO(lyj): the policy for detach interface does not exist in nova.json;
#            once it's added, it should be added here.
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "detach_interface"
verbose_name = _("Detach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:detach_interface"
def allowed(self, request, instance):
if not api.base.is_service_enabled(request, 'network'):
return False
if is_deleting(instance):
return False
if (instance.status not in ACTIVE_STATES and
instance.status != 'SHUTOFF'):
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "fixed":
return True
return False
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
def get_ips(instance):
template_name = 'project/instances/_instance_ips.html'
ip_groups = {}
for ip_group, addresses in instance.addresses.items():
ip_groups[ip_group] = {}
ip_groups[ip_group]["floating"] = []
ip_groups[ip_group]["non_floating"] = []
for address in addresses:
if ('OS-EXT-IPS:type' in address and
address['OS-EXT-IPS:type'] == "floating"):
ip_groups[ip_group]["floating"].append(address)
else:
ip_groups[ip_group]["non_floating"].append(address)
context = {
"ip_groups": ip_groups,
}
return template.loader.render_to_string(template_name, context)
def get_flavor(instance):
if hasattr(instance, "full_flavor"):
template_name = 'project/instances/_instance_flavor.html'
size_ram = sizeformat.mb_float_format(instance.full_flavor.ram)
if instance.full_flavor.disk > 0:
size_disk = sizeformat.diskgbformat(instance.full_flavor.disk)
else:
size_disk = _("%s GB") % "0"
context = {
"name": instance.full_flavor.name,
"id": instance.id,
"size_disk": size_disk,
"size_ram": size_ram,
"vcpus": instance.full_flavor.vcpus,
"flavor_id": instance.full_flavor.id
}
return template.loader.render_to_string(template_name, context)
return _("Not available")
def get_keyname(instance):
if hasattr(instance, "key_name"):
keyname = instance.key_name
return keyname
return _("Not available")
def get_power_state(instance):
return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
STATUS_DISPLAY_CHOICES = (
("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
("active", pgettext_lazy("Current status of an Instance", u"Active")),
("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
("suspended", pgettext_lazy("Current status of an Instance",
u"Suspended")),
("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
("error", pgettext_lazy("Current status of an Instance", u"Error")),
("resize", pgettext_lazy("Current status of an Instance",
u"Resize/Migrate")),
("verify_resize", pgettext_lazy("Current status of an Instance",
u"Confirm or Revert Resize/Migrate")),
("revert_resize", pgettext_lazy(
"Current status of an Instance", u"Revert Resize/Migrate")),
("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
("hard_reboot", pgettext_lazy("Current status of an Instance",
u"Hard Reboot")),
("password", pgettext_lazy("Current status of an Instance", u"Password")),
("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
("migrating", pgettext_lazy("Current status of an Instance",
u"Migrating")),
("build", pgettext_lazy("Current status of an Instance", u"Build")),
("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
("soft-delete", pgettext_lazy("Current status of an Instance",
u"Soft Deleted")),
("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
("shelved_offloaded", pgettext_lazy("Current status of an Instance",
u"Shelved Offloaded")),
# these vm states are used when generating CSV usage summary
("building", pgettext_lazy("Current status of an Instance", u"Building")),
("stopped", pgettext_lazy("Current status of an Instance", u"Stopped")),
("rescued", pgettext_lazy("Current status of an Instance", u"Rescued")),
("resized", pgettext_lazy("Current status of an Instance", u"Resized")),
)
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
("scheduling", pgettext_lazy("Task status of an Instance",
u"Scheduling")),
("block_device_mapping", pgettext_lazy("Task status of an Instance",
u"Block Device Mapping")),
("networking", pgettext_lazy("Task status of an Instance",
u"Networking")),
("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
("image_snapshot", pgettext_lazy("Task status of an Instance",
u"Snapshotting")),
("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
u"Image Snapshot Pending")),
("image_pending_upload", pgettext_lazy("Task status of an Instance",
u"Image Pending Upload")),
("image_uploading", pgettext_lazy("Task status of an Instance",
u"Image Uploading")),
("image_backup", pgettext_lazy("Task status of an Instance",
u"Image Backup")),
("updating_password", pgettext_lazy("Task status of an Instance",
u"Updating Password")),
("resize_prep", pgettext_lazy("Task status of an Instance",
u"Preparing Resize or Migrate")),
("resize_migrating", pgettext_lazy("Task status of an Instance",
u"Resizing or Migrating")),
("resize_migrated", pgettext_lazy("Task status of an Instance",
u"Resized or Migrated")),
("resize_finish", pgettext_lazy("Task status of an Instance",
u"Finishing Resize or Migrate")),
("resize_reverting", pgettext_lazy("Task status of an Instance",
u"Reverting Resize or Migrate")),
("resize_confirming", pgettext_lazy("Task status of an Instance",
u"Confirming Resize or Migrate")),
("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
("reboot_pending", pgettext_lazy("Task status of an Instance",
u"Reboot Pending")),
("reboot_started", pgettext_lazy("Task status of an Instance",
u"Reboot Started")),
("rebooting_hard", pgettext_lazy("Task status of an Instance",
u"Hard Rebooting")),
("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
u"Hard Reboot Pending")),
("reboot_started_hard", pgettext_lazy("Task status of an Instance",
u"Hard Reboot Started")),
("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
("suspending", pgettext_lazy("Task status of an Instance",
u"Suspending")),
("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
("powering-off", pgettext_lazy("Task status of an Instance",
u"Powering Off")),
("powering-on", pgettext_lazy("Task status of an Instance",
u"Powering On")),
("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
("unrescuing", pgettext_lazy("Task status of an Instance",
u"Unrescuing")),
("rebuilding", pgettext_lazy("Task status of an Instance",
u"Rebuilding")),
("rebuild_block_device_mapping", pgettext_lazy(
"Task status of an Instance", u"Rebuild Block Device Mapping")),
("rebuild_spawning", pgettext_lazy("Task status of an Instance",
u"Rebuild Spawning")),
("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
("soft-deleting", pgettext_lazy("Task status of an Instance",
u"Soft Deleting")),
("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
("shelving_image_pending_upload", pgettext_lazy(
"Task status of an Instance", u"Shelving Image Pending Upload")),
("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
u"Shelving Image Uploading")),
("shelving_offloading", pgettext_lazy("Task status of an Instance",
u"Shelving Offloading")),
("unshelving", pgettext_lazy("Task status of an Instance",
u"Unshelving")),
)
POWER_DISPLAY_CHOICES = (
("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
INSTANCE_FILTER_CHOICES = (
('uuid', _("Instance ID ="), True),
('name', _("Instance Name"), True),
('image', _("Image ID ="), True),
('image_name', _("Image Name ="), True),
('ip', _("IPv4 Address"), True),
('ip6', _("IPv6 Address"), True),
('flavor', _("Flavor ID ="), True),
('flavor_name', _("Flavor Name ="), True),
('key_name', _("Key Pair Name"), True),
('status', _("Status ="), True),
('availability_zone', _("Availability Zone"), True),
('changes-since', _("Changes Since"), True,
_("Filter by an ISO 8061 formatted time, e.g. 2016-06-14T06:27:59Z")),
('vcpus', _("vCPUs ="), True),
)
class InstancesFilterAction(tables.FilterAction):
filter_type = "server"
filter_choices = INSTANCE_FILTER_CHOICES
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
("rescue", True),
("shelved", True),
("shelved_offloaded", True),
)
name = tables.WrappingColumn("name",
link="horizon:project:instances:detail",
verbose_name=_("Instance Name"))
image_name = tables.Column("image_name",
verbose_name=_("Image Name"))
ip = tables.Column(get_ips,
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
flavor = tables.Column(get_flavor,
sortable=False,
verbose_name=_("Flavor"))
keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
az = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
empty_value=TASK_DISPLAY_NONE,
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=TASK_DISPLAY_CHOICES)
state = tables.Column(get_power_state,
filters=(title, filters.replace_underscores),
verbose_name=_("Power State"),
display_choices=POWER_DISPLAY_CHOICES)
created = tables.Column("created",
verbose_name=_("Time since created"),
filters=(filters.parse_isotime,
filters.timesince_sortable),
attrs={'data-type': 'timesince'})
class Meta(object):
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
row_class = UpdateRow
table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
launch_actions = ()
if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False):
launch_actions = (LaunchLink,) + launch_actions
if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True):
launch_actions = (LaunchLinkNG,) + launch_actions
table_actions = launch_actions + (DeleteInstance,
InstancesFilterAction)
row_actions = (StartInstance, ConfirmResize, RevertResize,
CreateSnapshot, SimpleAssociateIP, AssociateIP,
SimpleDisassociateIP, AttachInterface,
DetachInterface, EditInstance, AttachVolume,
DetachVolume, UpdateMetadata, DecryptInstancePassword,
EditInstanceSecurityGroups, ConsoleLink, LogLink,
TogglePause, ToggleSuspend, ToggleShelve,
ResizeLink, LockInstance, UnlockInstance,
SoftRebootInstance, RebootInstance,
StopInstance, RebuildInstance, DeleteInstance)
|
nathangeffen/tbonline-old
|
refs/heads/master
|
tbonlineproject/external/sorl/thumbnail/models.py
|
12
|
from django.db import models
from sorl.thumbnail.conf import settings
class KVStore(models.Model):
key = models.CharField(max_length=200, primary_key=True,
db_column=settings.THUMBNAIL_KEY_DBCOLUMN
)
value = models.TextField()
|
350dotorg/Django
|
refs/heads/master
|
tests/modeltests/user_commands/management/__init__.py
|
12133432
| |
codefordurham/Durham-Restaurants
|
refs/heads/develop
|
users/tests/__init__.py
|
12133432
| |
Joshmoss11/x-seq
|
refs/heads/master
|
deeptools/test/__init__.py
|
12133432
| |
albertomurillo/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cnos/cnos_facts.py
|
37
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2019 Red Hat Inc.
# Copyright (C) 2019 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Module to Collect facts from Lenovo Switches running Lenovo CNOS commands
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_facts
version_added: "2.3"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Collect facts from remote devices running Lenovo CNOS
description:
- Collects a base set of device facts from a remote Lenovo device
running on CNOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
notes:
- Tested against CNOS 10.8.1
options:
authorize:
version_added: "2.6"
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
type: bool
default: 'no'
auth_pass:
version_added: "2.6"
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
gather_subset:
version_added: "2.6"
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
        with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_facts.
---
- name: Test cnos Facts
cnos_facts:
---
# Collect all facts from the device
- cnos_facts:
gather_subset: all
# Collect only the config and default facts
- cnos_facts:
gather_subset:
- config
# Do not collect hardware facts
- cnos_facts:
gather_subset:
- "!hardware"
'''
RETURN = '''
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the Lenovo CNOS device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the Lenovo CNOS device
returned: always
type: str
ansible_net_version:
description: The CNOS operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_image:
description: Indicates the active image for the device
returned: always
type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in MB
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system.
This gives information on description, mac address, mtu, speed,
duplex and operstatus
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
'''
import re
from ansible.module_utils.network.cnos.cnos import run_commands
from ansible.module_utils.network.cnos.cnos import check_args
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
self.PERSISTENT_COMMAND_TIMEOUT = 60
def populate(self):
self.responses = run_commands(self.module, self.COMMANDS,
check_rc=False)
def run(self, cmd):
return run_commands(self.module, cmd, check_rc=False)
class Default(FactsBase):
COMMANDS = ['show sys-info', 'show running-config']
def populate(self):
super(Default, self).populate()
data = self.responses[0]
data_run = self.responses[1]
if data:
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['image'] = self.parse_image(data)
if data_run:
self.facts['hostname'] = self.parse_hostname(data_run)
def parse_version(self, data):
for line in data.split('\n'):
line = line.strip()
match = re.match(r'System Software Revision (.*?)',
line, re.M | re.I)
if match:
vers = line.split(':')
ver = vers[1].strip()
return ver
return "NA"
def parse_hostname(self, data_run):
for line in data_run.split('\n'):
line = line.strip()
match = re.match(r'hostname (.*?)', line, re.M | re.I)
if match:
hosts = line.split()
hostname = hosts[1].strip('\"')
return hostname
return "NA"
def parse_model(self, data):
for line in data.split('\n'):
line = line.strip()
match = re.match(r'System Model (.*?)', line, re.M | re.I)
if match:
mdls = line.split(':')
mdl = mdls[1].strip()
return mdl
return "NA"
def parse_image(self, data):
match = re.search(r'(.*) image(.*)', data, re.M | re.I)
if match:
return "Image1"
else:
return "Image2"
def parse_serialnum(self, data):
for line in data.split('\n'):
line = line.strip()
match = re.match(r'System Serial Number (.*?)', line, re.M | re.I)
if match:
serNums = line.split(':')
ser = serNums[1].strip()
return ser
return "NA"
class Hardware(FactsBase):
COMMANDS = [
'show running-config'
]
def populate(self):
super(Hardware, self).populate()
data = self.run(['show process memory'])
data = to_text(data, errors='surrogate_or_strict').strip()
data = data.replace(r"\n", "\n")
if data:
for line in data.split('\n'):
line = line.strip()
match = re.match(r'Mem: (.*?)', line, re.M | re.I)
if match:
memline = line.split(':')
mems = memline[1].strip().split()
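                    # Assumes a free(1)-style 'Mem:' line whose columns are
                    # total, used, free, shared, buff/cache, available; that
                    # is why index 5 is read as 'memavailable_mb' below.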
self.facts['memtotal_mb'] = int(mems[0]) / 1024
self.facts['memused_mb'] = int(mems[1]) / 1024
self.facts['memfree_mb'] = int(mems[2]) / 1024
self.facts['memshared_mb'] = int(mems[3]) / 1024
self.facts['memavailable_mb'] = int(mems[5]) / 1024
def parse_memtotal(self, data):
match = re.search(r'^MemTotal:\s*(.*) kB', data, re.M | re.I)
if match:
return int(match.group(1)) / 1024
def parse_memfree(self, data):
match = re.search(r'^MemFree:\s*(.*) kB', data, re.M | re.I)
if match:
return int(match.group(1)) / 1024
class Config(FactsBase):
COMMANDS = ['show running-config']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = ['show interface brief']
def populate(self):
super(Interfaces, self).populate()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data1 = self.run(['show interface status'])
data1 = to_text(data1, errors='surrogate_or_strict').strip()
data1 = data1.replace(r"\n", "\n")
data2 = self.run(['show interface mac-address'])
data2 = to_text(data2, errors='surrogate_or_strict').strip()
data2 = data2.replace(r"\n", "\n")
lines1 = None
lines2 = None
if data1:
lines1 = self.parse_interfaces(data1)
if data2:
lines2 = self.parse_interfaces(data2)
if lines1 is not None and lines2 is not None:
self.facts['interfaces'] = self.populate_interfaces(lines1, lines2)
data3 = self.run(['show lldp neighbors'])
data3 = to_text(data3, errors='surrogate_or_strict').strip()
data3 = data3.replace(r"\n", "\n")
if data3:
lines3 = self.parse_neighbors(data3)
if lines3 is not None:
self.facts['neighbors'] = self.populate_neighbors(lines3)
data4 = self.run(['show ip interface brief vrf all'])
data5 = self.run(['show ipv6 interface brief vrf all'])
data4 = to_text(data4, errors='surrogate_or_strict').strip()
data4 = data4.replace(r"\n", "\n")
data5 = to_text(data5, errors='surrogate_or_strict').strip()
data5 = data5.replace(r"\n", "\n")
lines4 = None
lines5 = None
if data4:
lines4 = self.parse_ipaddresses(data4)
ipv4_interfaces = self.set_ip_interfaces(lines4)
self.facts['all_ipv4_addresses'] = ipv4_interfaces
if data5:
lines5 = self.parse_ipaddresses(data5)
ipv6_interfaces = self.set_ipv6_interfaces(lines5)
self.facts['all_ipv6_addresses'] = ipv6_interfaces
def parse_ipaddresses(self, data):
parsed = list()
for line in data.split('\n'):
if len(line) == 0:
continue
else:
line = line.strip()
match = re.match(r'^(Ethernet+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(po+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(mgmt+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(loopback+)', line)
if match:
key = match.group(1)
parsed.append(line)
return parsed
def populate_interfaces(self, lines1, lines2):
interfaces = dict()
for line1, line2 in zip(lines1, lines2):
line = line1 + " " + line2
intfSplit = line.split()
innerData = dict()
innerData['description'] = intfSplit[1].strip()
innerData['macaddress'] = intfSplit[8].strip()
innerData['type'] = intfSplit[6].strip()
innerData['speed'] = intfSplit[5].strip()
innerData['duplex'] = intfSplit[4].strip()
innerData['operstatus'] = intfSplit[2].strip()
interfaces[intfSplit[0].strip()] = innerData
return interfaces
def parse_interfaces(self, data):
parsed = list()
for line in data.split('\n'):
if len(line) == 0:
continue
else:
line = line.strip()
match = re.match(r'^(Ethernet+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(po+)', line)
if match:
key = match.group(1)
parsed.append(line)
match = re.match(r'^(mgmt+)', line)
if match:
key = match.group(1)
parsed.append(line)
return parsed
def set_ip_interfaces(self, line4):
ipv4_addresses = list()
for line in line4:
ipv4Split = line.split()
if 'Ethernet' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
if 'mgmt' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
if 'po' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
if 'loopback' in ipv4Split[0]:
ipv4_addresses.append(ipv4Split[1])
return ipv4_addresses
def set_ipv6_interfaces(self, line4):
ipv6_addresses = list()
for line in line4:
ipv6Split = line.split()
if 'Ethernet' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
if 'mgmt' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
if 'po' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
if 'loopback' in ipv6Split[0]:
ipv6_addresses.append(ipv6Split[1])
return ipv6_addresses
def populate_neighbors(self, lines3):
neighbors = dict()
device_name = ''
for line in lines3:
neighborSplit = line.split()
innerData = dict()
count = len(neighborSplit)
if count == 5:
local_interface = neighborSplit[1].strip()
innerData['Device Name'] = neighborSplit[0].strip()
innerData['Hold Time'] = neighborSplit[2].strip()
innerData['Capability'] = neighborSplit[3].strip()
innerData['Remote Port'] = neighborSplit[4].strip()
neighbors[local_interface] = innerData
elif count == 4:
local_interface = neighborSplit[0].strip()
innerData['Hold Time'] = neighborSplit[1].strip()
innerData['Capability'] = neighborSplit[2].strip()
innerData['Remote Port'] = neighborSplit[3].strip()
neighbors[local_interface] = innerData
return neighbors
def parse_neighbors(self, neighbors):
parsed = list()
for line in neighbors.split('\n'):
if len(line) == 0:
continue
else:
line = line.strip()
if 'Ethernet' in line:
parsed.append(line)
if 'mgmt' in line:
parsed.append(line)
if 'po' in line:
parsed.append(line)
if 'loopback' in line:
parsed.append(line)
return parsed
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
PERSISTENT_COMMAND_TIMEOUT = 60
def main():
"""main entry point for module execution
"""
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
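    # Resolve the requested subsets: 'all' expands to every valid subset,
    # a leading '!' marks a subset for exclusion, and 'default' is always
    # collected regardless of the selection.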
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
warnings = list()
check_args(module, warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
Raviyanto/sunflower-fm
|
refs/heads/master
|
application/plugins/system_terminal/plugin.py
|
7
|
import os
import user
import shlex
import subprocess
from parameters import Parameters
from plugin_base.terminal import Terminal, TerminalType
def register_plugin(application):
"""Register plugin classes with application"""
application.register_class('system_terminal', _('System terminal'), SystemTerminal)
class SystemTerminal(Terminal):
"""System terminal plugin"""
def __init__(self, parent, notebook, options):
Terminal.__init__(self, parent, notebook, options)
self._file_list_button.set_tooltip_text(_('Open current directory'))
# variable to store process id
self._pid = None
# make sure we open in a good path
self.path = self._options.get('path', user.home)
self._close_on_child_exit = self._options.get('close_with_child', True)
self._terminal_type = self._parent.options.section('terminal').get('type')
shell_command = self._options.get('shell_command', os.environ['SHELL'])
if self._terminal_type == TerminalType.VTE:
# we need TERM environment variable set
if not 'TERM' in os.environ:
os.environ['TERM'] = 'xterm-color'
os.environ['COLORTERM'] = 'gnome-terminal'
if self._vte_present:
# fork default shell
self._terminal.connect('child-exited', self.__child_exited)
self._terminal.connect('status-line-changed', self._update_terminal_status)
self._terminal.connect('realize', self.__terminal_realized)
elif self._terminal_type == TerminalType.EXTERNAL:
# connect signals
self._terminal.connect('realize', self.__socket_realized)
self._terminal.connect('plug-removed', self.__child_exited)
# disable controls
self._menu_button.set_sensitive(False)
# change titles
self._change_tab_text(_('Terminal'))
self._title_bar.set_title(_('Terminal'))
self._title_bar.set_subtitle(shell_command)
self.show_all()
def __socket_realized(self, widget, data=None):
"""Connect process when socket is realized"""
socket_id = self._terminal.get_id()
shell_command = self._options.get('shell_command', None)
command_version = 'command' if shell_command is None else 'command2'
arguments = self._options.get('arguments', [])
# append additional parameter if we need to wait for command to finish
if not self._options.get('close_with_child'):
arguments.extend(('&&', 'read'))
arguments_string = ' '.join(arguments)
# parse command
terminal_command = self._parent.options.section('terminal').get(command_version)
terminal_command = shlex.split(terminal_command.format(socket_id, arguments_string))
# execute process
process = subprocess.Popen(terminal_command, cwd=self.path)
self._pid = process.pid
def __terminal_realized(self, widget, data=None):
"""Event called once terminal emulator is realized"""
shell_command = self._options.get('shell_command', os.environ['SHELL'])
arguments = self._options.get('arguments', [shell_command])
self._pid = self._terminal.fork_command(
command=shell_command,
argv=arguments,
directory=self.path
)
def __child_exited(self, widget, data=None):
"""Handle child process termination"""
if self._close_on_child_exit or self._terminal_type == TerminalType.EXTERNAL:
self._close_tab()
def __update_path_from_pid(self):
"""Update terminal path from child process"""
try:
if self._pid is not None and os.path.isdir('/proc/{0}'.format(self._pid)):
self.path = os.readlink('/proc/{0}/cwd'.format(self._pid))
self._options.set('path', self.path)
except:
pass
def _close_tab(self, widget=None, data=None):
"""Provide additional functionality"""
if self._notebook.get_n_pages() == 1:
DefaultList = self._parent.plugin_classes['file_list']
options = Parameters()
options.set('path', self.path)
self._parent.create_tab(self._notebook, DefaultList, options)
return Terminal._close_tab(self, widget, data)
def _handle_tab_close(self):
"""Clean up before closing tab"""
Terminal._handle_tab_close(self)
self.__update_path_from_pid()
def _create_file_list(self, widget=None, data=None):
"""Create file list in parent notebook"""
self.__update_path_from_pid()
DefaultList = self._parent.plugin_classes['file_list']
options = Parameters()
options.set('path', self.path)
self._parent.create_tab(self._notebook, DefaultList, options)
return True
|
alex3287/PyCharmProjects
|
refs/heads/master
|
a_b_s/backup.py
|
1
|
#! usr/bin/python3
# Program for creating backups
import os, zipfile
def backupToZip(folder):
"""
    Creates a backup copy of the entire contents of the folder 'folder'.
"""
folder = os.path.abspath(folder)
number=1
while True:
zipFilename = os.path.basename(folder)+'_'+str(number)+'.zip'
if not os.path.exists(zipFilename):
break
number += 1
    print('Creating file %s...' % (zipFilename))
backupZip = zipfile.ZipFile(zipFilename, 'w')
for foldername, subfolders, filenames in os.walk(folder):
        print('Adding files from folder %s...' % (foldername))
        # Add the current folder itself to the zip file.
        backupZip.write(foldername)
        # Add all files from this folder to the zip file.
for filename in filenames:
newBase= os.path.basename(folder) + '_'
if filename.startswith(newBase) and filename.endswith('.zip'):
                continue  # do not back up previously created backup archives
backupZip.write(os.path.join(foldername, filename))
backupZip.close()
    print('All done!!!')
if __name__ == '__main__':
    n = input('Enter the path to the folder to archive into a Zip file\n>>>')
backupToZip(n)
|
evansd/django
|
refs/heads/master
|
tests/custom_columns/models.py
|
72
|
"""
Custom column/table names
If your database column name is different than your model attribute, use the
``db_column`` parameter. Note that you'll use the field's name, not its column
name, in API usage.
If your database table name is different than your model name, use the
``db_table`` Meta attribute. This has no effect on the API used to
query the database.
If you need to use a table name for a many-to-many relationship that differs
from the default generated name, use the ``db_table`` parameter on the
``ManyToManyField``. This has no effect on the API for querying the database.
"""
from django.db import models
class Author(models.Model):
Author_ID = models.AutoField(primary_key=True, db_column='Author ID')
first_name = models.CharField(max_length=30, db_column='firstname')
last_name = models.CharField(max_length=30, db_column='last')
def __str__(self):
return '%s %s' % (self.first_name, self.last_name)
class Meta:
db_table = 'my_author_table'
ordering = ('last_name', 'first_name')
class Article(models.Model):
Article_ID = models.AutoField(primary_key=True, db_column='Article ID')
headline = models.CharField(max_length=100)
authors = models.ManyToManyField(Author, db_table='my_m2m_table')
primary_author = models.ForeignKey(
Author,
models.SET_NULL,
db_column='Author ID',
related_name='primary_set',
null=True,
)
def __str__(self):
return self.headline
class Meta:
ordering = ('headline',)
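# Illustrative usage sketch (not part of the original test models, kept as
# comments): ORM lookups are written against the field names defined above,
# and Django maps them to the custom 'firstname'/'last'/'Author ID' columns
# transparently, e.g.:
#
#     Author.objects.filter(first_name='John').order_by('last_name')
#     Article.objects.filter(primary_author__last_name='Smith')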
|
rysson/filmkodi
|
refs/heads/master
|
plugin.video.mrknow/lib/entities/CItemInfo.py
|
2
|
class CItemInfo(object):
def __init__(self):
self.name = ''
self.src = 'url'
self.rule = ''
self.default = ''
self.build = ''
self.convert = []
|
svn2github/audacity
|
refs/heads/master
|
lib-src/lv2/lilv/waflib/TaskGen.py
|
62
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import copy,re,os
from waflib import Task,Utils,Logs,Errors,ConfigSet,Node
feats=Utils.defaultdict(set)
class task_gen(object):
mappings={}
prec=Utils.defaultdict(list)
def __init__(self,*k,**kw):
self.source=''
self.target=''
self.meths=[]
self.prec=Utils.defaultdict(list)
self.mappings={}
self.features=[]
self.tasks=[]
if not'bld'in kw:
self.env=ConfigSet.ConfigSet()
self.idx=0
self.path=None
else:
self.bld=kw['bld']
self.env=self.bld.env.derive()
self.path=self.bld.path
try:
self.idx=self.bld.idx[id(self.path)]=self.bld.idx.get(id(self.path),0)+1
except AttributeError:
self.bld.idx={}
self.idx=self.bld.idx[id(self.path)]=1
for key,val in kw.items():
setattr(self,key,val)
def __str__(self):
return"<task_gen %r declared in %s>"%(self.name,self.path.abspath())
def __repr__(self):
lst=[]
for x in self.__dict__.keys():
if x not in['env','bld','compiled_tasks','tasks']:
lst.append("%s=%s"%(x,repr(getattr(self,x))))
return"bld(%s) in %s"%(", ".join(lst),self.path.abspath())
def get_name(self):
try:
return self._name
except AttributeError:
if isinstance(self.target,list):
lst=[str(x)for x in self.target]
name=self._name=','.join(lst)
else:
name=self._name=str(self.target)
return name
def set_name(self,name):
self._name=name
name=property(get_name,set_name)
def to_list(self,val):
if isinstance(val,str):return val.split()
else:return val
def post(self):
if getattr(self,'posted',None):
return False
self.posted=True
keys=set(self.meths)
self.features=Utils.to_list(self.features)
for x in self.features+['*']:
st=feats[x]
if not st:
if not x in Task.classes:
Logs.warn('feature %r does not exist - bind at least one method to it'%x)
keys.update(list(st))
prec={}
prec_tbl=self.prec or task_gen.prec
for x in prec_tbl:
if x in keys:
prec[x]=prec_tbl[x]
tmp=[]
for a in keys:
for x in prec.values():
if a in x:break
else:
tmp.append(a)
tmp.sort()
out=[]
while tmp:
e=tmp.pop()
if e in keys:out.append(e)
try:
nlst=prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
if prec:
raise Errors.WafError('Cycle detected in the method execution %r'%prec)
out.reverse()
self.meths=out
Logs.debug('task_gen: posting %s %d'%(self,id(self)))
for x in out:
try:
v=getattr(self,x)
except AttributeError:
raise Errors.WafError('%r is not a valid task generator method'%x)
Logs.debug('task_gen: -> %s (%d)'%(x,id(self)))
v()
Logs.debug('task_gen: posted %s'%self.name)
return True
def get_hook(self,node):
name=node.name
for k in self.mappings:
if name.endswith(k):
return self.mappings[k]
for k in task_gen.mappings:
if name.endswith(k):
return task_gen.mappings[k]
raise Errors.WafError("File %r has no mapping in %r (did you forget to load a waf tool?)"%(node,task_gen.mappings.keys()))
def create_task(self,name,src=None,tgt=None):
task=Task.classes[name](env=self.env.derive(),generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
self.tasks.append(task)
return task
def clone(self,env):
newobj=self.bld()
for x in self.__dict__:
if x in['env','bld']:
continue
elif x in['path','features']:
setattr(newobj,x,getattr(self,x))
else:
setattr(newobj,x,copy.copy(getattr(self,x)))
newobj.posted=False
if isinstance(env,str):
newobj.env=self.bld.all_envs[env].derive()
else:
newobj.env=env.derive()
return newobj
def declare_chain(name='',rule=None,reentrant=None,color='BLUE',ext_in=[],ext_out=[],before=[],after=[],decider=None,scan=None,install_path=None,shell=False):
ext_in=Utils.to_list(ext_in)
ext_out=Utils.to_list(ext_out)
if not name:
name=rule
cls=Task.task_factory(name,rule,color=color,ext_in=ext_in,ext_out=ext_out,before=before,after=after,scan=scan,shell=shell)
def x_file(self,node):
ext=decider and decider(self,node)or cls.ext_out
if ext_in:
_ext_in=ext_in[0]
tsk=self.create_task(name,node)
cnt=0
keys=list(self.mappings.keys())+list(self.__class__.mappings.keys())
for x in ext:
k=node.change_ext(x,ext_in=_ext_in)
tsk.outputs.append(k)
if reentrant!=None:
if cnt<int(reentrant):
self.source.append(k)
else:
for y in keys:
if k.name.endswith(y):
self.source.append(k)
break
cnt+=1
if install_path:
self.bld.install_files(install_path,tsk.outputs)
return tsk
for x in cls.ext_in:
task_gen.mappings[x]=x_file
return x_file
def taskgen_method(func):
setattr(task_gen,func.__name__,func)
return func
def feature(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for name in k:
feats[name].update([func.__name__])
return func
return deco
def before_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not func.__name__ in task_gen.prec[fun_name]:
task_gen.prec[fun_name].append(func.__name__)
return func
return deco
before=before_method
def after_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not fun_name in task_gen.prec[func.__name__]:
task_gen.prec[func.__name__].append(fun_name)
return func
return deco
after=after_method
def extension(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for x in k:
task_gen.mappings[x]=func
return func
return deco
@taskgen_method
def to_nodes(self,lst,path=None):
tmp=[]
path=path or self.path
find=path.find_resource
if isinstance(lst,self.path.__class__):
lst=[lst]
for x in Utils.to_list(lst):
if isinstance(x,str):
node=find(x)
else:
node=x
if not node:
raise Errors.WafError("source not found: %r in %r"%(x,self))
tmp.append(node)
return tmp
@feature('*')
def process_source(self):
self.source=self.to_nodes(getattr(self,'source',[]))
for node in self.source:
self.get_hook(node)(self,node)
@feature('*')
@before_method('process_source')
def process_rule(self):
if not getattr(self,'rule',None):
return
name=str(getattr(self,'name',None)or self.target or getattr(self.rule,'__name__',self.rule))
try:
cache=self.bld.cache_rule_attr
except AttributeError:
cache=self.bld.cache_rule_attr={}
cls=None
if getattr(self,'cache_rule','True'):
try:
cls=cache[(name,self.rule)]
except KeyError:
pass
if not cls:
cls=Task.task_factory(name,self.rule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'),scan=getattr(self,'scan',None))
if getattr(self,'scan',None):
cls.scan=self.scan
elif getattr(self,'deps',None):
def scan(self):
nodes=[]
for x in self.generator.to_list(getattr(self.generator,'deps',None)):
node=self.generator.path.find_resource(x)
if not node:
self.generator.bld.fatal('Could not find %r (was it declared?)'%x)
nodes.append(node)
return[nodes,[]]
cls.scan=scan
if getattr(self,'update_outputs',None):
Task.update_outputs(cls)
if getattr(self,'always',None):
Task.always_run(cls)
for x in['after','before','ext_in','ext_out']:
setattr(cls,x,getattr(self,x,[]))
if getattr(self,'cache_rule','True'):
cache[(name,self.rule)]=cls
tsk=self.create_task(name)
if getattr(self,'target',None):
if isinstance(self.target,str):
self.target=self.target.split()
if not isinstance(self.target,list):
self.target=[self.target]
for x in self.target:
if isinstance(x,str):
tsk.outputs.append(self.path.find_or_declare(x))
else:
x.parent.mkdir()
tsk.outputs.append(x)
if getattr(self,'install_path',None):
self.bld.install_files(self.install_path,tsk.outputs)
if getattr(self,'source',None):
tsk.inputs=self.to_nodes(self.source)
self.source=[]
if getattr(self,'cwd',None):
tsk.cwd=self.cwd
@feature('seq')
def sequence_order(self):
if self.meths and self.meths[-1]!='sequence_order':
self.meths.append('sequence_order')
return
if getattr(self,'seq_start',None):
return
if getattr(self.bld,'prev',None):
self.bld.prev.post()
for x in self.bld.prev.tasks:
for y in self.tasks:
y.set_run_after(x)
self.bld.prev=self
re_m4=re.compile(r'@(\w+)@',re.M)
class subst_pc(Task.Task):
def run(self):
if getattr(self.generator,'is_copy',None):
self.outputs[0].write(self.inputs[0].read('rb'),'wb')
if getattr(self.generator,'chmod',None):
os.chmod(self.outputs[0].abspath(),self.generator.chmod)
return
code=self.inputs[0].read(encoding=getattr(self.generator,'encoding','ISO8859-1'))
if getattr(self.generator,'subst_fun',None):
code=self.generator.subst_fun(self,code)
if code:
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
return
code=code.replace('%','%%')
lst=[]
def repl(match):
g=match.group
if g(1):
lst.append(g(1))
return"%%(%s)s"%g(1)
return''
code=re_m4.sub(repl,code)
try:
d=self.generator.dct
except AttributeError:
d={}
for x in lst:
tmp=getattr(self.generator,x,'')or self.env.get_flat(x)or self.env.get_flat(x.upper())
d[x]=str(tmp)
code=code%d
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
self.generator.bld.raw_deps[self.uid()]=self.dep_vars=lst
try:delattr(self,'cache_sig')
except AttributeError:pass
if getattr(self.generator,'chmod',None):
os.chmod(self.outputs[0].abspath(),self.generator.chmod)
def sig_vars(self):
bld=self.generator.bld
env=self.env
upd=self.m.update
if getattr(self.generator,'subst_fun',None):
upd(Utils.h_fun(self.generator.subst_fun))
vars=self.generator.bld.raw_deps.get(self.uid(),[])
act_sig=bld.hash_env_vars(env,vars)
upd(act_sig)
lst=[getattr(self.generator,x,'')for x in vars]
upd(Utils.h_list(lst))
return self.m.digest()
@extension('.pc.in')
def add_pcfile(self,node):
tsk=self.create_task('subst_pc',node,node.change_ext('.pc','.pc.in'))
self.bld.install_files(getattr(self,'install_path','${LIBDIR}/pkgconfig/'),tsk.outputs)
class subst(subst_pc):
pass
@feature('subst')
@before_method('process_source','process_rule')
def process_subst(self):
src=Utils.to_list(getattr(self,'source',[]))
if isinstance(src,Node.Node):
src=[src]
tgt=Utils.to_list(getattr(self,'target',[]))
if isinstance(tgt,Node.Node):
tgt=[tgt]
if len(src)!=len(tgt):
raise Errors.WafError('invalid number of source/target for %r'%self)
for x,y in zip(src,tgt):
if not x or not y:
raise Errors.WafError('null source or target for %r'%self)
a,b=None,None
if isinstance(x,str)and isinstance(y,str)and x==y:
a=self.path.find_node(x)
b=self.path.get_bld().make_node(y)
if not os.path.isfile(b.abspath()):
b.sig=None
b.parent.mkdir()
else:
if isinstance(x,str):
a=self.path.find_resource(x)
elif isinstance(x,Node.Node):
a=x
if isinstance(y,str):
b=self.path.find_or_declare(y)
elif isinstance(y,Node.Node):
b=y
if not a:
			raise Errors.WafError('could not find %r for %r'%(x,self))
has_constraints=False
tsk=self.create_task('subst',a,b)
for k in('after','before','ext_in','ext_out'):
val=getattr(self,k,None)
if val:
has_constraints=True
setattr(tsk,k,val)
if not has_constraints and b.name.endswith('.h'):
tsk.before=[k for k in('c','cxx')if k in Task.classes]
inst_to=getattr(self,'install_path',None)
if inst_to:
self.bld.install_files(inst_to,b,chmod=getattr(self,'chmod',Utils.O644))
self.source=[]
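# Illustrative sketch, not part of waf: how the subst_pc task above expands
# @VAR@ placeholders. '%' is escaped, '@NAME@' becomes '%(NAME)s', and the
# result is filled from a value dict; the helper and sample values below are
# hypothetical and exist only to demonstrate the rewrite.
def _subst_example(code,values):
	code=code.replace('%','%%')
	names=[]
	def repl(match):
		names.append(match.group(1))
		return"%%(%s)s"%match.group(1)
	code=re_m4.sub(repl,code)
	return code%dict((k,str(values.get(k,'')))for k in names)
# e.g. _subst_example('prefix=@PREFIX@',{'PREFIX':'/usr'}) -> 'prefix=/usr'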
|
wu-ty/LINE_PROJECT
|
refs/heads/master
|
setup.py
|
2
|
import os
from setuptools import setup
import feedreader
version = feedreader.__version__
long_description = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
setup(
name = 'django-feedreader',
version = version,
packages = ['feedreader'],
include_package_data = True,
license = 'BSD License',
description = 'A simple Django app to aggregate RSS feeds.',
long_description = long_description,
url = 'https://github.com/ahernp/django-feedreader',
author = 'Paul Ahern',
author_email = 'ahernp@ahernp.com',
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires=[
'Django==1.8',
'django-braces==1.4.0',
'factory-boy==2.5.1',
'feedparser==5.1.3',
'mock==1.0.1',
'pytz>=2015.2',
],
)
|
kehao95/Wechat_LearnHelper
|
refs/heads/master
|
src/env/lib/python3.5/site-packages/flask/testsuite/test_apps/path/installed_package/__init__.py
|
1799
|
import flask
app = flask.Flask(__name__)
|
xlongfeng/agreement
|
refs/heads/master
|
item.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
from enum import Enum
import json
import copy
import sqlalchemy
from sqlalchemy import (Column, ForeignKey, Integer, Boolean, \
String, DateTime, Date, UnicodeText, \
create_engine, desc)
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.automap import automap_base
from PyQt5.QtCore import Qt, QCoreApplication, QDate
from PyQt5.QtWidgets import (QDialog, QHeaderView, QTreeWidgetItem, \
QAction, QMessageBox)
from PyQt5.QtGui import QIntValidator
from database import *
from ui_itemview import *
from ui_itemhistoryview import *
from ui_itemphaseview import *
from ui_itemdualphasenewview import *
_translate = QCoreApplication.translate
def qdate_to_date(qdate):
return datetime.strptime(qdate.toString("yyyy-MM-dd"), "%Y-%m-%d").date()
class ItemModel(Base):
__tablename__ = 'item_model'
id = Column(Integer, primary_key=True)
createDate = Column('create_date', DateTime, default=datetime.now)
writeDate = Column('write_date', DateTime, default=datetime.now, onupdate=datetime.now)
name = Column('name', String)
startDate = Column('start_date', Date)
startDateLeapMonth = Column('start_date_leap_month', Boolean, default=False)
quantity = Column('quantity', Integer)
checkin = Column('checkin', Integer)
checkout = Column('checkout', Integer)
fee = Column('fee', Integer)
period = Column('period', Integer)
# [dict(phase=phase, amount=amount)]
markup = Column('markup', String)
# [cashOut]
cashOut = Column('cash_out', String)
# [dict(date=date, months=[month])]
dualPhase = Column('dual_phase', String)
note = Column('note', UnicodeText)
histories = relationship("ItemHistoryModel")
def startDatetoString(self):
return _translate("ItemViewDialog", "{}/{}").format(self.startDate.year, self.startDate.month)
def getChecking(self, startPhase=None, endPhase=None):
        ''' Number of active (not yet cashed-out) participants '''
if startPhase is None or endPhase is None:
checking = self.quantity - len(self.getCashOut())
checking = 0 if checking < 0 else checking
else:
checking = self.quantity
for cashOut in self.getCashOut():
if startPhase <= cashOut and cashOut <= endPhase:
checking -= 1
elif startPhase > cashOut:
checking -= 1
return checking
def getPhaseAmount(self, phase, index):
        ''' Payment amount due for the given phase '''
checkin = self.checkin
if phase == 1:
checkin += self.getFee()
for markup in self.getMarkup():
            if phase >= markup["phase"]: # price markup in effect
checkin += markup["amount"]
cashOut = self.getCashOut()
        if index < len(cashOut): # already cashed out
            cashPhase = cashOut[index]
            if phase == cashPhase:
                if phase == 1:
                    checkin = self.getFee()
                else:
                    checkin = 0
            elif phase > cashPhase:
checkin = self.checkout
return checkin
def getFee(self):
return self.fee if self.fee is not None else self.checkout
def getMarkup(self):
if self.markup is not None and self.markup != "":
markup = json.loads(self.markup)
markup = sorted(markup, key=lambda x: x["phase"])
else:
markup = []
return markup
def setMarkup(self, markup):
self.markup = json.dumps(markup)
def getCashOut(self):
if self.cashOut is not None and self.cashOut != "":
cashOut = json.loads(self.cashOut)
cashOut = sorted(cashOut)
else:
cashOut = []
return cashOut
def setCashOut(self, cashOut):
self.cashOut = json.dumps(cashOut)
def getCashOutAmount(self, phase):
checkin = self.checkin
for markup in self.getMarkup():
            if phase >= markup["phase"]: # price markup in effect
checkin += markup["amount"]
return (phase - 1) * self.checkout + (self.period - phase - 1) * checkin
def getDualPhase(self):
if self.dualPhase is not None and self.dualPhase != "":
dualPhase = json.loads(self.dualPhase)
dualPhase = sorted(dualPhase, key=lambda x: x["date"])
else:
dualPhase = []
return dualPhase
def setDualPhase(self, dualPhase):
self.dualPhase = json.dumps(dualPhase)
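# Illustrative worked example (hypothetical figures, not part of the module):
# with checkin=400, checkout=600, period=80 and a markup of 50 starting at
# phase 10, getCashOutAmount(20) evaluates to
#   (20 - 1) * 600 + (80 - 20 - 1) * (400 + 50) = 11400 + 26550 = 37950
# i.e. 19 phases counted at the checkout rate plus the remaining 59 phases at
# the marked-up checkin rate.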
class ItemHistoryModel(Base):
__tablename__ = 'item_history_model'
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('item_model.id'))
createDate = Column('create_date', DateTime, default=datetime.now)
writeDate = Column('write_date', DateTime, default=datetime.now)
name = Column('name', String)
class ItemDualPhaseNewDialog(QDialog):
def __init__(self, item, dualPhaseEdit, parent=None):
super(ItemDualPhaseNewDialog, self).__init__(parent)
self.ui = Ui_ItemDualPhaseNewView()
self.ui.setupUi(self)
self.item = item
minDate = item.startDate
maxDate = item.startDate + timedelta(days=item.period * 31)
self.ui.dateEdit.setDateRange(minDate, maxDate)
self.monthCheckBox = list()
self.monthCheckBox.append(self.ui.m1CheckBox)
self.monthCheckBox.append(self.ui.m2CheckBox)
self.monthCheckBox.append(self.ui.m3CheckBox)
self.monthCheckBox.append(self.ui.m4CheckBox)
self.monthCheckBox.append(self.ui.m5CheckBox)
self.monthCheckBox.append(self.ui.m6CheckBox)
self.monthCheckBox.append(self.ui.m7CheckBox)
self.monthCheckBox.append(self.ui.m8CheckBox)
self.monthCheckBox.append(self.ui.m9CheckBox)
self.monthCheckBox.append(self.ui.m10CheckBox)
self.monthCheckBox.append(self.ui.m11CheckBox)
self.monthCheckBox.append(self.ui.m12CheckBox)
self.dualPhaseEdit = dualPhaseEdit
self.loadDualPhase()
self.ui.savePushButton.pressed.connect(self.onAccepted)
self.ui.cancelPushButton.pressed.connect(self.reject)
def loadDualPhase(self):
if self.dualPhaseEdit is None:
return
self.ui.dateEdit.setDate(QDate.fromString(self.dualPhaseEdit["date"], Qt.ISODate))
for i in range(0, 12):
if (i + 1) in self.dualPhaseEdit["months"]:
self.monthCheckBox[i].setChecked(True)
def monthHasChecked(self):
hasChecked = False
for checkBox in self.monthCheckBox:
if checkBox.isChecked():
hasChecked = True
                break
return hasChecked
def getDualPhase(self):
months = list()
for i in range(0, 12):
if self.monthCheckBox[i].isChecked():
months.append(i + 1)
return dict(date=qdate_to_date(self.ui.dateEdit.date()).isoformat(),
months=months)
def onAccepted(self):
if self.monthHasChecked() == False:
QMessageBox.warning(self, "", _translate("ItemViewDialog", "No month was checked"))
return
dualPhase = self.item.getDualPhase()
for dp in dualPhase:
if dp["date"] == self.getDualPhase()["date"] and self.dualPhaseEdit is None:
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Duplicate date"))
return
if self.dualPhaseEdit is not None:
dualPhase.remove(self.dualPhaseEdit)
dualPhase.append(self.getDualPhase())
self.item.setDualPhase(dualPhase)
self.accept()
class ItemMarkupNewDialog(QDialog):
def __init__(self, item, markupEdit, parent=None):
super(ItemMarkupNewDialog, self).__init__(parent)
self.ui = Ui_ItemPhaseView()
self.ui.setupUi(self)
self.setWindowTitle(_translate("ItemPhaseView", "New Markup"))
self.item = item
self.ui.phaseSpinBox.setRange(1, item.period)
self.ui.amountLineEdit.setValidator(QIntValidator(self))
self.markupEdit = markupEdit
self.loadMarkupEdit()
self.ui.savePushButton.pressed.connect(self.onAccepted)
self.ui.cancelPushButton.pressed.connect(self.reject)
def loadMarkupEdit(self):
if self.markupEdit is None:
return
self.ui.phaseSpinBox.setValue(self.markupEdit["phase"])
self.ui.amountLineEdit.setText(str(self.markupEdit["amount"]))
def getMarkup(self):
return dict(phase=self.ui.phaseSpinBox.value(), amount=int(self.ui.amountLineEdit.text()))
def onAccepted(self):
if self.ui.amountLineEdit.text() == "":
QMessageBox.warning(self, "", _translate("ItemViewDialog", "No amount was input"))
return
markup = self.item.getMarkup()
for m in markup:
if m["phase"] == self.getMarkup()["phase"] and self.markupEdit is None:
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Duplicate phase"))
return
if self.markupEdit is not None:
markup.remove(self.markupEdit)
markup.append(self.getMarkup())
self.item.setMarkup(markup)
self.accept()
class ItemCashOutNewDialog(QDialog):
def __init__(self, item, cashOutEdit, parent=None):
super(ItemCashOutNewDialog, self).__init__(parent)
self.ui = Ui_ItemPhaseView()
self.ui.setupUi(self)
self.setWindowTitle(_translate("ItemPhaseView", "New Cash Out"))
self.item = item
self.ui.phaseSpinBox.setRange(1, item.period)
self.ui.phaseSpinBox.valueChanged.connect(self.onPhaseChanged)
self.onPhaseChanged(cashOutEdit)
self.ui.amountLineEdit.setReadOnly(True)
self.cashOutEdit = cashOutEdit
self.loadCashOutEdit()
self.ui.savePushButton.pressed.connect(self.onAccepted)
self.ui.cancelPushButton.pressed.connect(self.reject)
def onPhaseChanged(self, phase):
if phase is None:
phase = 1
amount = self.item.getCashOutAmount(phase)
self.ui.amountLineEdit.setText(str(amount))
def loadCashOutEdit(self):
if self.cashOutEdit is None:
return
self.ui.phaseSpinBox.setValue(self.cashOutEdit)
def getCashOut(self):
return self.ui.phaseSpinBox.value()
def onAccepted(self):
cashOut = self.item.getCashOut()
if self.getCashOut() in cashOut and self.cashOutEdit is None:
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Duplicate phase"))
return
if self.cashOutEdit is not None:
cashOut.remove(self.cashOutEdit)
cashOut.append(self.getCashOut())
self.item.setCashOut(cashOut)
self.accept()
class ItemHistoryDialog(QDialog):
def __init__(self, item, parent=None):
super(ItemHistoryDialog, self).__init__(parent)
self.ui = Ui_ItemHistoryView()
self.ui.setupUi(self)
self.ui.treeWidget.header().setSectionResizeMode(QHeaderView.ResizeToContents)
topItem = QTreeWidgetItem([item.name])
self.ui.treeWidget.addTopLevelItem(topItem)
for history in item.histories:
topItem.addChild(QTreeWidgetItem([history.createDate.strftime("%Y-%m-%d %H:%M:%S"), history.name]))
topItem.setExpanded(True)
class TreeWidgetItem (QTreeWidgetItem):
Category = Enum('Category', 'dualphase markup cashout')
def __init__(self, category, data, strings):
super(TreeWidgetItem, self).__init__(strings)
self.category = category
self.data = data
def getCategory(self):
return self.category
def getData(self):
return self.data
class ItemViewDialog(QDialog):
def __init__(self, id=None, parent=None):
super(ItemViewDialog, self).__init__(parent)
self.ui = Ui_ItemView()
self.ui.setupUi(self)
self.ui.checkinLineEdit.setValidator(QIntValidator(self))
self.ui.checkoutLineEdit.setValidator(QIntValidator(self))
self.ui.feeLineEdit.setValidator(QIntValidator(self))
self.ui.quantityLineEdit.setValidator(QIntValidator(self))
self.ui.periodLineEdit.setValidator(QIntValidator(self))
self.ui.checkoutLineEdit.textEdited.connect(self.checkoutEdit)
self.ui.feeCustomCheckBox.clicked.connect(self.customFee)
self.ui.dualPhaseNewPushButton.pressed.connect(self.dualPhaseNew)
self.ui.markupNewPushButton.pressed.connect(self.markupNew)
self.ui.cashOutNewPushButton.pressed.connect(self.cashOutNew)
self.ui.infoTreeWidget.setColumnCount(1)
self.ui.infoTreeWidget.header().setVisible(False)
self.ui.infoTreeWidget.itemDoubleClicked.connect(self.infoEdit)
self.ui.infoTreeWidget.setContextMenuPolicy(Qt.ActionsContextMenu)
editAction = QAction(_translate("ItemViewDialog", "Edit"), self.ui.infoTreeWidget)
editAction.triggered.connect(self.infoContextMenuEditAction)
self.ui.infoTreeWidget.addAction(editAction)
deleteAction = QAction(_translate("ItemViewDialog", "Delete"), self.ui.infoTreeWidget)
deleteAction.triggered.connect(self.infoContextMenuDeleteAction)
self.ui.infoTreeWidget.addAction(deleteAction)
self.ui.historyPushButton.pressed.connect(self.onHistoryView)
self.ui.savePushButton.pressed.connect(self.onAccepted)
self.ui.cancelPushButton.pressed.connect(self.onRejected)
self.id = id
if id != None:
session = Database.instance().session()
self.item = session.query(ItemModel).filter_by(id = id).one()
else:
self.ui.historyPushButton.setVisible(False)
today = date.today()
self.item = ItemModel(startDate=date(today.year, today.month, 1), quantity=1, checkin=400, checkout=600, period=80)
self.itemCopyed = copy.deepcopy(self.item)
self.loadItem()
def checkoutEdit(self, text):
if not self.ui.feeCustomCheckBox.isChecked():
self.ui.feeLineEdit.setText(text)
def customFee(self, checked):
self.ui.feeLineEdit.setEnabled(checked)
if not checked:
self.ui.feeLineEdit.setText(self.ui.checkoutLineEdit.text())
def dualPhaseNew(self, dualPhaseEdit=None):
if not self.checkItem():
return
self.saveItem()
dialog = ItemDualPhaseNewDialog(self.item, dualPhaseEdit, self)
if dialog.exec() == QDialog.Accepted:
self.loadInformation()
def markupNew(self, markupEdit=None):
if not self.checkItem():
return
self.saveItem()
dialog = ItemMarkupNewDialog(self.item, markupEdit, self)
if dialog.exec() == QDialog.Accepted:
self.loadInformation()
def cashOutNew(self, cashOutEdit=None):
if not self.checkItem():
return
self.saveItem()
if cashOutEdit is None and len(self.item.getCashOut()) > self.item.quantity:
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Exceed quantity"))
return
dialog = ItemCashOutNewDialog(self.item, cashOutEdit, self)
if dialog.exec() == QDialog.Accepted:
self.loadInformation()
def infoEdit(self, item, column):
category = item.getCategory()
if category == TreeWidgetItem.Category.dualphase:
self.dualPhaseNew(item.getData())
elif category == TreeWidgetItem.Category.markup:
self.markupNew(item.getData())
else: # category == TreeWidgetItem.Category.cashout:
self.cashOutNew(item.getData())
def infoContextMenuEditAction(self):
selectedItems = self.ui.infoTreeWidget.selectedItems()
if len(selectedItems) > 0:
self.infoEdit(selectedItems[0], 0)
def infoContextMenuDeleteAction(self):
selectedItems = self.ui.infoTreeWidget.selectedItems()
if len(selectedItems) > 0:
item = selectedItems[0]
if QMessageBox.question(self, "", _translate("ItemViewDialog", "Continue to delete {}?").format(item.text(0))) == QMessageBox.Yes:
category = item.getCategory()
if category == TreeWidgetItem.Category.dualphase:
data = self.item.getDualPhase()
data.remove(item.getData())
self.item.setDualPhase(data)
elif category == TreeWidgetItem.Category.markup:
data = self.item.getMarkup()
data.remove(item.getData())
self.item.setMarkup(data)
else: # category == TreeWidgetItem.Category.cashout:
data = self.item.getCashOut()
data.remove(item.getData())
self.item.setCashOut(data)
self.loadInformation()
def onHistoryView(self):
ItemHistoryDialog(self.item, self).exec()
def onAccepted(self):
if not self.checkItem():
return
self.saveItem()
session = Database.instance().session()
self.createHistory()
if self.id == None:
session.add(self.item)
session.commit()
self.accept()
def onRejected(self):
session = Database.instance().session()
if session.dirty:
session.rollback()
self.reject()
def loadInformation(self):
self.ui.infoTreeWidget.clear()
for dualPhase in self.item.getDualPhase():
months = dualPhase["months"]
if months == [1, 3, 5, 7, 9, 11]:
months = _translate("ItemViewDialog", "odd")
elif months == [2, 4, 6, 8, 10, 12]:
months = _translate("ItemViewDialog", "even")
elif months == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]:
months = _translate("ItemViewDialog", "every")
item = TreeWidgetItem(TreeWidgetItem.Category.dualphase, dualPhase, \
[_translate("ItemViewDialog", "Since {}, {} month get dual phase").format(dualPhase["date"], months)])
self.ui.infoTreeWidget.addTopLevelItem(item)
for markup in self.item.getMarkup():
item = TreeWidgetItem(TreeWidgetItem.Category.markup, markup, \
[_translate("ItemViewDialog", "Since {} phase, rise in price {}").format(markup["phase"], markup["amount"])])
self.ui.infoTreeWidget.addTopLevelItem(item)
for cashOut in self.item.getCashOut():
item = TreeWidgetItem(TreeWidgetItem.Category.cashout, cashOut, \
[_translate("ItemViewDialog", "At {} phase, cash out {}").format(cashOut, self.item.getCashOutAmount(cashOut))])
self.ui.infoTreeWidget.addTopLevelItem(item)
def loadItem(self):
self.ui.nameLineEdit.setText(self.item.name)
self.ui.startDateEdit.setDate(self.item.startDate)
self.ui.quantityLineEdit.setText(str(self.item.quantity))
self.ui.checkinLineEdit.setText(str(self.item.checkin))
self.ui.checkoutLineEdit.setText(str(self.item.checkout))
if self.item.fee is not None and self.item.fee != self.item.checkout:
self.ui.feeCustomCheckBox.setChecked(True)
self.customFee(True)
self.ui.feeLineEdit.setText(str(self.item.fee))
else:
self.customFee(False)
self.ui.periodLineEdit.setText(str(self.item.period))
self.loadInformation()
self.ui.noteTextEdit.setPlainText(self.item.note)
def checkItem(self):
name = self.ui.nameLineEdit.text()
if name == "":
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Name is not correct"))
return False
quantity = self.ui.quantityLineEdit.text()
if quantity == "":
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Quantity is not correct"))
return False
checkin = self.ui.checkinLineEdit.text()
if checkin == "":
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Checkin is not correct"))
return False
checkout = self.ui.checkoutLineEdit.text()
if checkout == "":
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Checkout is not correct"))
return False
period = self.ui.periodLineEdit.text()
if period == "":
QMessageBox.warning(self, "", _translate("ItemViewDialog", "Period is not correct"))
return False
return True
def saveItem(self):
self.item.name = self.ui.nameLineEdit.text()
self.item.startDate = qdate_to_date(self.ui.startDateEdit.date())
self.item.quantity = int(self.ui.quantityLineEdit.text())
self.item.checkin = int(self.ui.checkinLineEdit.text())
self.item.checkout = int(self.ui.checkoutLineEdit.text())
self.item.fee = None
if self.ui.feeCustomCheckBox.isChecked():
fee = self.ui.feeLineEdit.text()
if fee != "" and int(fee) != self.item.checkout:
self.item.fee = int(fee)
self.item.period = int(self.ui.periodLineEdit.text())
self.item.note = self.ui.noteTextEdit.toPlainText()
def createHistory(self):
if self.id == None:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Create item {}").format(self.item.name)))
else:
item = self.item
itemCopyed = self.itemCopyed
if item.name != itemCopyed.name:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change name {} to {}").format(itemCopyed.name, item.name)))
if item.startDate != itemCopyed.startDate:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change startDate {} to {}").format(itemCopyed.startDate, item.startDate)))
if item.quantity != itemCopyed.quantity:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change quantity {} to {}").format(itemCopyed.quantity, item.quantity)))
if item.checkin != itemCopyed.checkin:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change checkin {} to {}").format(itemCopyed.checkin, item.checkin)))
if item.checkout != itemCopyed.checkout:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change checkout {} to {}").format(itemCopyed.checkout, item.checkout)))
if item.fee != itemCopyed.fee:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change fee {} to {}").format(itemCopyed.fee, item.fee)))
if item.period != itemCopyed.period:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change period {} to {}").format(itemCopyed.period, item.period)))
if item.markup != itemCopyed.markup:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change markup {} to {}").format(itemCopyed.markup, item.markup)))
if item.cashOut != itemCopyed.cashOut:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change cashOut {} to {}").format(itemCopyed.cashOut, item.cashOut)))
if item.dualPhase != itemCopyed.dualPhase:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change dualPhase {} to {}").format(itemCopyed.dualPhase, item.dualPhase)))
if item.note != itemCopyed.note:
self.item.histories.append(ItemHistoryModel(name=_translate("ItemViewDialog", "Change note {} to {}").format(itemCopyed.note, item.note)))
|
gileno/curso-citi
|
refs/heads/master
|
core/views.py
|
1
|
import random
from django.shortcuts import render
from django.contrib import messages
from django.http import HttpResponse
from django.core.mail import send_mail
from django.views.generic import View, TemplateView
from .forms import ContactForm
class IndexView(TemplateView):
template_name = 'index.html'
def contact(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
email = form.cleaned_data['email']
message = form.cleaned_data['message']
subject = '[DjangoEcommerce] %s entrou em contato' % name
body = 'E-mail: %s\n%s' % (email, message)
send_mail(subject, body, 'admin@admin.com', ['contato@admin.com'])
form = ContactForm()
else:
messages.error(request, 'Formulário inválido')
else:
form = ContactForm()
context = {
'form': form
}
return render(request, 'contact.html', context)
|
mavenlin/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/estimator.py
|
44
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to bridge `Distribution`s and `tf.contrib.learn.estimator` APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators.head import _compute_weighted_loss
from tensorflow.contrib.learn.python.learn.estimators.head import _RegressionHead
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
__all__ = [
"estimator_head_distribution_regression",
]
def estimator_head_distribution_regression(make_distribution_fn,
label_dimension=1,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for regression under a generic distribution.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the last
dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None` if
label is a `Tensor` (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure learns
the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and metrics
keys are suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
Returns:
An instance of `Head` for generic regression.
"""
return _DistributionRegressionHead(
make_distribution_fn=make_distribution_fn,
label_dimension=label_dimension,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
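# Minimal usage sketch, illustrative only and not part of the library: builds a
# Normal regression head with a fixed unit scale. It assumes that
# `tf.contrib.distributions.Normal` is available in this TensorFlow build; the
# helper name below is hypothetical.
def _example_normal_head():
  from tensorflow.contrib import distributions as contrib_distributions
  return estimator_head_distribution_regression(
      make_distribution_fn=lambda logits: contrib_distributions.Normal(
          loc=logits[..., 0],
          scale=array_ops.ones_like(logits[..., 0])),
      label_dimension=1,
      logits_dimension=1)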
class _DistributionRegressionHead(_RegressionHead):
"""Creates a _RegressionHead instance from an arbitray `Distribution`."""
def __init__(self,
make_distribution_fn,
label_dimension,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""`Head` for regression.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the
size of the last dimension of the labels `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the
last dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None`
if label is a tensor (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure
learns the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and
metrics keys are suffixed by `"/" + head_name` and the default variable
scope is `head_name`.
Raises:
TypeError: if `make_distribution_fn` is not `callable`.
"""
if not callable(make_distribution_fn):
raise TypeError("`make_distribution_fn` must be a callable function.")
self._distributions = {}
self._make_distribution_fn = make_distribution_fn
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
def loss_fn(labels, logits, weights=None):
"""Returns the loss of using `logits` to predict `labels`."""
d = self.distribution(logits)
labels_batch_shape = labels.shape.with_rank_at_least(1)[:-1]
labels_batch_shape = (
labels_batch_shape.as_list() if labels_batch_shape.is_fully_defined()
else array_ops.shape(labels)[:-1])
labels = array_ops.reshape(
labels,
shape=concat_vectors(labels_batch_shape, d.event_shape_tensor()))
return _compute_weighted_loss(
loss_unweighted=-d.log_prob(labels),
weight=weights)
def link_fn(logits):
"""Returns the inverse link function at `logits`."""
# Note: What the API calls a "link function" is really the inverse-link
# function, i.e., the "mean".
d = self.distribution(logits)
return d.mean()
super(_DistributionRegressionHead, self).__init__(
label_dimension=label_dimension,
loss_fn=loss_fn,
link_fn=link_fn,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
@property
def distributions(self):
"""Returns all distributions created by `DistributionRegressionHead`."""
return self._distributions
def distribution(self, logits, name=None):
"""Retrieves a distribution instance, parameterized by `logits`.
Args:
logits: `float`-like `Tensor` representing the parameters of the
underlying distribution.
name: The Python `str` name to given to this op.
Default value: "distribution".
Returns:
distribution: `tf.Distribution` instance parameterized by `logits`.
"""
with ops.name_scope(name, "distribution", [logits]):
d = self._distributions.get(logits, None)
if d is None:
d = self._make_distribution_fn(logits)
self._distributions[logits] = d
return d
|
grantcoin/grantcoin
|
refs/heads/master
|
contrib/p2pool/p2pool/util/jsonrpc.py
|
261
|
from __future__ import division
import json
import weakref
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import failure, log
from twisted.web import client, error
from p2pool.util import deferral, deferred_resource, memoize
class Error(Exception):
def __init__(self, code, message, data=None):
if type(self) is Error:
raise TypeError("can't directly instantiate Error class; use Error_for_code")
if not isinstance(code, int):
raise TypeError('code must be an int')
#if not isinstance(message, unicode):
# raise TypeError('message must be a unicode')
self.code, self.message, self.data = code, message, data
def __str__(self):
return '%i %s' % (self.code, self.message) + (' %r' % (self.data, ) if self.data is not None else '')
def _to_obj(self):
return {
'code': self.code,
'message': self.message,
'data': self.data,
}
@memoize.memoize_with_backing(weakref.WeakValueDictionary())
def Error_for_code(code):
class NarrowError(Error):
def __init__(self, *args, **kwargs):
Error.__init__(self, code, *args, **kwargs)
return NarrowError
class Proxy(object):
def __init__(self, func, services=[]):
self._func = func
self._services = services
def __getattr__(self, attr):
if attr.startswith('rpc_'):
return lambda *params: self._func('.'.join(self._services + [attr[len('rpc_'):]]), params)
elif attr.startswith('svc_'):
return Proxy(self._func, self._services + [attr[len('svc_'):]])
else:
raise AttributeError('%r object has no attribute %r' % (self.__class__.__name__, attr))
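# Illustrative sketch, not part of p2pool: Proxy turns attribute access into
# JSON-RPC calls. 'svc_' segments accumulate a service prefix and 'rpc_'
# performs the call, so the hypothetical proxy below ends up invoking
# func('wallet.getbalance', ()) and func('getinfo', (1, 2)).
def _proxy_dispatch_example():
    calls = []
    proxy = Proxy(lambda method, params: calls.append((method, params)))
    proxy.svc_wallet.rpc_getbalance()
    proxy.rpc_getinfo(1, 2)
    return calls # [('wallet.getbalance', ()), ('getinfo', (1, 2))]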
@defer.inlineCallbacks
def _handle(data, provider, preargs=(), response_handler=None):
id_ = None
try:
try:
try:
req = json.loads(data)
except Exception:
raise Error_for_code(-32700)(u'Parse error')
if 'result' in req or 'error' in req:
response_handler(req['id'], req['result'] if 'error' not in req or req['error'] is None else
failure.Failure(Error_for_code(req['error']['code'])(req['error']['message'], req['error'].get('data', None))))
defer.returnValue(None)
id_ = req.get('id', None)
method = req.get('method', None)
if not isinstance(method, basestring):
raise Error_for_code(-32600)(u'Invalid Request')
params = req.get('params', [])
if not isinstance(params, list):
raise Error_for_code(-32600)(u'Invalid Request')
for service_name in method.split('.')[:-1]:
provider = getattr(provider, 'svc_' + service_name, None)
if provider is None:
raise Error_for_code(-32601)(u'Service not found')
method_meth = getattr(provider, 'rpc_' + method.split('.')[-1], None)
if method_meth is None:
raise Error_for_code(-32601)(u'Method not found')
result = yield method_meth(*list(preargs) + list(params))
error = None
except Error:
raise
except Exception:
log.err(None, 'Squelched JSON error:')
raise Error_for_code(-32099)(u'Unknown error')
except Error, e:
result = None
error = e._to_obj()
defer.returnValue(json.dumps(dict(
jsonrpc='2.0',
id=id_,
result=result,
error=error,
)))
# HTTP
@defer.inlineCallbacks
def _http_do(url, headers, timeout, method, params):
id_ = 0
try:
data = yield client.getPage(
url=url,
method='POST',
headers=dict(headers, **{'Content-Type': 'application/json'}),
postdata=json.dumps({
'jsonrpc': '2.0',
'method': method,
'params': params,
'id': id_,
}),
timeout=timeout,
)
except error.Error, e:
try:
resp = json.loads(e.response)
except:
raise e
else:
resp = json.loads(data)
if resp['id'] != id_:
raise ValueError('invalid id')
if 'error' in resp and resp['error'] is not None:
raise Error_for_code(resp['error']['code'])(resp['error']['message'], resp['error'].get('data', None))
defer.returnValue(resp['result'])
HTTPProxy = lambda url, headers={}, timeout=5: Proxy(lambda method, params: _http_do(url, headers, timeout, method, params))
class HTTPServer(deferred_resource.DeferredResource):
def __init__(self, provider):
deferred_resource.DeferredResource.__init__(self)
self._provider = provider
@defer.inlineCallbacks
def render_POST(self, request):
data = yield _handle(request.content.read(), self._provider, preargs=[request])
assert data is not None
request.setHeader('Content-Type', 'application/json')
request.setHeader('Content-Length', len(data))
request.write(data)
class LineBasedPeer(basic.LineOnlyReceiver):
delimiter = '\n'
def __init__(self):
#basic.LineOnlyReceiver.__init__(self)
self._matcher = deferral.GenericDeferrer(max_id=2**30, func=lambda id, method, params: self.sendLine(json.dumps({
'jsonrpc': '2.0',
'method': method,
'params': params,
'id': id,
})))
self.other = Proxy(self._matcher)
def lineReceived(self, line):
_handle(line, self, response_handler=self._matcher.got_response).addCallback(lambda line2: self.sendLine(line2) if line2 is not None else None)
|
MicroPyramid/micro-finance
|
refs/heads/master
|
micro_admin/migrations/__init__.py
|
12133432
| |
2013Commons/HUE-SHARK
|
refs/heads/master
|
desktop/core/ext-py/Django-1.2.3/django/conf/locale/nb/__init__.py
|
12133432
| |
benoitsteiner/tensorflow
|
refs/heads/master
|
tensorflow/python/estimator/__init__.py
|
12133432
| |
astropy/conda-build-tools
|
refs/heads/master
|
extruder/tests/test_channel_copy.py
|
1
|
import pytest
from os import getenv
from binstar_client.utils import get_server_api
from binstar_client.errors import NotFound
from ..copy_packages import PackageCopier
SOURCE = 'conda-forge'
DEST = 'astropy-channel-copy-test'
# Destination channel contains only the packages:
# wcsaxes
# only versions 0.7 and 0.8, but not the latest on conda-forge,
# which is 0.9.
# sep
# only version 0.5.2, copied from channel mwcraig,
# which contains only that version.
def test_package_not_on_source():
# Package does not exist on source channel
# Expected outcome: NotFound
packages = {'asudifjqeiroufnver': None}
with pytest.raises(NotFound):
PackageCopier(SOURCE, DEST, packages)
# Whether or not version exists on destination channel:
def test_version_not_in_source():
# Package version is pinned and...
# ...pinned version is not in source channel
# Expected outcome: RuntimeError and specific message
packages = {'wcsaxes': '0.0.0'}
with pytest.raises(RuntimeError):
PackageCopier(SOURCE, DEST, packages)
# Package version is pinned and...
def test_version_pinned_not_in_destination():
# ...pinned version is not in destination channel
# Expected outcome: copy
packages = {'wcsaxes': '0.9'}
pc = PackageCopier(SOURCE, DEST, packages)
assert 'wcsaxes' in pc.to_copy
def test_version_pinned_in_destination():
# ...pinned version is in destination channel
# Expected outcome: No copy
packages = {'wcsaxes': '0.8'}
pc = PackageCopier(SOURCE, DEST, packages)
assert 'wcsaxes' not in pc.to_copy
# Package version is not pinned and...
def test_version_not_pinned_not_in_destination():
# ...destination channel is not up to date
# Expected outcome: copy
packages = {'wcsaxes': None}
pc = PackageCopier(SOURCE, DEST, packages)
assert 'wcsaxes' in pc.to_copy
def test_version_not_pinned_no_update_needed():
# ...destination is up to date
# Expected outcome: no copy
packages = {'sep': None}
pc = PackageCopier('mwcraig', DEST, packages)
assert 'sep' not in pc.to_copy
token = getenv('COPY_TEST_BINSTAR_TOKEN')
@pytest.mark.skipif(token is None,
reason='binstar token not set')
def test_package_copying():
api = get_server_api(token)
packages = {'wcsaxes': None}
pc = PackageCopier(SOURCE, DEST, packages, token=token)
# Make sure v0.9 has not accidentally ended up in the channel.
dest_wcs = api.package(DEST, 'wcsaxes')
assert "0.9" not in dest_wcs['versions']
# Copy 0.9 to the channel.
pc.copy_packages()
# Make sure it is really there.
dest_wcs = api.package(DEST, 'wcsaxes')
assert "0.9" in dest_wcs['versions']
# Remove it...
api.remove_release(DEST, 'wcsaxes', "0.9")
# ...and make sure it is really gone.
dest_wcs = api.package(DEST, 'wcsaxes')
assert "0.9" not in dest_wcs['versions']
|
theopolis/thrift
|
refs/heads/master
|
test/py/util.py
|
43
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import glob
import os
import sys
_SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
_ROOT_DIR = os.path.dirname(os.path.dirname(_SCRIPT_DIR))
def local_libpath():
globdir = os.path.join(_ROOT_DIR, 'lib', 'py', 'build', 'lib.*')
for libpath in glob.glob(globdir):
if libpath.endswith('-%d.%d' % (sys.version_info[0], sys.version_info[1])):
return libpath
|
jstammers/EDMSuite
|
refs/heads/atom-mega-mix
|
NavPython/IronPython/Lib/lib2to3/fixes/fix_print.py
|
326
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
simple_stmt< any* bare='print' any* > | print_stmt
"""
def transform(self, node, results):
assert results
bare_print = results.get("bare")
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name(u"print"), [],
prefix=bare_print.prefix))
return
assert node.children[0] == Name(u"print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = " "
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].prefix = u""
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, u"sep", String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, u"end", String(repr(end)))
if file is not None:
self.add_kwarg(l_args, u"file", file)
n_stmt = Call(Name(u"print"), l_args)
n_stmt.prefix = node.prefix
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.prefix = u""
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, u"="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.prefix = u" "
l_nodes.append(n_argument)
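# Illustrative sketch, not part of lib2to3: running this fixer on its own
# through the standard RefactoringTool API (assumed available as in CPython's
# lib2to3); the sample source string is hypothetical.
def _fix_print_example(source=u'print >>sys.stderr, "hi",\n'):
    from lib2to3 import refactor
    tool = refactor.RefactoringTool([u"lib2to3.fixes.fix_print"])
    # str() of the returned tree is roughly:
    #   print("hi", end=" ", file=sys.stderr)
    return str(tool.refactor_string(source, u"<example>"))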
|
MER-GROUP/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyMoveAttributeToInitQuickFixTest/py3K_after.py
|
79
|
class Base():
def __init__(self):
self.param = 2
class Child(Base):
def __init__(self):
super().__init__()
self.my = 1
def f(self):
pass
|
benjyw/pants
|
refs/heads/fix_docs_urls
|
src/python/pants/backend/python/lint/pylint/rules.py
|
3
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from collections import defaultdict
from dataclasses import dataclass
from typing import Iterable, List, Tuple
from pants.backend.python.lint.pylint.skip_field import SkipPylintField
from pants.backend.python.lint.pylint.subsystem import Pylint
from pants.backend.python.target_types import (
InterpreterConstraintsField,
PythonRequirementsField,
PythonSources,
)
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
PexRequirements,
VenvPex,
VenvPexProcess,
)
from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
StrippedPythonSourceFiles,
)
from pants.core.goals.lint import LintRequest, LintResult, LintResults
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.fs import EMPTY_DIGEST, AddPrefix, Digest, MergeDigests
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
FieldSet,
Target,
Targets,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.logging import LogLevel
from pants.util.meta import frozen_after_init
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class PylintFieldSet(FieldSet):
required_fields = (PythonSources,)
sources: PythonSources
dependencies: Dependencies
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipPylintField).value
@dataclass(frozen=True)
class PylintTargetSetup:
field_set: PylintFieldSet
target_with_dependencies: Targets
@frozen_after_init
@dataclass(unsafe_hash=True)
class PylintPartition:
field_sets: Tuple[PylintFieldSet, ...]
targets_with_dependencies: Targets
interpreter_constraints: InterpreterConstraints
plugin_targets: Targets
def __init__(
self,
target_setups: Iterable[PylintTargetSetup],
interpreter_constraints: InterpreterConstraints,
plugin_targets: Iterable[Target],
) -> None:
field_sets = []
targets_with_deps: List[Target] = []
for target_setup in target_setups:
field_sets.append(target_setup.field_set)
targets_with_deps.extend(target_setup.target_with_dependencies)
self.field_sets = tuple(field_sets)
self.targets_with_dependencies = Targets(targets_with_deps)
self.interpreter_constraints = interpreter_constraints
self.plugin_targets = Targets(plugin_targets)
class PylintRequest(LintRequest):
field_set_type = PylintFieldSet
def generate_argv(source_files: SourceFiles, pylint: Pylint) -> Tuple[str, ...]:
args = []
if pylint.config is not None:
args.append(f"--rcfile={pylint.config}")
args.extend(pylint.args)
args.extend(source_files.files)
return tuple(args)
@rule(level=LogLevel.DEBUG)
async def pylint_lint_partition(partition: PylintPartition, pylint: Pylint) -> LintResult:
requirements_pex_get = Get(
Pex,
PexFromTargetsRequest,
PexFromTargetsRequest.for_requirements(
(field_set.address for field_set in partition.field_sets),
# NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
# a different version for the requirements than the other two PEXes, which can result
# in a PEX runtime error about missing dependencies.
hardcoded_interpreter_constraints=partition.interpreter_constraints,
internal_only=True,
direct_deps_only=True,
),
)
plugin_requirements = PexRequirements.create_from_requirement_fields(
plugin_tgt[PythonRequirementsField]
for plugin_tgt in partition.plugin_targets
if plugin_tgt.has_field(PythonRequirementsField)
)
pylint_pex_get = Get(
Pex,
PexRequest(
output_filename="pylint.pex",
internal_only=True,
requirements=PexRequirements([*pylint.all_requirements, *plugin_requirements]),
interpreter_constraints=partition.interpreter_constraints,
),
)
prepare_plugin_sources_get = Get(
StrippedPythonSourceFiles, PythonSourceFilesRequest(partition.plugin_targets)
)
prepare_python_sources_get = Get(
PythonSourceFiles, PythonSourceFilesRequest(partition.targets_with_dependencies)
)
field_set_sources_get = Get(
SourceFiles, SourceFilesRequest(field_set.sources for field_set in partition.field_sets)
)
(
pylint_pex,
requirements_pex,
prepared_plugin_sources,
prepared_python_sources,
field_set_sources,
) = await MultiGet(
pylint_pex_get,
requirements_pex_get,
prepare_plugin_sources_get,
prepare_python_sources_get,
field_set_sources_get,
)
pylint_runner_pex, config_files = await MultiGet(
Get(
VenvPex,
PexRequest(
output_filename="pylint_runner.pex",
interpreter_constraints=partition.interpreter_constraints,
main=pylint.main,
internal_only=True,
pex_path=[pylint_pex, requirements_pex],
),
),
Get(
ConfigFiles, ConfigFilesRequest, pylint.config_request(field_set_sources.snapshot.dirs)
),
)
prefixed_plugin_sources = (
await Get(
Digest,
AddPrefix(prepared_plugin_sources.stripped_source_files.snapshot.digest, "__plugins"),
)
if pylint.source_plugins
else EMPTY_DIGEST
)
pythonpath = list(prepared_python_sources.source_roots)
if pylint.source_plugins:
# NB: Pylint source plugins must be explicitly loaded via PEX_EXTRA_SYS_PATH. The value must
# point to the plugin's directory, rather than to a parent's directory, because
# `load-plugins` takes a module name rather than a path to the module; i.e. `plugin`, but
# not `path.to.plugin`. (This means users must have specified the parent directory as a
# source root.)
pythonpath.append("__plugins")
input_digest = await Get(
Digest,
MergeDigests(
(
config_files.snapshot.digest,
prefixed_plugin_sources,
prepared_python_sources.source_files.snapshot.digest,
)
),
)
result = await Get(
FallibleProcessResult,
VenvPexProcess(
pylint_runner_pex,
argv=generate_argv(field_set_sources, pylint),
input_digest=input_digest,
extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
description=f"Run Pylint on {pluralize(len(partition.field_sets), 'file')}.",
level=LogLevel.DEBUG,
),
)
return LintResult.from_fallible_process_result(
result, partition_description=str(sorted(str(c) for c in partition.interpreter_constraints))
)
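# Illustrative layout sketch (hypothetical paths, not part of Pants): for a
# source plugin at src/pylint_plugins/my_checker.py to load, src/pylint_plugins
# must be declared as a source root and the Pylint config must reference the
# bare module name (e.g. load-plugins=my_checker), because the stripped plugin
# sources are mounted under "__plugins" on PEX_EXTRA_SYS_PATH above.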
@rule(desc="Lint using Pylint", level=LogLevel.DEBUG)
async def pylint_lint(
request: PylintRequest, pylint: Pylint, python_setup: PythonSetup
) -> LintResults:
if pylint.skip:
return LintResults([], linter_name="Pylint")
plugin_target_addresses = await Get(Addresses, UnparsedAddressInputs, pylint.source_plugins)
plugin_targets_request = Get(
TransitiveTargets, TransitiveTargetsRequest(plugin_target_addresses)
)
linted_targets_request = Get(
Targets, Addresses(field_set.address for field_set in request.field_sets)
)
plugin_targets, linted_targets = await MultiGet(plugin_targets_request, linted_targets_request)
plugin_targets_compatibility_fields = tuple(
plugin_tgt[InterpreterConstraintsField]
for plugin_tgt in plugin_targets.closure
if plugin_tgt.has_field(InterpreterConstraintsField)
)
# Pylint needs direct dependencies in the chroot to ensure that imports are valid. However, it
# doesn't lint those direct dependencies nor does it care about transitive dependencies.
per_target_dependencies = await MultiGet(
Get(Targets, DependenciesRequest(field_set.dependencies))
for field_set in request.field_sets
)
# We batch targets by their interpreter constraints to ensure, for example, that all Python 2
# targets run together and all Python 3 targets run together.
# Note that Pylint uses the AST of the interpreter that runs it. So, we include any plugin
# targets in this interpreter constraints calculation.
interpreter_constraints_to_target_setup = defaultdict(set)
for field_set, tgt, dependencies in zip(
request.field_sets, linted_targets, per_target_dependencies
):
target_setup = PylintTargetSetup(field_set, Targets([tgt, *dependencies]))
interpreter_constraints = InterpreterConstraints.create_from_compatibility_fields(
(
*(
tgt[InterpreterConstraintsField]
for tgt in [tgt, *dependencies]
if tgt.has_field(InterpreterConstraintsField)
),
*plugin_targets_compatibility_fields,
),
python_setup,
)
interpreter_constraints_to_target_setup[interpreter_constraints].add(target_setup)
partitions = (
PylintPartition(
tuple(sorted(target_setups, key=lambda tgt_setup: tgt_setup.field_set.address)),
interpreter_constraints,
Targets(plugin_targets.closure),
)
for interpreter_constraints, target_setups in sorted(
interpreter_constraints_to_target_setup.items()
)
)
partitioned_results = await MultiGet(
Get(LintResult, PylintPartition, partition) for partition in partitions
)
return LintResults(partitioned_results, linter_name="Pylint")
def rules():
return [
*collect_rules(),
UnionRule(LintRequest, PylintRequest),
*pex_from_targets.rules(),
]
|
giruenf/GRIPy
|
refs/heads/master
|
basic/uom/uom.py
|
1
|
# -*- coding: utf-8 -*-
#
# GRIPy Unit of Measure (UOM)
# Classes for units of measure and it's conversion
# Universidade Estadual do Norte Fluminense - UENF
# Laboratório de Engenharia de Petróleo - LENEP
# Grupo de Inferência em Reservatório - GIR
# Adriano Paulo Laes de Santana
# March 25th, 2017
#
# The following code is based on Energistics Unit of Measure Dictionary (UOM) V1.0
# Energistics UOM data is distributed under the Energistics License Agreement at http://www.energistics.org.
# Copyright (c) 2014 Energistics.
import os
import math
import xml.etree.ElementTree as ElementTree
UOM_FILENAME = 'Energistics_Unit_of_Measure_Dictionary_V1.0.xml'
NAMESPACE_KEY = 'uom'
NAMESPACE_VALUE = 'http://www.energistics.org/energyml/data/uomv1'
UNIT_DIMENSION_SET_KEY = 'unitDimensionSet'
QUANTITY_CLASS_SET_KEY = 'quantityClassSet'
UNIT_SET_KEY = 'unitSet'
REFERENCE_SET_KEY = 'referenceSet'
PREFIX_SET_KEY = 'prefixSet'
TAG_UNIT_DIMENSION_SET = NAMESPACE_KEY + ':' + UNIT_DIMENSION_SET_KEY
TAG_QUANTITY_CLASS_SET = NAMESPACE_KEY + ':' + QUANTITY_CLASS_SET_KEY
TAG_UNIT_SET = NAMESPACE_KEY + ':' + UNIT_SET_KEY
TAG_REFERENCE_SET = NAMESPACE_KEY + ':' + REFERENCE_SET_KEY
TAG_PREFIX_SET = NAMESPACE_KEY + ':' + PREFIX_SET_KEY
namespace = {NAMESPACE_KEY: NAMESPACE_VALUE}
class UOM(object):
_instance = None
def __init__(self, filename=None):
if self.__class__._instance:
raise Exception('Cannot create another UOM instance.')
self._unit_dimensions = {}
self._quantity_classes = {}
self._units = {}
self._references = {}
self._prefixes = {}
if filename:
self._load_XML(filename)
self.__class__._instance = self
@classmethod
def get(cls):
if not cls._instance:
UOM()
return cls._instance
def convert(self, value, from_unit_symbol, to_unit_symbol):
unit_from = self.get_unit(from_unit_symbol)
if unit_from is None:
raise Exception('Invalid unit from')
unit_to = self.get_unit(to_unit_symbol)
if unit_to is None:
raise Exception('Invalid unit to')
if unit_from.dimension != unit_to.dimension:
            raise Exception('Cannot convert between different dimensions.')
units_dimension = self.get_unit_dimension(unit_from.dimension)
#
if units_dimension.baseForConversion == unit_from.symbol:
base_value = value
else:
base_value = (unit_from.A + unit_from.B * value) / (unit_from.C + unit_from.D * value)
if units_dimension.baseForConversion == unit_to.symbol:
return base_value
return (unit_to.A - unit_to.C * base_value) / (unit_to.D * base_value - unit_to.B)
def get_unit_dimension(self, dimension):
return self._unit_dimensions.get(dimension)
def get_quantity_class(self, name):
return self._quantity_classes.get(name)
def get_unit(self, symbol):
return self._units.get(symbol)
def get_reference(self, ID):
return self._references.get(ID)
def get_prefix(self, symbol):
return self._prefixes.get(symbol)
def is_valid_unit(self, unit_symbol, quantity_name=None):
unit = self.get_unit(unit_symbol)
if not unit:
return False
elif quantity_name is None:
return True
quantity = self.get_quantity_class(quantity_name)
if quantity is None:
return False
return unit_symbol in quantity.memberUnit
def _load_XML(self, filename):
filename = os.path.join(
os.path.dirname(os.path.realpath(__file__)), filename
)
tree = ElementTree.parse(filename)
uds = tree.findall(TAG_UNIT_DIMENSION_SET, namespace)[0]
qcs = tree.findall(TAG_QUANTITY_CLASS_SET, namespace)[0]
us = tree.findall(TAG_UNIT_SET, namespace)[0]
rs = tree.findall(TAG_REFERENCE_SET, namespace)[0]
ps = tree.findall(TAG_PREFIX_SET, namespace)[0]
for ud in uds:
kv = {}
for attr in ud:
key = attr.tag.split('}')[1]
if attr.text:
# kv[key] = attr.text.translate(None, '\t\n')
                    kv[key] = attr.text.translate(str.maketrans('', '', '\t\n'))
self._unit_dimensions[kv['dimension']] = UnitDimension(**kv)
for qc in qcs:
kv = {}
member_unit = []
for attr in qc:
key = attr.tag.split('}')[1]
if attr.text:
if key == 'memberUnit':
member_unit.append(attr.text)
else:
                        # Strip tab and newline characters (see note above).
                        kv[key] = attr.text.replace('\t', '').replace('\n', '')
qc = QuantityClass(**kv)
qc.memberUnit = member_unit
self._quantity_classes[kv['name']] = qc
for ref in rs:
kv = {}
for attr in ref:
key = attr.tag.split('}')[1]
if attr.text:
                    # Strip tab and newline characters.
                    kv[key] = attr.text.replace('\t', '').replace('\n', '')
self._references[kv['ID']] = Reference(**kv)
for pref in ps:
kv = {}
for attr in pref:
key = attr.tag.split('}')[1]
if attr.text:
                    # Strip tab and newline characters.
                    kv[key] = attr.text.replace('\t', '').replace('\n', '')
self._prefixes[kv['symbol']] = Prefix(**kv)
for unit in us:
kv = {}
for attr in unit:
key = attr.tag.split('}')[1]
if attr.text:
                    # Strip tab and newline characters.
                    kv[key] = attr.text.replace('\t', '').replace('\n', '')
self._units[kv['symbol']] = Unit(**kv)
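# Rough shape of the dictionary file parsed by _load_XML (the five *Set
# container tags come from the constants above; the child element names are
# inferred from the attribute names of the classes below and may differ
# slightly from the real Energistics schema):
#
#   <... xmlns:uom="http://www.energistics.org/energyml/data/uomv1">
#     <uom:unitDimensionSet>
#       <uom:unitDimension><uom:dimension>L</uom:dimension>...</uom:unitDimension>
#     </uom:unitDimensionSet>
#     <uom:quantityClassSet>...</uom:quantityClassSet>
#     <uom:unitSet>
#       <uom:unit><uom:symbol>km</uom:symbol><uom:dimension>L</uom:dimension>...</uom:unit>
#     </uom:unitSet>
#     <uom:referenceSet>...</uom:referenceSet>
#     <uom:prefixSet>...</uom:prefixSet>
#   </...>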
class Unit(object):
def __init__(self, **kwargs):
self.symbol = kwargs.get('symbol')
self.name = kwargs.get('name')
self.dimension = kwargs.get('dimension')
self.isSI = self._value_parser(kwargs.get('isSI'))
self.category = kwargs.get('category')
self.baseUnit = kwargs.get('baseUnit')
self.conversionRef = kwargs.get('conversionRef')
self.isExact = self._value_parser(kwargs.get('isExact'))
self.A = self._value_parser(kwargs.get('A'))
self.B = self._value_parser(kwargs.get('B'))
self.C = self._value_parser(kwargs.get('C'))
self.D = self._value_parser(kwargs.get('D'))
if self.A is None:
self.A = 0.0
if self.B is None:
self.B = 0.0
if self.C is None:
self.C = 0.0
if self.D is None:
self.D = 0.0
self.underlyingDef = kwargs.get('underlyingDef')
self.description = kwargs.get('description')
self.isBase = self._value_parser(kwargs.get('isBase'))
def _value_parser(self, value_str):
if value_str is None:
return None
if value_str == 'true':
return True
if value_str == 'false':
return False
try:
return float(value_str)
except Exception:
if value_str == 'PI':
return math.pi
elif value_str == '2*PI':
return 2 * math.pi
elif value_str == '4*PI':
return 4 * math.pi
raise
def getstate(self):
state = {
'symbol': self.symbol,
'name': self.name,
'dimension': self.dimension,
'isSI': self.isSI,
'category': self.category,
'baseUnit': self.baseUnit,
'conversionRef': self.conversionRef,
'isExact': self.isExact,
'A': self.A,
'B': self.B,
'C': self.C,
'D': self.D,
'underlyingDef': self.underlyingDef,
'description': self.description,
'isBase': self.isBase
}
return state
class QuantityClass(object):
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.dimension = kwargs.get('dimension')
self.baseForConversion = kwargs.get('baseForConversion')
        self.alternativeBase = kwargs.get('alternativeBase')
self.memberUnit = []
self.description = kwargs.get('description')
def getstate(self):
state = {
'name': self.name,
'dimension': self.dimension,
'baseForConversion': self.baseForConversion,
'alternativeBase': self.alternativeBase,
'memberUnit': self.memberUnit,
'description': self.description
}
return state
class UnitDimension(object):
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.dimension = kwargs.get('dimension')
self.baseForConversion = kwargs.get('baseForConversion')
self.canonicalUnit = kwargs.get('canonicalUnit')
self.description = kwargs.get('description')
def getstate(self):
state = {
'name': self.name,
'dimension': self.dimension,
'baseForConversion': self.baseForConversion,
'canonicalUnit': self.canonicalUnit,
'description': self.description
}
return state
class Reference(object):
def __init__(self, **kwargs):
self.ID = kwargs.get('ID')
self.description = kwargs.get('description')
def getstate(self):
state = {
'ID': self.ID,
'description': self.description
}
return state
class Prefix(object):
def __init__(self, **kwargs):
self.symbol = kwargs.get('symbol')
self.name = kwargs.get('name')
self.multiplier = kwargs.get('multiplier')
def getstate(self):
state = {
'symbol': self.symbol,
'name': self.name,
'multiplier': self.multiplier
}
return state
uom = UOM(UOM_FILENAME)
"""
def print_(dict_):
for key, value in dict_.items():
print key, '-', value
print
if __name__ == '__main__':
uom_ = UOM('D:\\repo\\GRIPy\\DT\\Energistics_Unit_of_Measure_Dictionary_V1.0.xml')
#print uom_
#
unit = uom_.get_unit('km')
print_(unit.getstate())
#
ud = uom_.get_unit_dimension('L')
print_(ud.getstate())
#
qc = uom_.get_quantity_class('length')
print_(qc.getstate())
print uom_.is_valid_unit('ms', 'length')
#value = 1555
#new_value = UOM_.convert(value, 'g/cm3', 'kg/m3')#, 'g/cm3')
#print 'new_value:', new_value
"""
|
odoo-brazil/odoo-brazil-hr
|
refs/heads/develop
|
l10n_br_hr_payroll/models/hr_contract.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2017 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models
from openerp import exceptions
from datetime import datetime, timedelta
class HrContract(models.Model):
_inherit = 'hr.contract'
_rec_name = 'nome_contrato'
codigo_contrato = fields.Char(
string='Codigo de Identificacao',
required=True,
default="/",
readonly=True
)
is_editable = fields.Boolean(
string="Pode Alterar ?",
compute="_is_editable",
default=True,
store=True,
)
payslip_ids_confirmados = fields.One2many(
"hr.payslip",
"contract_id",
"Holerites Confirmados",
domain=[
('state', '!=', 'draft'),
('is_simulacao', '=', False)
]
)
@api.multi
@api.depends('payslip_ids_confirmados', 'payslip_ids_confirmados.state')
def _is_editable(self):
for contrato in self:
if len(contrato.payslip_ids_confirmados) != 0:
contrato.is_editable = False
else:
contrato.is_editable = True
@api.model
def create(self, vals):
if vals.get('codigo_contrato', '/') == '/':
vals['codigo_contrato'] = self.env['ir.sequence'].get(self._name)
return super(HrContract, self).create(vals)
@api.depends('employee_id')
def _compute_nome_contrato(self):
for contrato in self:
nome = contrato.employee_id.name
matricula = contrato.codigo_contrato
nome_contrato = '[%s] %s' % (matricula, nome)
contrato.nome_contrato = nome_contrato if nome else ''
nome_contrato = fields.Char(
default="[mat] nome - inicio - fim",
compute="_compute_nome_contrato",
store=True
)
@api.multi
def _buscar_salario_vigente_periodo(
self, data_inicio, data_fim, inicial=False, final=False):
contract_change_obj = self.env['l10n_br_hr.contract.change']
#
        # Check for contract changes still in Draft state and
        # stop if any are found.
#
change = contract_change_obj.search([
('contract_id', '=', self.id),
('change_type', '=', 'remuneracao'),
('state', '=', 'draft'),
], order="change_date DESC",
)
if change:
raise exceptions.ValidationError(
"Há alteração de remuneração em estado Rascunho "
"neste contrato, por favor exclua a alteração "
"contratual ou Aplique-a para torná-la efetiva "
"antes de calcular um holerite!"
)
        # Fetch every applied remuneration change of this contract
#
change = contract_change_obj.search([
('contract_id', '=', self.id),
('change_type', '=', 'remuneracao'),
('state', '=', 'applied'),
], order="change_date DESC",
)
        # Compute the salary prorated over the requested period.
        # Start from the contract wage in case there has never been a
        # contract change.
#
salario_medio = self.wage
for i in range(len(change)):
            # Was there a contract change within this period?
#
if data_inicio <= change[i].change_date <= data_fim:
i_2 = i + 1
data_mudanca = \
datetime.strptime(change[i].change_date, "%Y-%m-%d")
d_inicio = datetime.strptime(data_inicio, "%Y-%m-%d")
d_fim = datetime.strptime(data_fim, "%Y-%m-%d")
d_fim = d_fim.replace(day=30)
dias = (d_fim - d_inicio) + timedelta(days=1)
                # If the salary change happens exactly on the first day of the
                # payslip period, use the new salary for the whole period.
#
if data_mudanca == d_inicio:
# if i_2 in range(len(change)):
salario_medio = change[i].wage
salario_dia_1 = change[i].wage / dias.days
salario_dia_2 = change[i].wage / dias.days
else:
                    # Compute how many days of the period fall on each side
                    # of the contract change.
#
dias_2 = (dias.days - data_mudanca.day) + 1
dias_1 = data_mudanca.day - d_inicio.day
                    # Compute the daily salary for each stretch, which may
                    # have different values.
#
salario_dia_2 = change[i].wage / dias.days
if i_2 in range(len(change)):
salario_dia_1 = change[i_2].wage / dias.days
else:
salario_dia_1 = change[i].wage / dias.days
salario_medio_2 = salario_dia_2 * dias_2
salario_medio_1 = salario_dia_1 * dias_1
                    # Add both sides to get the prorated salary for the
                    # period.
#
salario_medio = salario_medio_2 + salario_medio_1
                # When the initial salary of the period was requested
#
if inicial:
salario_medio = salario_dia_1 * dias.days
                # When the final salary of the period was requested
#
if final:
salario_medio = salario_dia_2 * dias.days
break
            # A contract change happened before the current period
#
elif change[i].change_date < data_inicio:
salario_medio = change[i].wage
break
return salario_medio
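    # Illustrative example (hypothetical numbers): for a period 2017-03-01 to
    # 2017-03-30, with an earlier applied change setting the wage to 3000.00
    # and a new change to 3300.00 applied on 2017-03-16, the method uses a
    # 30-day commercial month, so dias_1 = 15 days at 3000/30 = 100.00/day and
    # dias_2 = 15 days at 3300/30 = 110.00/day, giving a prorated salary of
    # 15 * 100.00 + 15 * 110.00 = 3150.00.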
@api.multi
def _salario_dia(self, data_inicio, data_fim):
return self._salario_mes_proporcional(
data_inicio, data_fim) / 30
@api.multi
def _salario_hora(self, data_inicio, data_fim):
wage = self._salario_mes_proporcional(data_inicio, data_fim)
hours_total = 220 if not self.monthly_hours else self.monthly_hours
return wage / hours_total
@api.multi
def _salario_mes(self, data_inicio, data_fim):
return self._buscar_salario_vigente_periodo(
data_inicio, data_fim)
@api.multi
def _salario_mes_proporcional(self, data_inicio, data_fim):
return self._buscar_salario_vigente_periodo(
data_inicio, data_fim)
@api.multi
def _salario_mes_inicial(self, data_inicio, data_fim):
return self._buscar_salario_vigente_periodo(
data_inicio, data_fim, inicial=True)
@api.multi
def _salario_mes_final(self, data_inicio, data_fim):
return self._buscar_salario_vigente_periodo(
data_inicio, data_fim, final=True)
specific_rule_ids = fields.One2many(
comodel_name='hr.contract.salary.rule',
inverse_name='contract_id',
string=u"Rúbricas específicas",
ondelete='cascade',
)
change_salary_ids = fields.One2many(
comodel_name='l10n_br_hr.contract.change',
inverse_name='contract_id',
string=u"Remuneração",
domain=[
('change_type', '=', 'remuneracao')
],
)
change_workdays_ids = fields.One2many(
comodel_name='l10n_br_hr.contract.change',
inverse_name='contract_id',
string=u"Jornada",
domain=[
('change_type', '=', 'jornada')
],
)
change_job_ids = fields.One2many(
comodel_name='l10n_br_hr.contract.change',
inverse_name='contract_id',
string=u"Atividade/Cargo",
domain=[
('change_type', '=', 'cargo-atividade')
],
)
change_labor_union_ids = fields.One2many(
comodel_name='l10n_br_hr.contract.change',
inverse_name='contract_id',
string=u"Filiação Sindical",
domain=[
('change_type', '=', 'filiacao-sindical')
],
)
change_place_ids = fields.One2many(
comodel_name='l10n_br_hr.contract.change',
inverse_name='contract_id',
string=u"Lotação/Local de trabalho",
domain=[
('change_type', '=', 'filiacao-sindical')
],
)
company_id = fields.Many2one(
comodel_name='res.company',
string='Empresa',
required=True,
default=lambda self: self.env.user.company_id or '',
)
    # Admission
tipo_do_contrato = fields.Selection(
selection=[],
string="Tipo do contrato"
)
tipo_de_admissao = fields.Selection(
selection=[],
string="Tipo de admissão"
)
indicativo_de_admissao = fields.Selection(
        selection=[('transferencia', u'Transferência'),
('normal', u'Normal')],
string="Indicativo da admissão"
)
contrato_transferido = fields.Selection(
selection=[],
string="Contrato transferido"
)
data_da_transferencia = fields.Date(
string="Data da transferencia"
)
seguro_desemprego = fields.Boolean(
string="Em Seguro Desemprego?"
)
primeiro_emprego = fields.Boolean(
string="Primeiro emprego?"
)
primeira_experiencia = fields.Integer(
string="Tempo em dias do 1º período de experiência"
)
data_primeira_experiencia = fields.Date(
string="Início da primeira experiência"
)
segunda_experiencia = fields.Integer(
string=u"Tempo em dias do 2º período de experiência"
)
data_segunda_experiencia = fields.Date(
string=u"Início da segunda experiência"
)
department_id = fields.Many2one(
comodel_name='hr.department',
string='Departamento/Lotação',
related=False,
readonly=False,
)
lotacao_cliente_fornecedor = fields.Selection(
selection=[],
string="Lotação/cliente/fornecedor"
)
    # Working hours
tipo_de_jornada = fields.Selection(
selection=[],
string="Tipo de jornada de trabalho"
)
jornada_seg_sex = fields.Selection(
selection=[],
string="Jornada padrão de segunda a sexta-feira"
)
jornada_sab = fields.Selection(
selection=[],
string="Jornada no sábado"
)
    # Previous and ceding employment links tab
    # Previous employment link
cnpj_empregador_anterior = fields.Char(
string="CNPJ do empregador anterior"
)
matricula_anterior = fields.Char(
string="Matrícula anterior"
)
data_admissao_anterior = fields.Date(
string="Data de admissão no vínculo anterior"
)
observacoes_vinculo_anterior = fields.Text(
string="Observações do vínculo anterior"
)
    # Ceding employment link
cnpj_empregador_cedente = fields.Char(
string="CNPJ do empregador cedente"
)
matricula_cedente = fields.Char(
string="Matrícula cedente"
)
data_admissao_cedente = fields.Date(
string="Data de admissão no vínculo cedente"
)
adiantamento_13_cedente = fields.Float(
string=u"Antecipação de 13º na Orgiem R$",
default=0.0,
)
    # Occupational health tab
data_atestado_saude = fields.Date(
string="Data do atestado de saúde ocupacional"
)
numero_crm = fields.Integer(
string="CRM nº"
)
nome_medico_encarregado = fields.Char(
string="Nome do médico encarregado"
)
estado_crm = fields.Selection(
selection=[],
string="Estado do CRM"
)
    # Exams tree view
exame_ids = fields.One2many(
comodel_name='hr.exame.medico',
inverse_name='contract_id',
string="Exames"
)
    # Legal proceedings tab
numero_processo = fields.Integer(
string="Nº processo judicial"
)
nome_advogado_autor = fields.Char(
string="Advogado do autor do processo"
)
nome_advogado_empresa = fields.Char(
string="Advogado da empresa"
)
observacoes_processo = fields.Text(
string="Observações do processo judicial"
)
    # Courses and training tab
curso_ids = fields.One2many(
comodel_name='hr.curso',
inverse_name='contract_id',
string="Cursos"
)
    # Leaves of absence tab
afastamento_ids = fields.One2many(
comodel_name='hr.holidays',
inverse_name='contrato_id',
string="Afastamentos"
)
conta_bancaria_id = fields.Many2one(
string="Conta bancaria",
comodel_name='res.partner.bank',
)
class Exame(models.Model):
_name = 'hr.exame.medico'
name = fields.Char(
string="Exame"
)
data_do_exame = fields.Date(
string="Data do exame"
)
data_de_validade = fields.Date(
string="Data de validade"
)
contract_id = fields.Many2one(
comodel_name='hr.contract',
)
class Curso(models.Model):
_name = 'hr.curso'
name = fields.Char(
string="Curso"
)
carga_horaria = fields.Integer(
string="Carga horária"
)
inicio_curso = fields.Date(
string="Início"
)
fim_curso = fields.Date(
string="Encerramento"
)
situacao = fields.Selection(
selection=[],
string="Situação"
)
contract_id = fields.Many2one(
comodel_name='hr.contract',
)
class HrHoliday(models.Model):
_inherit = 'hr.holidays'
rubrica = fields.Char(
string="Rubrica"
)
periodo = fields.Char(
string="Data de afastamento"
)
valor_inss = fields.Float(
string="Valor INSS"
)
class HrContractSalaryUnit(models.Model):
_inherit = 'hr.contract.salary.unit'
@api.multi
def name_get(self):
result = []
for record in self:
name = record['name']
if name == 'Monthly':
name = 'Por mês'
elif name == 'Biweekly':
name = 'Por 15 dias'
elif name == 'Weekly':
name = 'Por semana'
elif name == 'Daily':
name = 'Por dia'
elif name == 'Hourly':
name = 'Por hora'
elif name == 'Task':
name = 'Por tarefa'
elif name == 'Others':
name = 'Outros'
elif record['code']:
name = record['code'] + ' - ' + name
result.append((record['id'], name))
return result
|
zaxtax/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/test_forest.py
|
26
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
assert_less(0 < X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
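    # mdi_importance above computes the population MDI value derived in
    # Louppe et al. (2013) for totally randomized trees:
    #   Imp(X_m) = sum_{k=0}^{p-1} 1 / (C(p, k) * (p - k)) *
    #              sum_{B subset of V \ {X_m}, |B| = k} I(X_m ; Y | B)
    # where p is the number of features and the conditional mutual
    # information is expanded over the observed values b of B, exactly as the
    # nested loops do.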
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
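# Typical out-of-bag usage exercised by the checks above (illustrative sketch
# only, not part of the test suite):
#
#     est = RandomForestRegressor(n_estimators=100, bootstrap=True,
#                                 oob_score=True)
#     est.fit(X_train, y_train)
#     est.oob_score_   # internal generalization estimate: accuracy for
#                      # classifiers, R^2 for regressors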
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
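# The warm-start idiom exercised above, shown as a minimal usage sketch
# (illustrative only, not part of the test suite):
#
#     clf = RandomForestClassifier(n_estimators=50, warm_start=True)
#     clf.fit(X_train, y_train)          # grows 50 trees
#     clf.set_params(n_estimators=100)
#     clf.fit(X_train, y_train)          # adds 50 more trees, keeps the old ones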
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
|
tedder/ansible
|
refs/heads/devel
|
lib/ansible/plugins/cache/__init__.py
|
17
|
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import errno
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins.loader import cache_loader
from ansible.utils.display import Display
from ansible.vars.fact_cache import FactCache as RealFactCache
display = Display()
class FactCache(RealFactCache):
"""
    This is for backwards compatibility and will be removed after the deprecation period. The
    class was moved out of this module because it was never really part of the cache plugin API:
    it is the code that makes use of cache plugins, not a cache plugin itself. Subclassing it
    would not yield a usable cache plugin, and there was no facility to use it as anything else.
"""
def __init__(self, *args, **kwargs):
display.deprecated('ansible.plugins.cache.FactCache has been moved to'
' ansible.vars.fact_cache.FactCache. If you are looking for the class'
' to subclass for a cache plugin, you want'
' ansible.plugins.cache.BaseCacheModule or one of its subclasses.',
version='2.12')
super(FactCache, self).__init__(*args, **kwargs)
class BaseCacheModule(with_metaclass(ABCMeta, object)):
# Backwards compat only. Just import the global display instead
_display = display
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value):
pass
@abstractmethod
def keys(self):
pass
@abstractmethod
def contains(self, key):
pass
@abstractmethod
def delete(self, key):
pass
@abstractmethod
def flush(self):
pass
@abstractmethod
def copy(self):
pass
class BaseFileCacheModule(BaseCacheModule):
"""
A caching module backed by file based storage.
"""
def __init__(self, *args, **kwargs):
self.plugin_name = self.__module__.split('.')[-1]
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._cache = {}
self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
self._set_inventory_cache_override(**kwargs)
self.validate_cache_connection()
def _get_cache_connection(self, source):
if source:
try:
return os.path.expanduser(os.path.expandvars(source))
except TypeError:
pass
def _set_inventory_cache_override(self, **kwargs):
if kwargs.get('cache_timeout'):
self._timeout = kwargs.get('cache_timeout')
if kwargs.get('cache_connection'):
self._cache_dir = self._get_cache_connection(kwargs.get('cache_connection'))
def validate_cache_connection(self):
if not self._cache_dir:
raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
"to be set (to a writeable directory path)" % self.plugin_name)
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
except (OSError, IOError) as e:
raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
else:
for x in (os.R_OK, os.W_OK, os.X_OK):
if not os.access(self._cache_dir, x):
raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
self.plugin_name, self._cache_dir))
def get(self, key):
""" This checks the in memory cache first as the fact was not expired at 'gather time'
and it would be problematic if the key did expire after some long running tasks and
user gets 'undefined' error in the same play """
if key not in self._cache:
if self.has_expired(key) or key == "":
raise KeyError
cachefile = "%s/%s" % (self._cache_dir, key)
try:
value = self._load(cachefile)
self._cache[key] = value
except ValueError as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s. "
"Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
self.delete(key)
raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
"It has been removed, so you can re-run your command now." % cachefile)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
raise KeyError
except Exception as e:
raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
cachefile = "%s/%s" % (self._cache_dir, key)
try:
self._dump(value, cachefile)
except (OSError, IOError) as e:
display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
def has_expired(self, key):
if self._timeout == 0:
return False
cachefile = "%s/%s" % (self._cache_dir, key)
try:
st = os.stat(cachefile)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
return False
if time.time() - st.st_mtime <= self._timeout:
return False
if key in self._cache:
del self._cache[key]
return True
def keys(self):
keys = []
for k in os.listdir(self._cache_dir):
if not (k.startswith('.') or self.has_expired(k)):
keys.append(k)
return keys
def contains(self, key):
cachefile = "%s/%s" % (self._cache_dir, key)
if key in self._cache:
return True
if self.has_expired(key):
return False
try:
os.stat(cachefile)
return True
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
def delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
os.remove("%s/%s" % (self._cache_dir, key))
except (OSError, IOError):
pass # TODO: only pass on non existing?
def flush(self):
self._cache = {}
for key in self.keys():
self.delete(key)
def copy(self):
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
@abstractmethod
def _load(self, filepath):
"""
Read data from a filepath and return it as a value
:arg filepath: The filepath to read from.
:returns: The value stored in the filepath
This method reads from the file on disk and takes care of any parsing
and transformation of the data before returning it. The value
returned should be what Ansible would expect if it were uncached data.
.. note:: Filehandles have advantages but calling code doesn't know
whether this file is text or binary, should be decoded, or accessed via
a library function. Therefore the API uses a filepath and opens
the file inside of the method.
"""
pass
@abstractmethod
def _dump(self, value, filepath):
"""
Write data to a filepath
:arg value: The value to store
:arg filepath: The filepath to store it at
"""
pass
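# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A minimal concrete subclass showing how the _load/_dump contract above is usually
# implemented, here with the standard json module. The class name is hypothetical;
# real Ansible file cache plugins (e.g. jsonfile) follow this shape but add their own
# codec and error handling.
import json
class ExampleJsonFileCacheModule(BaseFileCacheModule):
    def _load(self, filepath):
        # Parse the on-disk representation back into the value Ansible expects.
        with open(filepath, 'r') as f:
            return json.load(f)
    def _dump(self, value, filepath):
        # Serialise the value to disk; get()/set() manage the in-memory cache.
        with open(filepath, 'w') as f:
            json.dump(value, f)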
class InventoryFileCacheModule(BaseFileCacheModule):
"""
A caching module backed by file based storage.
"""
def __init__(self, plugin_name, timeout, cache_dir):
self.plugin_name = plugin_name
self._timeout = timeout
self._cache = {}
self._cache_dir = self._get_cache_connection(cache_dir)
self.validate_cache_connection()
self._plugin = self.get_plugin(plugin_name)
def validate_cache_connection(self):
try:
super(InventoryFileCacheModule, self).validate_cache_connection()
except AnsibleError:
cache_connection_set = False
else:
cache_connection_set = True
if not cache_connection_set:
            raise AnsibleError("error, '%s' inventory cache plugin requires one of the following to be set "
                               "to a writeable directory path:\n"
                               "ansible.cfg:\n[defaults]: fact_caching_connection,\n[inventory]: cache_connection;\n"
                               "Environment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\nANSIBLE_CACHE_PLUGIN_CONNECTION."
                               % self.plugin_name)
def get(self, cache_key):
if not self.contains(cache_key):
# Check if cache file exists
raise KeyError
return super(InventoryFileCacheModule, self).get(cache_key)
def get_plugin(self, plugin_name):
plugin = cache_loader.get(plugin_name, cache_connection=self._cache_dir, cache_timeout=self._timeout)
if not plugin:
raise AnsibleError('Unable to load the facts cache plugin (%s).' % (plugin_name))
self._cache = {}
return plugin
def _load(self, path):
return self._plugin._load(path)
def _dump(self, value, path):
return self._plugin._dump(value, path)
|
reimandlab/ActiveDriverDB
|
refs/heads/master
|
website/database/types.py
|
1
|
from io import StringIO
from pandas import DataFrame, read_csv
from sqlalchemy import TypeDecorator, Text
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.mutable import MutableSet
from database import db
from pandas.util import hash_pandas_object
class DataFrameStore(TypeDecorator):
    """Store a pandas DataFrame serialised as CSV text in a Text column."""
impl = Text
def process_bind_param(self, value: DataFrame, dialect):
stream = StringIO()
value.to_csv(stream)
return stream.getvalue()
def compare_values(self, x, y):
return sum(hash_pandas_object(x)) == sum(hash_pandas_object(y)) and list(x.columns) == list(y.columns)
def process_result_value(self, value: str, dialect) -> DataFrame:
stream = StringIO(value)
return read_csv(stream)
def copy(self, **kw):
return DataFrameStore(self.impl.length)
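# --- Illustrative sketch (added; not part of the original module) ---
# The round trip performed by DataFrameStore: a DataFrame is serialised to CSV text on
# the way into the database and parsed back with read_csv on the way out. Note that
# to_csv() writes the index, so the restored frame carries it as an extra column
# (compare_values above compares hashed values and column names rather than identity).
def _dataframe_store_roundtrip_example():
    store = DataFrameStore()
    df = DataFrame({'gene': ['TP53', 'EGFR'], 'count': [3, 5]})
    text = store.process_bind_param(df, dialect=None)
    return store.process_result_value(text, dialect=None)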
class ScalarSet(TypeDecorator):
@property
def python_type(self):
return set
impl = Text
def __init__(self, *args, separator=',', element_type=str, empty_indicator='{}', coerce=None, **kwargs):
"""A column mimicking a Python set on top of Text column.
Args:
*args: passed to Text type constructor
separator: a character or set of character used as separator during serialization
element_type: type of the element to be stored (all elements have to be of the same type)
empty_indicator: a string used to indicate that the set is empty
coerce: a set of rules for coercion from types other than element types;
each rule should map a type to object's property which will be
used instead of the object; it has to be of element_type type.
The purpose of having coerce rules separate from element_type is to
enable support for multiple types/rules simultaneously.
**kwargs: passed to Text type constructor
"""
super().__init__(*args, **kwargs)
self.separator = separator
self.type = element_type
self.empty_indicator = empty_indicator
self.coerce_rules = coerce or {}
@property
def comparator_factory(self):
coerce_element = self.coerce_element
class Comparator(self.impl.Comparator):
def operate(self, op, *other, **kwargs):
return super().operate(op, *[coerce_element(e) for e in other], **kwargs)
return Comparator
def process_bind_param(self, value, dialect):
if not value:
return self.empty_indicator
value = [self.coerce_element(v) for v in value]
assert all(isinstance(v, self.type) for v in value)
        # Serialise non-string elements before joining; the check is against the
        # element type itself (e.g. int), not against an instance of it.
        if self.type is not str:
            value = list(map(str, value))
assert all([self.separator not in v for v in value])
return self.separator.join(value)
def process_result_value(self, value, dialect):
if not value or value == self.empty_indicator:
return set()
return set(map(self.type, value.split(self.separator)))
def coerce_element(self, element):
for value_type, attribute in self.coerce_rules.items():
if isinstance(element, value_type):
return getattr(element, attribute)
return element
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
MutableSet.associate_with(ScalarSet)
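# --- Illustrative sketch (added; not part of the original module) ---
# Round trip performed by ScalarSet with an int element type: the set is serialised to
# a separator-joined string for storage and rebuilt as a set of ints on load.
def _scalar_set_roundtrip_example():
    column_type = ScalarSet(element_type=int)
    stored = column_type.process_bind_param({1, 2, 3}, dialect=None)    # e.g. "1,2,3"
    restored = column_type.process_result_value(stored, dialect=None)   # {1, 2, 3}
    assert restored == {1, 2, 3}
    return restored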
class MediumPickle(db.PickleType):
impl = mysql.MEDIUMBLOB
@compiles(MediumPickle, 'sqlite')
def sqlite_utc_after(element, compiler, **kwargs):
return compiler.visit_BLOB(element, **kwargs)
|
svsn2117/coala
|
refs/heads/master
|
tests/processes/section_executor_test_files/ProcessingGlobalTestBear.py
|
28
|
from coalib.bears.GlobalBear import GlobalBear
from coalib.results.Result import Result
class ProcessingGlobalTestBear(GlobalBear): # pragma: no cover
def run(self):
for filename in self.file_dict:
return [Result.from_values("GlobalTestBear",
"test message",
filename)]
|
cetic/ansible
|
refs/heads/devel
|
lib/ansible/utils/module_docs_fragments/ios.py
|
101
|
#
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
"""
|
srimai/odoo
|
refs/heads/8.0
|
addons/crm_profiling/crm_profiling.py
|
333
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@return: Get parents's Id """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
    """ Return True if the partner pid matches the segmentation rule seg_id
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
    @param seg_id: Segmentation's ID
@param pid: partner's ID
@param answers_ids: Answers's IDs
"""
ids_to_check = _get_parents(cr, uid, [seg_id])
[yes_answers, no_answers] = _get_answers(cr, uid, ids_to_check)
temp = True
for y_ans in yes_answers:
if y_ans not in answers_ids:
temp = False
break
if temp:
for ans in answers_ids:
if ans in no_answers:
temp = False
break
if temp:
return True
return False
def _recompute_categ(self, cr, uid, pid, answers_ids):
""" Recompute category
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param pid: partner's ID
@param answers_ids: Answers's IDs
"""
ok = []
cr.execute('''
select r.category_id
from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
''', (pid,))
for x in cr.fetchall():
ok.append(x[0])
query = '''
select id, categ_id
from crm_segmentation
where profiling_active = true'''
if ok != []:
query = query +''' and categ_id not in(%s)'''% ','.join([str(i) for i in ok ])
query = query + ''' order by id '''
cr.execute(query)
segm_cat_ids = cr.fetchall()
for (segm_id, cat_id) in segm_cat_ids:
if test_prof(cr, uid, segm_id, pid, answers_ids):
ok.append(cat_id)
return ok
class question(osv.osv):
""" Question """
_name="crm_profiling.question"
_description= "Question"
_columns={
'name': fields.char("Question", required=True),
'answers_ids': fields.one2many("crm_profiling.answer", "question_id", "Available Answers", copy=True),
}
class questionnaire(osv.osv):
""" Questionnaire """
_name="crm_profiling.questionnaire"
_description= "Questionnaire"
_columns = {
'name': fields.char("Questionnaire", required=True),
'description':fields.text("Description", required=True),
'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
'questionnaire', 'question', "Questions"),
}
class answer(osv.osv):
_name="crm_profiling.answer"
_description="Answer"
_columns={
"name": fields.char("Answer", required=True),
"question_id": fields.many2one('crm_profiling.question',"Question"),
}
class partner(osv.osv):
_inherit="res.partner"
_columns={
"answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
"partner","answer","Answers"),
}
def _questionnaire_compute(self, cr, uid, answers, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
        @param answers: list of answer IDs,
@param context: A standard dictionary for contextual values """
partner_id = context.get('active_id')
query = "select answer from partner_question_rel where partner=%s"
cr.execute(query, (partner_id,))
for x in cr.fetchall():
answers.append(x[0])
self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
return {}
def write(self, cr, uid, ids, vals, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@param context: A standard dictionary for contextual values """
if 'answers_ids' in vals:
vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
return super(partner, self).write(cr, uid, ids, vals, context=context)
class crm_segmentation(osv.osv):
""" CRM Segmentation """
_inherit="crm.segmentation"
_columns={
"answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
"profile","answer","Included Answers"),
"answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
"profile","answer","Excluded Answers"),
'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
this box if you want to use this tab as part of the \
segmentation rule. If not checked, the criteria beneath will be ignored')
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
]
def process_continue(self, cr, uid, ids, start=False):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm segmentation’s IDs """
partner_obj = self.pool.get('res.partner')
categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
'sales_purchase_active', 'profiling_active'])
for categ in categs:
if start:
if categ['exclusif']:
cr.execute('delete from res_partner_res_partner_category_rel where \
category_id=%s', (categ['categ_id'][0],))
partner_obj.invalidate_cache(cr, uid, ['category_id'])
id = categ['id']
cr.execute('select id from res_partner order by id ')
partners = [x[0] for x in cr.fetchall()]
if categ['sales_purchase_active']:
to_remove_list=[]
cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
line_ids = [x[0] for x in cr.fetchall()]
for pid in partners:
if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
if categ['profiling_active']:
to_remove_list = []
for pid in partners:
cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
answers_ids = [x[0] for x in cr.fetchall()]
if (not test_prof(cr, uid, id, pid, answers_ids)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
for partner in partner_obj.browse(cr, uid, partners):
category_ids = [categ_id.id for categ_id in partner.category_id]
if categ['categ_id'][0] not in category_ids:
cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))
partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])
self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
waytai/networkx
|
refs/heads/master
|
examples/drawing/weighted_graph.py
|
44
|
#!/usr/bin/env python
"""
An example using Graph as a weighted network.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
try:
import matplotlib.pyplot as plt
except:
raise
import networkx as nx
G=nx.Graph()
G.add_edge('a','b',weight=0.6)
G.add_edge('a','c',weight=0.2)
G.add_edge('c','d',weight=0.1)
G.add_edge('c','e',weight=0.7)
G.add_edge('c','f',weight=0.9)
G.add_edge('a','d',weight=0.3)
elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] >0.5]
esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <=0.5]
pos=nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G,pos,node_size=700)
# edges
nx.draw_networkx_edges(G,pos,edgelist=elarge,
width=6)
nx.draw_networkx_edges(G,pos,edgelist=esmall,
width=6,alpha=0.5,edge_color='b',style='dashed')
# labels
nx.draw_networkx_labels(G,pos,font_size=20,font_family='sans-serif')
plt.axis('off')
plt.savefig("weighted_graph.png") # save as png
plt.show() # display
|
wolcomm/rptk
|
refs/heads/master
|
rptk/format/__init__.py
|
1
|
# Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk module.format module."""
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import jinja2
from rptk.base import BaseObject
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
class BaseFormat(BaseObject):
"""Base class for the definition of output format classes."""
description = None
content_type = "text/plain"
def __init__(self, **opts):
"""Initialise new object."""
super(BaseFormat, self).__init__()
self.log_init()
self._opts = opts
self.log_init_done()
def format(self, result=None):
"""Check the result type and name."""
self.log_method_enter(method=self.current_method)
if not isinstance(result, dict):
self.raise_type_error(arg=result, cls=dict)
self.log_method_exit(method=self.current_method)
class JinjaFormat(BaseFormat):
"""Base class for Jinja2 template-based output format classes."""
template_name = None
def __init__(self, **opts):
"""Initialise new object."""
super(JinjaFormat, self).__init__(**opts)
self.log.debug("configuring jinja2 environment")
try:
self.env = jinja2.Environment(
loader=jinja2.PackageLoader('rptk')
)
self.env.trim_blocks = True
self.env.lstrip_blocks = True
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self._template = None
self.log_init_done()
def __enter__(self):
"""Load Jinja2 template."""
self.log_ready_start()
self._load_template()
self.log_ready_done()
return self
@property
def template(self):
"""Get loaded Jinja2 template object."""
return self._template
def _load_template(self):
"""Load template into Jinja2 Environment instance."""
try:
self._template = self.env.get_template(self.template_name)
except jinja2.TemplateError as e:
self.log.error(msg="{}".format(e))
raise e
self.log.debug("template loaded successfully")
def format(self, result=None):
"""Render output from template."""
self.log_method_enter(method=self.current_method)
super(JinjaFormat, self).format(result=result)
if isinstance(self.template, jinja2.Template):
try:
output = self.template.render(results=result,
now=datetime.datetime.now())
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
else:
self.raise_type_error(arg=self.template, cls=jinja2.Template)
self.log_method_exit(method=self.current_method)
return output
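# --- Illustrative sketch (added; not part of the original module) ---
# A hypothetical concrete format built on JinjaFormat: subclasses only need to point
# template_name at a template shipped in the rptk package (rendering is inherited).
# The class name and template file below are assumptions, not part of rptk.
class ExamplePlainFormat(JinjaFormat):
    description = "render results using a hypothetical plain-text template"
    template_name = "example_plain.j2"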
|
openstack/keystone
|
refs/heads/master
|
keystone/common/manager.py
|
2
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import time
import types
from oslo_log import log
import stevedore
from keystone.common import provider_api
from keystone.i18n import _
LOG = log.getLogger(__name__)
if hasattr(inspect, 'getfullargspec'):
getargspec = inspect.getfullargspec
else:
getargspec = inspect.getargspec
def response_truncated(f):
"""Truncate the list returned by the wrapped function.
This is designed to wrap Manager list_{entity} methods to ensure that
any list limits that are defined are passed to the driver layer. If a
hints list is provided, the wrapper will insert the relevant limit into
the hints so that the underlying driver call can try and honor it. If the
driver does truncate the response, it will update the 'truncated' attribute
in the 'limit' entry in the hints list, which enables the caller of this
function to know if truncation has taken place. If, however, the driver
layer is unable to perform truncation, the 'limit' entry is simply left in
the hints list for the caller to handle.
A _get_list_limit() method is required to be present in the object class
hierarchy, which returns the limit for this backend to which we will
truncate.
If a hints list is not provided in the arguments of the wrapped call then
any limits set in the config file are ignored. This allows internal use
of such wrapped methods where the entire data set is needed as input for
the calculations of some other API (e.g. get role assignments for a given
project).
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if kwargs.get('hints') is None:
return f(self, *args, **kwargs)
list_limit = self.driver._get_list_limit()
if list_limit:
kwargs['hints'].set_limit(list_limit)
return f(self, *args, **kwargs)
return wrapper
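# Illustrative sketch (added; not part of the original module): a hypothetical manager
# method wrapped with response_truncated, so that any configured list limit is copied
# into the hints object before the driver call. Names below are assumptions.
#
#     class ExampleManager(Manager):
#         @response_truncated
#         def list_widgets(self, hints=None):
#             # if a limit is configured, it has already been set on hints here
#             return self.driver.list_widgets(hints=hints)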
def load_driver(namespace, driver_name, *args):
try:
driver_manager = stevedore.DriverManager(namespace,
driver_name,
invoke_on_load=True,
invoke_args=args)
return driver_manager.driver
except stevedore.exception.NoMatches:
msg = (_('Unable to find %(name)r driver in %(namespace)r.'))
raise ImportError(msg % {'name': driver_name, 'namespace': namespace})
class _TraceMeta(type):
"""A metaclass that, in trace mode, will log entry and exit of methods.
This metaclass automatically wraps all methods on the class when
instantiated with a decorator that will log entry/exit from a method
when keystone is run in Trace log level.
"""
@staticmethod
def wrapper(__f, __classname):
__argspec = getargspec(__f)
__fn_info = '%(module)s.%(classname)s.%(funcname)s' % {
'module': inspect.getmodule(__f).__name__,
'classname': __classname,
'funcname': __f.__name__
}
        # NOTE(morganfainberg): Omit "cls" and "self" when printing trace logs;
        # the index can be calculated at wrap time rather than at runtime.
if __argspec.args and __argspec.args[0] in ('self', 'cls'):
__arg_idx = 1
else:
__arg_idx = 0
@functools.wraps(__f)
def wrapped(*args, **kwargs):
__exc = None
__t = time.time()
__do_trace = LOG.logger.getEffectiveLevel() <= log.TRACE
__ret_val = None
try:
if __do_trace:
LOG.trace('CALL => %s', __fn_info)
__ret_val = __f(*args, **kwargs)
except Exception as e: # nosec
__exc = e
raise
finally:
if __do_trace:
__subst = {
'run_time': (time.time() - __t),
'passed_args': ', '.join([
', '.join([repr(a)
for a in args[__arg_idx:]]),
', '.join(['%(k)s=%(v)r' % {'k': k, 'v': v}
for k, v in kwargs.items()]),
]),
'function': __fn_info,
'exception': __exc,
'ret_val': __ret_val,
}
if __exc is not None:
__msg = ('[%(run_time)ss] %(function)s '
'(%(passed_args)s) => raised '
'%(exception)r')
else:
# TODO(morganfainberg): find a way to indicate if this
# was a cache hit or cache miss.
__msg = ('[%(run_time)ss] %(function)s'
'(%(passed_args)s) => %(ret_val)r')
LOG.trace(__msg, __subst)
return __ret_val
return wrapped
def __new__(meta, classname, bases, class_dict):
final_cls_dict = {}
for attr_name, attr in class_dict.items():
# NOTE(morganfainberg): only wrap public instances and methods.
if (isinstance(attr, types.FunctionType) and
not attr_name.startswith('_')):
attr = _TraceMeta.wrapper(attr, classname)
final_cls_dict[attr_name] = attr
return type.__new__(meta, classname, bases, final_cls_dict)
class Manager(object, metaclass=_TraceMeta):
"""Base class for intermediary request layer.
The Manager layer exists to support additional logic that applies to all
or some of the methods exposed by a service that are not specific to the
HTTP interface.
It also provides a stable entry point to dynamic backends.
An example of a probable use case is logging all the calls.
"""
driver_namespace = None
_provides_api = None
def __init__(self, driver_name):
if self._provides_api is None:
raise ValueError('Programming Error: All managers must provide an '
'API that can be referenced by other components '
'of Keystone.')
if driver_name is not None:
self.driver = load_driver(self.driver_namespace, driver_name)
self.__register_provider_api()
def __register_provider_api(self):
provider_api.ProviderAPIs._register_provider_api(
name=self._provides_api, obj=self)
def __getattr__(self, name):
"""Forward calls to the underlying driver.
This method checks for a provider api before forwarding.
"""
try:
return getattr(provider_api.ProviderAPIs, name)
except AttributeError:
# NOTE(morgan): We didn't find a provider api, move on and
# forward to the driver as expected.
pass
f = getattr(self.driver, name)
if callable(f):
# NOTE(dstanek): only if this is callable (class or function)
# cache this
setattr(self, name, f)
return f
|
pedroeml/t1-fcg
|
refs/heads/master
|
CrowdDataAnalysis/graph/graph.py
|
1
|
from graph.node import Node
class Graph:
    def __init__(self, nodes=None):
        # Avoid a mutable default argument: a shared dict default would leak nodes
        # between Graph instances.
        self.nodes = nodes if nodes is not None else {}
def add_node(self, item):
node = Node(item)
self.nodes[item] = node
def find_node(self, item):
"""
:param item:
:return:
:rtype: Node
"""
return self.nodes[item]
def add_edge(self, item_a, item_b, weight):
node_a = self.find_node(item_a)
node_b = self.find_node(item_b)
node_a.add_edge(node_b, weight)
def change_edge_weight(self, item_a, item_b, weight):
node_a = self.find_node(item_a)
node_b = self.find_node(item_b)
node_a.change_edge_weight(node_b, weight)
def get_nodes(self):
"""
:return:
:rtype: list
"""
return self.nodes.values()
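# --- Illustrative usage sketch (added; not part of the original module) ---
# Build a tiny weighted graph: nodes are added first, then a weighted edge between them.
def _graph_usage_example():
    g = Graph()
    for item in ('a', 'b', 'c'):
        g.add_node(item)
    g.add_edge('a', 'b', 2.0)
    g.change_edge_weight('a', 'b', 1.5)
    return list(g.get_nodes())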
|
JianyuWang/neutron
|
refs/heads/master
|
neutron/db/migration/alembic_migrations/vmware_init_ops.py
|
32
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial schema operations for VMware plugins
from alembic import op
import sqlalchemy as sa
net_binding_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
name='tz_network_bindings_binding_type')
l2gw_segmentation_type = sa.Enum('flat', 'vlan',
name='networkconnections_segmentation_type')
qos_marking = sa.Enum('untrusted', 'trusted', name='qosqueues_qos_marking')
def upgrade():
op.create_table(
'tz_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type', net_binding_type, nullable=False),
sa.Column('phy_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id', 'binding_type',
'phy_uuid', 'vlan_id'))
op.create_table(
'multi_provider_networks',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'vcns_router_bindings',
sa.Column('status', sa.String(length=16), nullable=False),
sa.Column('status_description', sa.String(length=255), nullable=True),
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=16), nullable=True),
sa.Column('lswitch_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('router_id'))
op.create_table(
'vcns_edge_pool_bindings',
sa.Column('pool_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('pool_vseid', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['pool_id'], ['pools.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pool_id', 'edge_id'))
op.create_table(
'vcns_edge_monitor_bindings',
sa.Column('monitor_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('monitor_vseid', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('monitor_id', 'edge_id'))
op.create_table(
'vcns_firewall_rule_bindings',
sa.Column('rule_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=False),
sa.Column('rule_vseid', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['rule_id'], ['firewall_rules.id'], ),
sa.PrimaryKeyConstraint('rule_id', 'edge_id'))
op.create_table(
'vcns_edge_vip_bindings',
sa.Column('vip_id', sa.String(length=36), nullable=False),
sa.Column('edge_id', sa.String(length=36), nullable=True),
sa.Column('vip_vseid', sa.String(length=36), nullable=True),
sa.Column('app_profileid', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('vip_id'))
op.create_table(
'networkgateways',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('tenant_id', sa.String(length=36), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'networkconnections',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('segmentation_type', l2gw_segmentation_type, nullable=True),
sa.Column('segmentation_id', sa.Integer(), nullable=True),
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id'),
sa.UniqueConstraint('network_gateway_id', 'segmentation_type',
'segmentation_id'))
op.create_table(
'qosqueues',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True,
server_default=sa.sql.false()),
sa.Column('min', sa.Integer(), nullable=False),
sa.Column('max', sa.Integer(), nullable=True),
sa.Column('qos_marking', qos_marking, nullable=True),
sa.Column('dscp', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'networkqueuemappings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('queue_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'portqueuemappings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('queue_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ),
sa.PrimaryKeyConstraint('port_id', 'queue_id'))
op.create_table(
'maclearningstates',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('mac_learning_enabled', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id'))
op.create_table('neutron_nsx_port_mappings',
sa.Column('neutron_id', sa.String(length=36),
nullable=False),
sa.Column('nsx_port_id', sa.String(length=36),
nullable=False),
sa.Column('nsx_switch_id', sa.String(length=36),
nullable=True),
sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('neutron_id'))
op.create_table(
'lsn',
sa.Column('net_id',
sa.String(length=36), nullable=False),
sa.Column('lsn_id',
sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('lsn_id'))
op.create_table(
'lsn_port',
sa.Column('lsn_port_id',
sa.String(length=36), nullable=False),
sa.Column('lsn_id',
sa.String(length=36), nullable=False),
sa.Column('sub_id',
sa.String(length=36), nullable=False, unique=True),
sa.Column('mac_addr',
sa.String(length=32), nullable=False, unique=True),
sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('lsn_port_id'))
op.create_table(
'neutron_nsx_network_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['neutron_id'], ['networks.id'],
ondelete='CASCADE'),
# There might be multiple switches for a neutron network
sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'),
)
op.create_table(
'neutron_nsx_router_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['neutron_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('neutron_id'),
)
    # Execute statement to add a record in neutron_nsx_router_mappings for
    # each record in routers
op.execute("INSERT INTO neutron_nsx_router_mappings SELECT id,id "
"from routers")
op.create_table(
'neutron_nsx_security_group_mappings',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'))
# Execute statement to add a record in security group mappings for
# each record in securitygroups
op.execute("INSERT INTO neutron_nsx_security_group_mappings SELECT id,id "
"from securitygroups")
op.create_table(
'networkgatewaydevicereferences',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
sa.Column('interface_name', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'))
op.create_table(
'networkgatewaydevices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('nsx_id', sa.String(length=36), nullable=True),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('connector_type', sa.String(length=10), nullable=True),
sa.Column('connector_ip', sa.String(length=64), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id'))
|
JamisHoo/Cloud-Image-Migration-Tool
|
refs/heads/master
|
usr/lib/requests/packages/chardet/eucjpprober.py
|
2918
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
|
gdit-cnd/RAPID
|
refs/heads/master
|
unit_tests/test_monitor_views.py
|
2
|
from monitors.views import MonitorDashboard, AddIndicator, DomainMonitor, DeleteIndicator
from profiles.models import Profile
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
import datetime
class TestMonitorDashboard(TestCase):
indicator = "twitter.com"
ip_indicator = "199.59.150.7"
def setUp(self):
self.factory = RequestFactory()
self.url = reverse("monitor_dashboard")
self.request = self.factory.get(self.url)
# We test each role, authenticated users first.
def test_monitor_dashboard_auth_get(self):
# Spin up a user and run a request through the view.
self.request.user = Profile.objects.create_user(email='test@test.com', password='test', is_admin=False)
response = MonitorDashboard.as_view()(self.request)
# This view returns a template response, so we must manually render.
response = response.render()
self.assertEqual(response.status_code, 200)
self.assertTrue("DomainPanel" in response.content.decode("utf-8"))
self.assertTrue("IpPanel" in response.content.decode("utf-8"))
self.assertTrue("AlertPanel" in response.content.decode("utf-8"))
# Admin users.
def test_monitor_dashboard_admin_get(self):
# Spin up a user and run a request through the view.
self.request.user = Profile.objects.create_user(email='test@test.com', password='test', is_admin=True)
response = MonitorDashboard.as_view()(self.request)
# This view returns a template response, so we must manually render.
response = response.render()
self.assertEqual(response.status_code, 200)
self.assertTrue("DomainPanel" in response.content.decode("utf-8"))
self.assertTrue("IpPanel" in response.content.decode("utf-8"))
self.assertTrue("AlertPanel" in response.content.decode("utf-8"))
# Staff users.
def test_monitor_dashboard_staff_get(self):
# Spin up a user and run a request through the view.
self.request.user = Profile.objects.create_user(email='test@test.com', password='test', is_admin=False, is_staff=True)
response = MonitorDashboard.as_view()(self.request)
# This view returns a template response, so we must manually render.
response = response.render()
self.assertEqual(response.status_code, 200)
self.assertTrue("DomainPanel" in response.content.decode("utf-8"))
self.assertTrue("IpPanel" in response.content.decode("utf-8"))
self.assertTrue("AlertPanel" in response.content.decode("utf-8"))
# Anon users should be restricted.
def test_monitor_dashboard_anon_get(self):
# Spin up a user and run a request through the view.
self.request.user = AnonymousUser()
response = MonitorDashboard.as_view()(self.request)
# Anon users will be redirected, so we don't render().
self.assertNotEqual(response.status_code, 200)
self.assertTrue("DomainPanel" not in response.content.decode("utf-8"))
self.assertTrue("IpPanel" not in response.content.decode("utf-8"))
self.assertTrue("AlertPanel" not in response.content.decode("utf-8"))
class TestAddIndicator(TestCase):
indicator = "twitter.com"
ip_indicator = "199.59.150.7"
def setUp(self):
self.factory = RequestFactory()
self.url = reverse("add_indicators")
self.request = self.factory.get(self.url)
# We test each role, authenticated users first.
def test_add_indicator_auth(self):
# Spin up a user and run a request through the view.
self.request.user = Profile.objects.create_user(email='test@test.com', password='test', is_admin=False)
response = AddIndicator.as_view()(self.request)
# This view returns a template response, so we must manually render.
response = response.render()
self.assertEqual(response.status_code, 200)
self.assertTrue("New monitor submissions" in response.content.decode("utf-8"))
def test_add_indicator_admin(self):
# Spin up a user and run a request through the view.
self.request.user = Profile.objects.create_user(email='test@test.com', password='test', is_admin=True)
response = AddIndicator.as_view()(self.request)
response = response.render()
self.assertEqual(response.status_code, 200)
self.assertTrue("New monitor submissions" in response.content.decode("utf-8"))
# To test methods in the view, we must not use .as_view() because it will return a response.
# What we really need is just an instance of the class.
view = AddIndicator()
url = view.get_success_url()
self.assertEqual(url, "/monitors/")
def test_add_indicator_auth(self):
# Spin up a user and run a request through the view.
self.request.user = Profile.objects.create_user(email='test@test.com', password='test', is_admin=False)
response = AddIndicator.as_view()(self.request)
# This view returns a template response, so we must manually render.
response = response.render()
self.assertEqual(response.status_code, 200)
self.assertTrue("New monitor submissions" in response.content.decode("utf-8"))
# To test methods in the view, we must not use .as_view() because it will return a response.
# What we really need is just an instance of the class.
view = AddIndicator()
url = view.get_success_url()
self.assertEqual(url, "/monitors/")
def test_add_indicator_staff(self):
# Spin up a user and run a request through the view.
self.request.user = Profile.objects.create_user(email='test@test.com', password='test', is_staff=True)
response = AddIndicator.as_view()(self.request)
# This view returns a template response, so we must manually render.
response = response.render()
self.assertEqual(response.status_code, 200)
self.assertTrue("New monitor submissions" in response.content.decode("utf-8"))
# To test methods in the view, we must not use .as_view() because it will return a response.
# What we really need is just an instance of the class.
view = AddIndicator()
url = view.get_success_url()
self.assertEqual(url, "/monitors/")
# Anon users should be redirected to login page.
def test_add_indicator_anon(self):
# Spin up a user and run a request through the view.
self.request.user = AnonymousUser()
response = AddIndicator.as_view()(self.request)
self.assertEqual(response.status_code, 302)
self.assertTrue(isinstance(response, HttpResponseRedirect))
# Ensure we are redirecting to the correct URL.
self.assertTrue("/profile/login/?next=/monitors/add_indicators" in response._headers['location'],)
# To test methods in the view, we must not use .as_view() because it will return a response.
# What we really need is just an instance of the class.
view = AddIndicator()
url = view.get_success_url()
self.assertEqual(url, "/monitors/")
class TestRemoveIndicator(TestCase):
indicator = "twitter.com"
ip_indicator = "199.59.150.7"
def setUp(self):
self.factory = RequestFactory()
self.url = reverse("delete_indicators")
self.request = self.factory.get(self.url)
def test_remove_indicator_anon(self):
# Spin up a user and run a request through the view.
self.request.user = AnonymousUser()
response = DeleteIndicator.as_view()(self.request)
self.assertEqual(response.status_code, 302)
self.assertTrue(isinstance(response, HttpResponseRedirect))
# Ensure we are redirecting to the correct URL.
self.assertTrue("/profile/login/?next=/monitors/delete_indicators" in response._headers['location'])
def test_remove_indicator_admin(self):
self.request.user = Profile.objects.create_user(email='rapid@rapid.com', password='test', is_admin=True)
monitor = DomainMonitor(owner=self.request.user,
domain_name=self.indicator,
lookup_interval=24,
next_lookup= datetime.datetime.utcnow())
monitor.save()
response = DeleteIndicator.as_view()(self.request)
self.assertEqual(response.status_code, 200)
self.assertFalse(isinstance(response, HttpResponseRedirect))
# There is a confirmation page.
self.assertTrue("Confirm Monitor Deletion" in response.content.decode("utf-8"))
def test_remove_indicator_auth(self):
self.request.user = Profile.objects.create_user(email='rapid@rapid.com', password='test', is_admin=False)
monitor = DomainMonitor(owner=self.request.user,
domain_name=self.indicator,
lookup_interval=24,
next_lookup=datetime.datetime.utcnow())
monitor.save()
response = DeleteIndicator.as_view()(self.request)
self.assertEqual(response.status_code, 200)
self.assertFalse(isinstance(response, HttpResponseRedirect))
# There is a confirmation page.
self.assertTrue("Confirm Monitor Deletion" in response.content.decode("utf-8"))
def test_remove_indicator_staff(self):
self.request.user = Profile.objects.create_user(email='rapid@rapid.com', password='test', is_staff=True)
monitor = DomainMonitor(owner=self.request.user,
domain_name=self.indicator,
lookup_interval=24,
next_lookup=datetime.datetime.utcnow())
monitor.save()
response = DeleteIndicator.as_view()(self.request)
self.assertEqual(response.status_code, 200)
self.assertFalse(isinstance(response, HttpResponseRedirect))
# There is a confirmation page.
self.assertTrue("Confirm Monitor Deletion" in response.content.decode("utf-8"))
|
maciek123/pyage-forams
|
refs/heads/master
|
pyage_forams/conf/distributed2d/lowerright.py
|
1
|
# coding=utf-8
from functools import partial
import Pyro4
from pyage.core import address
from pyage.core.stop_condition import StepLimitStopCondition
from pyage_forams.solutions.distributed.neighbour_matcher import Neighbour2dMatcher
from pyage_forams.solutions.agent.remote_aggegate import create_remote_agent
from pyage_forams.solutions.distributed.request import create_dispatcher
from pyage_forams.solutions.environment import environment_factory, Environment2d
from pyage_forams.solutions.foram import create_forams
from pyage_forams.solutions.genom import GenomFactory
from pyage_forams.solutions.insolation_meter import StaticInsolation
from pyage_forams.conf.distributed2d.common import *
factory = GenomFactory(chambers_limit=2)
genom_factory = lambda: factory.generate
forams = create_forams(5, initial_energy=5)
agents = partial(create_remote_agent, "lowerright")
insolation_meter = StaticInsolation
environment = environment_factory(regeneration_factor=0.1, clazz=Environment2d)
neighbour_matcher = Neighbour2dMatcher
request_dispatcher = create_dispatcher()
stop_condition = lambda: StepLimitStopCondition(90)
reproduction_minimum = lambda: 10
movement_energy = lambda: 0.25
growth_minimum = lambda: 10
energy_need = lambda: 0.2
algae_limit = lambda: 2
newborn_limit = lambda: 9
reproduction_probability = lambda: 0.8
growth_probability = lambda: 0.8
growth_cost_factor = lambda: 0.5
capacity_factor = lambda: 1.1
initial_algae_probability = lambda: 0.03
address_provider = address.SequenceAddressProvider
ns_hostname = lambda: "127.0.0.1"
pyro_daemon = Pyro4.Daemon()
daemon = lambda: pyro_daemon
neighbours = lambda: {"left": "lowerleft", "upper": "upperright"}
|
ncloudioj/splice
|
refs/heads/master
|
tests/api/test_heartbeat.py
|
6
|
from flask import url_for
from nose.tools import assert_equal
from tests.base import BaseTestCase
from mock import Mock
class TestHeartbeat(BaseTestCase):
def setUp(self):
import splice.web.api.heartbeat
super(TestHeartbeat, self).setUp()
def get_key_mock(*args, **kwargs):
return self.key_mock
self.key_mock = Mock()
splice.web.api.heartbeat.Key = Mock(side_effect=get_key_mock)
def get_bucket_mock(*args, **kwargs):
return self.bucket_mock
self.bucket_mock = Mock()
self.env.s3.get_bucket = Mock(side_effect=get_bucket_mock)
def test_heartbeat(self):
"""
/__heartbeat__ test success path
"""
url = url_for('api.heartbeat.root')
response = self.client.get(url)
assert_equal(response.status_code, 200)
def test_fail_db_heartbeat(self):
"""
/__heartbeat__ test for DB Failure
"""
url = url_for('api.heartbeat.root')
def get_connect_mock(*args, **kwargs):
raise Exception()
connect = self.env.db.engine.connect
self.env.db.engine.connect = Mock(side_effect=get_connect_mock)
response = self.client.get(url)
self.env.db.engine.connect = connect
assert_equal(response.status_code, 500)
def test_fail_s3_heartbeat(self):
import splice.web.api.heartbeat
"""
/__heartbeat__ test s3 failure
"""
url = url_for('api.heartbeat.root')
def get_s3_key_mock(*args, **kwargs):
raise Exception()
connect = self.env.db.engine.connect
key = splice.web.api.heartbeat.Key
splice.web.api.heartbeat.Key = Mock(side_effect=get_s3_key_mock)
response = self.client.get(url)
self.env.db.engine.connect = connect
        # Restore the original Key class (capital K) so later tests see the real attribute.
        splice.web.api.heartbeat.Key = key
assert_equal(response.status_code, 500)
|
suiyuan2009/tensorflow
|
refs/heads/master
|
tensorflow/contrib/remote_fused_graph/pylib/python/ops/__init__.py
|
189
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Remote fused graph ops python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
google/flax
|
refs/heads/master
|
tests/nn_attention_test.py
|
1
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.nn.attention."""
from absl.testing import absltest
from absl.testing import parameterized
from flax import nn
from flax import jax_utils
import jax
from jax import lax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class AttentionTest(parameterized.TestCase):
def test_multihead_self_attention(self):
rng = random.PRNGKey(0)
x = jnp.ones((4, 2, 3, 5))
sa_module = nn.SelfAttention.partial(
num_heads=8,
attention_axis=(1, 2),
qkv_features=16,
kernel_init=initializers.ones,
bias_init=initializers.zeros,
)
y, initial_params = sa_module.init(rng, x)
self.assertEqual(y.shape, x.shape)
def test_multihead_encoder_decoder_attention(self):
rng = random.PRNGKey(0)
q = jnp.ones((4, 2, 3, 5))
kv = jnp.ones((4, 2, 3, 5))
sa_module = nn.MultiHeadDotProductAttention.partial(
num_heads=8,
attention_axis=(1, 2),
qkv_features=16,
kernel_init=initializers.ones,
bias_init=initializers.zeros,
)
y, _ = sa_module.init(rng, q, kv)
self.assertEqual(y.shape, q.shape)
def test_multihead_self_attention_w_dropout(self):
rng = random.PRNGKey(0)
x = jnp.ones((4, 2, 3, 5))
sa_module = nn.SelfAttention.partial(
num_heads=8,
attention_axis=(1, 2),
qkv_features=16,
kernel_init=initializers.ones,
bias_init=initializers.zeros,
dropout_rate=0.1,
)
rng1, rng2 = random.split(rng)
with nn.stochastic(rng1):
y, initial_params = sa_module.init(rng2, x)
self.assertEqual(y.shape, x.shape)
def test_causal_mask_1d(self):
"""Tests autoregresive masking for 1d attention."""
key = jnp.ones((4, 5, 2, 16)) # (bs, dim1, dim2, heads, channel)
att_axis = (1,)
mask_1d = nn.attention._make_causal_mask(
key, attention_axis=att_axis, self_mask=False)
ts = np.arange(key.shape[1])
mask_1d_simple = (ts[:, None] >= ts[None, :])[None, None, :, :]
np.testing.assert_allclose(mask_1d, mask_1d_simple,)
def test_causal_mask_2d(self):
"""Tests autoregresive masking for 2d attention."""
key = jnp.ones((4, 5, 5, 2, 16)) # (bs, dim1, dim2, heads, channel)
# masking when dealing with nd attention weights
# w_nd_shape = (4, 5, 5, 5, 5, 2)
att_axis = (1, 2)
mask_nd = nn.attention._make_causal_mask(
key, attention_axis=att_axis, self_mask=False)
# masking when dealing with 1d attention weights
# w_1d_shape = (4, 5*5, 5*5, 2)
ts = np.arange(25)
mask_1d = (ts[:, None] >= ts[None, :])[None, None, :, :]
np.testing.assert_allclose(mask_nd.reshape(mask_1d.shape), mask_1d,
atol=1e-9)
@parameterized.parameters([((5,), (1,)),
((5, 6), (1,)),
((5, 6), (2,)),
((5, 6), (1, 2)),])
def test_decoding(self, spatial_shape, attn_dims):
bs = 2
num_heads = 3
num_features = 4
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
inputs = random.normal(
key1, (bs,) + spatial_shape + (num_heads * num_features,))
module = nn.SelfAttention.partial(
num_heads=num_heads,
qkv_features=num_heads * num_features,
attention_axis=attn_dims,
causal_mask=True,
precision=lax.Precision.HIGHEST)
with nn.attention.Cache().mutate() as cache_def:
_, initial_params = module.init_by_shape(
key2, [(inputs.shape, inputs.dtype)], cache=cache_def)
model = nn.Model(module, initial_params)
y_ref = jax.jit(lambda f, x: f(x))(model, inputs)
# feed the inputs sequentially to simulate decoding
cache0 = cache_def.initialize_cache((bs,) + spatial_shape)
def body_fn(cache, x):
with cache.mutate() as new_cache:
y = model(x, cache=new_cache)
return new_cache, y
# scan_in_dim supports scanning multiple dims
_, y = jax_utils.scan_in_dim(body_fn, cache0, inputs,
axis=attn_dims, keepdims=True)
np.testing.assert_allclose(y_ref, y, atol=1e-5)
def test_autoregresive_receptive_field_1d(self):
"""Tests the autoregresive self-attention receptive field."""
rng = random.PRNGKey(0)
rng1, rng2 = random.split(rng, num=2)
def model_loss(inputs, pos):
out = model(inputs)
assert out.shape == input_shape
assert len(out.shape) == 3
return out[0, pos, :].sum()
grad_fn = jax.jit(jax.grad(model_loss))
def get_receptive_field_1d(pos):
g = grad_fn(inputs, pos)[0, :, :]
return jnp.any((jnp.abs(g) > 1e-5).astype(jnp.uint32), axis=-1)
length = 10
dim = 1
num_heads = 1
input_shape = (1, length, dim)
inputs = random.normal(rng2, input_shape)
module = nn.attention.SelfAttention.partial(
num_heads=num_heads,
causal_mask=True,
kernel_init=jax.nn.initializers.ones)
_, initial_params = module.init_by_shape(
rng1, [((1,) + (length, dim), jnp.float32)])
model = nn.Model(module, initial_params)
for i in range(length):
deps = get_receptive_field_1d(i)
assert (deps[:i] == 1).all(), ('Receptive Field Error: Some of the '
                                     'previous positions are not reachable '
'in autoregressive self-attention.')
if i != length - 1:
k = i + 1
assert (deps[k:] == 0).all(), ('Receptive Field Error: Some of the '
                                       'future positions are reachable in '
'autoregressive self-attention.')
if __name__ == '__main__':
absltest.main()
|
EdzBrz/websync-MasterNode
|
refs/heads/master
|
server.py
|
1
|
import tornado.ioloop
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.options import options
import signal, logging
is_closing = False
def start(app, port):
logging.info("Starting Tornado Server...")
tornado.options.parse_command_line()
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(port)
signal.signal(signal.SIGINT, signal_handler)
tornado.ioloop.PeriodicCallback(try_exit, 100).start()
tornado.ioloop.IOLoop.instance().start()
# Called every 100 ms to see if server should stop
def try_exit():
global is_closing
if is_closing:
tornado.ioloop.IOLoop.instance().stop()
def signal_handler(signum, frame):
stop()
def stop():
global is_closing
is_closing = True
|
mdibaiee/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/sanitizer.py
|
1734
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
from ..sanitizer import HTMLSanitizerMixin
class Filter(_base.Filter, HTMLSanitizerMixin):
def __iter__(self):
for token in _base.Filter.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
|
hansenDise/scrapy
|
refs/heads/master
|
scrapy/utils/testsite.py
|
93
|
from __future__ import print_function
from six.moves.urllib.parse import urljoin
from twisted.internet import reactor
from twisted.web import server, resource, static, util
class SiteTest(object):
def setUp(self):
super(SiteTest, self).setUp()
self.site = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
self.baseurl = "http://localhost:%d/" % self.site.getHost().port
def tearDown(self):
super(SiteTest, self).tearDown()
self.site.stopListening()
def url(self, path):
return urljoin(self.baseurl, path)
def test_site():
r = resource.Resource()
r.putChild("text", static.Data("Works", "text/plain"))
r.putChild("html", static.Data("<body><p class='one'>Works</p><p class='two'>World</p></body>", "text/html"))
r.putChild("enc-gb18030", static.Data("<p>gb18030 encoding</p>", "text/html; charset=gb18030"))
r.putChild("redirect", util.Redirect("/redirected"))
r.putChild("redirected", static.Data("Redirected here", "text/plain"))
return server.Site(r)
if __name__ == '__main__':
port = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
print("http://localhost:%d/" % port.getHost().port)
reactor.run()
|
AnotherIvan/calibre
|
refs/heads/master
|
src/calibre/gui2/preferences/save_template.py
|
11
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QWidget, pyqtSignal
from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.preferences.save_template_ui import Ui_Form
from calibre.library.save_to_disk import FORMAT_ARG_DESCS, preprocess_template
from calibre.utils.formatter import validation_formatter
from calibre.gui2.dialogs.template_dialog import TemplateDialog
class SaveTemplate(QWidget, Ui_Form):
changed_signal = pyqtSignal()
def __init__(self, *args):
QWidget.__init__(self, *args)
Ui_Form.__init__(self)
self.setupUi(self)
self.orig_help_text = self.help_label.text()
def initialize(self, name, default, help, field_metadata):
variables = sorted(FORMAT_ARG_DESCS.keys())
if name == 'send_to_device':
self.help_label.setText(self.orig_help_text + _(
'. This setting can be overridden for <b>individual devices</b>,'
' by clicking the device icon and choosing "Configure this device".'))
rows = []
for var in variables:
rows.append(u'<tr><td>%s</td><td> </td><td>%s</td></tr>'%
(var, FORMAT_ARG_DESCS[var]))
rows.append(u'<tr><td>%s </td><td> </td><td>%s</td></tr>'%(
_('Any custom field'),
_('The lookup name of any custom field (these names begin with "#").')))
table = u'<table>%s</table>'%(u'\n'.join(rows))
self.template_variables.setText(table)
self.field_metadata = field_metadata
self.opt_template.initialize(name+'_template_history',
default, help)
self.opt_template.editTextChanged.connect(self.changed)
self.opt_template.currentIndexChanged.connect(self.changed)
self.option_name = name
self.open_editor.clicked.connect(self.do_open_editor)
def do_open_editor(self):
t = TemplateDialog(self, self.opt_template.text(), fm=self.field_metadata)
t.setWindowTitle(_('Edit template'))
if t.exec_():
self.opt_template.set_value(t.rule[1])
def changed(self, *args):
self.changed_signal.emit()
def validate(self):
'''
Do a syntax check on the format string. Doing a semantic check
(verifying that the fields exist) is not useful in the presence of
custom fields, because they may or may not exist.
'''
tmpl = preprocess_template(self.opt_template.text())
try:
t = validation_formatter.validate(tmpl)
if t.find(validation_formatter._validation_string) < 0:
return question_dialog(self, _('Constant template'),
_('The template contains no {fields}, so all '
'books will have the same name. Is this OK?'))
except Exception as err:
error_dialog(self, _('Invalid template'),
'<p>'+_('The template %s is invalid:')%tmpl +
'<br>'+str(err), show=True)
return False
return True
def set_value(self, val):
self.opt_template.set_value(val)
def save_settings(self, config, name):
val = unicode(self.opt_template.text())
config.set(name, val)
self.opt_template.save_history(self.option_name+'_template_history')
|
alextruberg/custom_django
|
refs/heads/master
|
tests/fixtures_regress/models.py
|
49
|
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Animal(models.Model):
name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
count = models.IntegerField()
weight = models.FloatField()
# use a non-default name for the default manager
specimens = models.Manager()
def __str__(self):
return self.name
class Plant(models.Model):
name = models.CharField(max_length=150)
class Meta:
# For testing when upper case letter in app name; regression for #4057
db_table = "Fixtures_regress_plant"
@python_2_unicode_compatible
class Stuff(models.Model):
name = models.CharField(max_length=20, null=True)
owner = models.ForeignKey(User, null=True)
def __str__(self):
return six.text_type(self.name) + ' is owned by ' + six.text_type(self.owner)
class Absolute(models.Model):
name = models.CharField(max_length=40)
class Parent(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ('id',)
class Child(Parent):
data = models.CharField(max_length=10)
# Models to regression test #7572
class Channel(models.Model):
name = models.CharField(max_length=255)
class Article(models.Model):
title = models.CharField(max_length=255)
channels = models.ManyToManyField(Channel)
class Meta:
ordering = ('id',)
# Subclass of a model with a ManyToManyField for test_ticket_20820
class SpecialArticle(Article):
pass
# Models to regression test #11428
@python_2_unicode_compatible
class Widget(models.Model):
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class WidgetProxy(Widget):
class Meta:
proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
def get_by_natural_key(self, key):
return self.get(name=key)
@python_2_unicode_compatible
class Store(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
main = models.ForeignKey('self', null=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
@python_2_unicode_compatible
class Person(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
# Person doesn't actually have a dependency on store, but we need to define
# one to test the behavior of the dependency resolution algorithm.
def natural_key(self):
return (self.name,)
natural_key.dependencies = ['fixtures_regress.store']
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(Person)
stores = models.ManyToManyField(Store)
class Meta:
ordering = ('name',)
def __str__(self):
return '%s by %s (available at %s)' % (
self.name,
self.author.name,
', '.join(s.name for s in self.stores.all())
)
class NKManager(models.Manager):
def get_by_natural_key(self, data):
return self.get(data=data)
@python_2_unicode_compatible
class NKChild(Parent):
data = models.CharField(max_length=10, unique=True)
objects = NKManager()
def natural_key(self):
return self.data
def __str__(self):
return 'NKChild %s:%s' % (self.name, self.data)
@python_2_unicode_compatible
class RefToNKChild(models.Model):
text = models.CharField(max_length=10)
nk_fk = models.ForeignKey(NKChild, related_name='ref_fks')
nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')
def __str__(self):
return '%s: Reference to %s [%s]' % (
self.text,
self.nk_fk,
', '.join(str(o) for o in self.nk_m2m.all())
)
# Some models with pathological circular dependencies
class Circle1(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle2']
class Circle2(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle1']
class Circle3(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle3']
class Circle4(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle5']
class Circle5(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle6']
class Circle6(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle4']
class ExternalDependency(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.book']
# Model for regression test of #11101
class Thingy(models.Model):
name = models.CharField(max_length=255)
|
Yukarumya/Yukarum-Redfoxes
|
refs/heads/master
|
python/mozboot/setup.py
|
11
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from distutils.core import setup
VERSION = '0.1'
setup(
name='mozboot',
description='System bootstrap for building Mozilla projects.',
license='MPL 2.0',
packages=['mozboot'],
version=VERSION,
scripts=['bin/bootstrap.py'],
)
|
chencoyote/owasp-pysec
|
refs/heads/master
|
pysec/kv/kyoto.py
|
1
|
# Python Security Project (PySec) and its related class files.
#
# PySec is a set of tools for secure application development under Linux
#
# Copyright 2014 PySec development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: ascii -*-
import os
import kv
import kyotocabinet as kyoto
from pysec import log
__name__ = 'pysec.kv.kyoto'
log.register_actions('KYOTOKV_NEW', 'KYOTOKV_SET', 'KYOTOKV_GET',
'KYOTOKV_DEL', 'KYOTOKV_CLEAR', 'KYOTOKV_POP',
'KYOTOKV_UPDATE', 'KYOTOKV_CLOSE')
_OPEN_MODE = kyoto.DB.OWRITER | kyoto.DB.OREADER | kyoto.DB.OCREATE
class KyotoKV(kv.HardKV):
@log.wrap(log.actions.KYOTOKV_NEW, fields=('path',), lib=__name__)
def __init__(self, path, parse=lambda v: v, unparse=lambda v: v):
self.fk = kyoto.DB()
if not self.fk.open(path, _OPEN_MODE):
raise self.fk.error()
self.parse = parse
self.unparse = unparse
@log.wrap(log.actions.KYOTOKV_CLOSE, lib=__name__)
def close(self):
self.fk.close()
def __del__(self):
self.close()
def __len__(self):
count = self.fk.count()
if count < 0:
raise self.fk.error()
return count
@log.wrap(log.actions.KYOTOKV_GET, fields=('key',), lib=__name__)
def __getitem__(self, key):
value = self.fk.get(self.parse(key))
if value is None:
raise self.fk.error()
return self.unparse(value)
@log.wrap(log.actions.KYOTOKV_SET, fields=('key', 'value'), lib=__name__)
def __setitem__(self, key, value):
if not self.fk.set(self.parse(key), self.parse(value)):
raise self.fk.error()
@log.wrap(log.actions.KYOTOKV_DEL, fields=('key',), lib=__name__)
def __delitem__(self, key):
if not self.fk.remove(self.parse(key)):
raise self.fk.error()
def __contains__(self, key):
return self.fk.check(self.parse(key)) >= 0
def __iter__(self):
for key, _ in self.iteritems():
yield key
def __str__(self):
return '<KyotoKV %s>' % hex(id(self))
def __repr__(self):
return '{%s}' % ', '.join('%r: %r' % (k, v)
for k, v in self.iteritems())
def size(self):
return os.stat(self.fk.path()).st_size
@log.wrap(log.actions.KYOTOKV_CLEAR, lib=__name__)
def clear(self):
self.fk.clear()
def copy(self):
raise NotImplementedError
@classmethod
def fromkeys(seq, value=None):
raise NotImplementedError
@log.wrap(log.actions.KYOTOKV_GET, fields=('key',), lib=__name__)
def get(self, key, default=None):
value = self.fk.get(self.parse(key))
return default if value is None else self.unparse(value)
def has_key(self, key):
return key in self
def items(self):
return list(self.iteritems())
def iteritems(self):
parse = self.parse
unparse = self.unparse
try:
cursor = self.fk.cursor()
cursor.jump()
while 1:
record = cursor.get(1)
if record is None:
break
yield unparse(record[0]), unparse(record[1])
finally:
cursor.disable()
def values(self):
return list(self.itervalues())
def itervalues(self):
unparse = self.unparse
return (value for _, value in self.iteritems())
def keys(self):
return list(self)
def iterkeys(self):
return iter(self)
@log.wrap(log.actions.KYOTOKV_POP, fields=('key',), lib=__name__)
def pop(self, key):
value = self[key]
self.fk.remove(self.parse(key))
return value
@log.wrap(log.actions.KYOTOKV_POP, lib=__name__)
def popitem(self):
item = self.fk.shift()
if item is None:
raise KeyError("popitem(): dictionary is empty")
return self.unparse(item[0]), self.unparse(item[1])
def setdefault(self, key, default=None):
key = self.parse(key)
        # Return the existing value if the key is already present; otherwise store
        # the default and return it (kyotocabinet's add() fails on existing keys).
        return default if self.fk.add(key, self.parse(default)) else self[key]
@log.wrap(log.actions.KYOTOKV_UPDATE, lib=__name__)
def update(self, **other):
parse = self.parse
self.fk.set_bulks(((parse(k), parse(v))
for k, v in other.iteritems()), 1)
def cas(self, key, oval, nval):
if not self.fk.cas(self.parse(key), self.parse(oval),
self.parse(nval)):
raise self.fk.error()
|
ArianaGashi/Techstitution
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py
|
2800
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
        # not be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
myersjustinc/django-calaccess-campaign-browser
|
refs/heads/master
|
calaccess_campaign_browser/management/commands/importtosqlserver.py
|
3
|
import os
import re
import csv
import fnmatch
from optparse import make_option
import pypyodbc
from django.conf import settings
from django.db import connection
from django.db.models import get_model
from django.core.management.base import AppCommand
from calaccess_campaign_browser.management.commands import CalAccessCommand
custom_options = (
make_option(
"--skip-contributions",
action="store_false",
dest="contributions",
default=True,
help="Skip contributions import"
),
make_option(
"--skip-expenditures",
action="store_false",
dest="expenditures",
default=True,
help="Skip expenditures import"
),
make_option(
"--skip-summary",
action="store_false",
dest="summary",
default=True,
help="Skip summary import"
),
)
def all_files(root, patterns='*', single_level=False, yield_folders=False):
"""
    Expand patterns from semicolon-separated string to list
example usage: thefiles = list(all_files('/tmp', '*.py;*.htm;*.html'))
"""
patterns = patterns.split(';')
for path, subdirs, files in os.walk(root):
if yield_folders:
files.extend(subdirs)
files.sort()
for name in files:
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
yield os.path.join(path, name)
break
if single_level:
break
class Command(CalAccessCommand):
"""
Send CSVs exported from `exportcalaccesscampaignbrowser` to
Microsoft SQL Server
"""
option_list = CalAccessCommand.option_list + custom_options
conn_path = (
'Driver=%s;Server=%s;port=%s;uid=%s;pwd=%s;database=%s;autocommit=1'
) % (
settings.SQL_SERVER_DRIVER,
settings.SQL_SERVER_ADDRESS,
settings.SQL_SERVER_PORT,
settings.SQL_SERVER_USER,
settings.SQL_SERVER_PASSWORD,
settings.SQL_SERVER_DATABASE
)
conn = pypyodbc.connect(conn_path)
cursor = conn.cursor()
app = AppCommand()
def set_options(self, *args, **kwargs):
self.data_dir = os.path.join(
settings.BASE_DIR, 'data')
os.path.exists(self.data_dir) or os.mkdir(self.data_dir)
def generate_table_schema(self, model_name):
"""
Take Expenditure, Contribution or Summary models; grab their db schema,
and create MS SQL Server compatible schema
"""
self.log(' Creating database schema for {} ...'.format(model_name))
style = self.app.style
model = get_model('calaccess_campaign_browser', model_name)
table_name = 'dbo.{}'.format(model._meta.db_table)
raw_statement = connection.creation\
.sql_create_model(model, style)[0][0]
# http://stackoverflow.com/a/14693789/868724
ansi_escape = re.compile(r'\x1b[^m]*m')
strip_ansi_statement = (ansi_escape.sub('', raw_statement))
statement = strip_ansi_statement.replace('\n', '')\
.replace('`', '')\
.replace('bool', 'bit')\
.replace(' AUTO_INCREMENT', '')\
.replace(model._meta.db_table, table_name)\
.replace('NOT NULL', '')
statement = """{}, committee_name varchar(255),\
filer_name varchar(255), filer_id integer,\
filer_id_raw integer );""".format(statement[:-3])
self.construct_table(model_name, table_name, statement)
def construct_table(self, model_name, table_name, query):
"""
Create matching MS SQL Server database table
"""
statement = str(query)
self.log(' Creating {} table ...'.format(table_name))
drop_path = "IF object_id('{}') IS NOT NULL DROP TABLE {}".format(
table_name, table_name)
self.cursor.execute(drop_path)
self.cursor.execute(statement)
self.cursor.commit()
self.success(' {} created'.format(table_name))
self.load_table(table_name, model_name)
def load_table(self, table_name, model_name):
"""
Load Table with CSVs generated from `exportcalaccesscampaignbrowser`
See: https://msdn.microsoft.com/en-us/library/ms188609.aspx
"""
self.log(' Loading table {} ...'.format(table_name))
all_csvs = list(all_files(self.data_dir, '*.csv'))
csv_ = [f for f in all_csvs if fnmatch.fnmatch(f, '*-{}.csv'.format(
model_name))]
if len(csv_) > 1:
self.log(' There are multiple files matching {}'.format(
model_name))
self.log(' We only support one match at the moment. Sorry!')
raise NotImplementedError
with open(csv_[0]) as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
reader.next() # skip headers
for row in reader:
# Remove none values and turn booleans into bit type
row = [r.replace('"', '') for r in row]
row = [r.replace('None', '') for r in row]
row = [r.replace('True', '0') for r in row]
row = [r.replace('False', '1') for r in row]
sql = """INSERT INTO {} VALUES {};""".format(
table_name, tuple(row))
try:
self.cursor.execute(sql)
self.log(' loading {} ID:{} ...'.format(
model_name, row[0]))
except pypyodbc.Error, e:
                    self.failure('    Encountered an error')
raise e
self.cursor.commit()
self.success(' Loaded {} with data from {}'.format(
table_name, os.path.split(csv_[0])[1]))
def handle(self, *args, **options):
self.header('Importing models ...')
self.set_options(*args, **options)
if options['contributions']:
self.generate_table_schema('contribution')
if options['expenditures']:
self.generate_table_schema('expenditure')
|
jedimatt42/pi-messaging
|
refs/heads/master
|
htdocs/TipiConfig.py
|
1
|
"""
This config object will be used by in process and external process
services. It is externally shared as /home/tipi/tipi.config, and
internally by providing the TipiConfig.instance() accessor.
If an external actor has updated the file it will be reloaded
automatically with any read operation (get & keys). Unsaved
data will be lost.
"""
import os
import logging
LOGGER = logging.getLogger(__name__)
CONFIG_DEFAULTS = {
"DSK1_DIR": "",
"DSK2_DIR": "",
"DSK3_DIR": "",
"URI1": "",
"URI2": "",
"URI3": "",
"TIPI_NAME": "TIPI",
"WIFI_SSID": "",
"WIFI_PSK": ""
}
class TipiConfig(object):
""" Encapsulation of tipi.config file, and in memory config values """
def __init__(self):
self.tipi_config = "/home/tipi/tipi.config"
self.records = dict(CONFIG_DEFAULTS)
self.sorted_keys = []
self.mtime = 0
self.changes = set()
self.load()
@staticmethod
def instance():
""" return the singleton config object """
return SINGLETON
def load(self):
""" read config values from file """
if os.path.exists(self.tipi_config):
self.mtime = os.path.getmtime(self.tipi_config)
with open(self.tipi_config, 'r') as in_file:
self.records = dict(CONFIG_DEFAULTS)
for line in in_file.readlines():
key = line.split('=')[0].strip()
value = line.split('=')[1].strip()
self.records[key] = value
LOGGER.debug("read record: %s = %s", key, value)
self.sorted_keys = list(self.records.keys())
self.sorted_keys.sort()
else:
LOGGER.info("config file missing: %s", self.tipi_config)
def save(self):
""" write the in-memory config out to disk to share and persist """
with open(self.tipi_config, 'w') as out_file:
for key in self.sorted_keys:
out_file.write(key + "=" + self.records[key])
out_file.write("\n")
# Some config events require action
if "WIFI_SSID" in self.changes or "WIFI_PSK" in self.changes:
self.__triggerWifiConfig()
# reset changed settings
self.changes = set()
def __check_for_update(self):
if os.path.exists(self.tipi_config):
if os.path.getmtime(self.tipi_config) > self.mtime:
self.load()
def keys(self):
""" Provide the keys to iterate over """
self.__check_for_update()
return self.sorted_keys
def set(self, key, value):
""" Update a config item """
key = key.strip()
newvalue = value.strip()
oldvalue = self.records.get(key, "")
if oldvalue != newvalue:
self.records[key.strip()] = value.strip()
self.sorted_keys = list(self.records.keys())
self.sorted_keys.sort()
self.changes.add(key)
def get(self, key, default=None):
""" Fetch a config item """
self.__check_for_update()
return self.records.get(key.strip(), default)
def __triggerWifiConfig(self):
with open("/tmp/wificonfig", 'w') as out_file:
out_file.write(self.records["WIFI_SSID"])
out_file.write('\n')
out_file.write(self.records["WIFI_PSK"])
out_file.write('\n')
SINGLETON = TipiConfig()
|
socialplanning/Cabochon
|
refs/heads/master
|
cabochon/config/__init__.py
|
9480
|
#
|
alianmohammad/pd-gem5-latest
|
refs/heads/master
|
src/arch/x86/isa/insts/general_purpose/control_transfer/conditional_jump.py
|
90
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop JZ_I
{
    # Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(CZF,)
};
def macroop JNZ_I
{
    # Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCZF,)
};
def macroop JB_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(CCF,)
};
def macroop JNB_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCCF,)
};
def macroop JBE_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(CCvZF,)
};
def macroop JNBE_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCCvZF,)
};
def macroop JS_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(CSF,)
};
def macroop JNS_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCSF,)
};
def macroop JP_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(CPF,)
};
def macroop JNP_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCPF,)
};
def macroop JL_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(CSxOF,)
};
def macroop JNL_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCSxOF,)
};
def macroop JLE_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(CSxOvZF,)
};
def macroop JNLE_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCSxOvZF,)
};
def macroop JO_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(COF,)
};
def macroop JNO_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2, flags=(nCOF,)
};
def macroop JRCX_I
{
rdip t1
add t0, t0, rcx, flags=(EZF,), dataSize=asz
wripi t1, imm, flags=(CEZF,)
};
'''
|
quantopian/pgcontents
|
refs/heads/master
|
pgcontents/tests/test_synchronization.py
|
1
|
"""
Tests for synchronization tools.
"""
from __future__ import unicode_literals
from base64 import b64encode
from logging import Logger
from unittest import TestCase
from cryptography.fernet import Fernet
from sqlalchemy import create_engine
from pgcontents import PostgresContentsManager
from pgcontents.crypto import (
FernetEncryption,
NoEncryption,
single_password_crypto_factory,
)
from pgcontents.query import generate_files, generate_checkpoints
from pgcontents.utils.ipycompat import new_markdown_cell
from .utils import (
assertRaisesHTTPError,
clear_test_db,
remigrate_test_schema,
populate,
TEST_DB_URL,
)
from ..utils.sync import (
reencrypt_all_users,
unencrypt_all_users,
)
try:
import mock
except ImportError:
from unittest import mock
class TestReEncryption(TestCase):
def setUp(self):
remigrate_test_schema()
def tearDown(self):
clear_test_db()
def add_markdown_cell(self, path):
# Load and update
model = self.contents.get(path=path)
model['content'].cells.append(
new_markdown_cell('Created by test: ' + path)
)
# Save and checkpoint again.
self.contents.save(model, path=path)
return model
def test_reencryption(self):
"""
Create two unencrypted notebooks and a file, create checkpoints for
each, then encrypt and check that content is unchanged, then re-encrypt
and check the same.
"""
db_url = TEST_DB_URL
user_id = 'test_reencryption'
no_crypto = NoEncryption()
no_crypto_manager = PostgresContentsManager(
user_id=user_id,
db_url=db_url,
crypto=no_crypto,
create_user_on_startup=True,
)
key1 = b'fizzbuzz' * 4
crypto1 = FernetEncryption(Fernet(b64encode(key1)))
manager1 = PostgresContentsManager(
user_id=user_id,
db_url=db_url,
crypto=crypto1,
)
key2 = key1[::-1]
crypto2 = FernetEncryption(Fernet(b64encode(key2)))
manager2 = PostgresContentsManager(
user_id=user_id,
db_url=db_url,
crypto=crypto2,
)
# Populate an unencrypted user.
paths = populate(no_crypto_manager)
original_content = {}
for path in paths:
# Create a checkpoint of the original content and store what we
# expect it to look like.
no_crypto_manager.create_checkpoint(path)
original_content[path] = no_crypto_manager.get(path)['content']
updated_content = {}
for path in paths:
# Create a new version of each notebook with a cell appended.
model = no_crypto_manager.get(path=path)
model['content'].cells.append(
new_markdown_cell('Created by test: ' + path)
)
no_crypto_manager.save(model, path=path)
# Store the updated content.
updated_content[path] = no_crypto_manager.get(path)['content']
# Create a checkpoint of the new content.
no_crypto_manager.create_checkpoint(path)
def check_path_content(path, mgr, expected):
retrieved = mgr.get(path)['content']
self.assertEqual(retrieved, expected[path])
def check_reencryption(old, new):
for path in paths:
# We should no longer be able to retrieve notebooks from the
# no-crypto manager.
with assertRaisesHTTPError(self, 500):
old.get(path)
# The new manager should read the latest version of each file.
check_path_content(path, new, updated_content)
# We should have two checkpoints available, one from the
# original version of the file, and one for the updated
# version.
(new_cp, old_cp) = new.list_checkpoints(path)
self.assertGreater(
new_cp['last_modified'],
old_cp['last_modified'],
)
# The old checkpoint should restore us to the original state.
new.restore_checkpoint(old_cp['id'], path)
check_path_content(path, new, original_content)
                # The new checkpoint should put us back into our updated state.
new.restore_checkpoint(new_cp['id'], path)
check_path_content(path, new, updated_content)
engine = create_engine(db_url)
logger = Logger('Reencryption Testing')
no_crypto_factory = {user_id: no_crypto}.__getitem__
crypto1_factory = {user_id: crypto1}.__getitem__
crypto2_factory = {user_id: crypto2}.__getitem__
# Verify that reencryption is idempotent:
for _ in range(2):
reencrypt_all_users(
engine,
no_crypto_factory,
crypto1_factory,
logger,
)
check_reencryption(no_crypto_manager, manager1)
for _ in range(2):
reencrypt_all_users(
engine,
crypto1_factory,
crypto2_factory,
logger,
)
check_reencryption(manager1, manager2)
with self.assertRaises(ValueError):
# Using reencrypt_all_users with a no-encryption target isn't
# supported.
reencrypt_all_users(
engine,
crypto2_factory,
no_crypto_factory,
logger,
)
# There should have been no changes from the failed attempt.
check_reencryption(manager1, manager2)
# Unencrypt and verify that we can now read everything with the no
# crypto manager.
unencrypt_all_users(engine, crypto2_factory, logger)
check_reencryption(manager2, no_crypto_manager)
class TestGenerateNotebooks(TestCase):
def setUp(self):
remigrate_test_schema()
self.db_url = TEST_DB_URL
self.engine = create_engine(self.db_url)
encryption_pw = u'foobar'
self.crypto_factory = single_password_crypto_factory(encryption_pw)
def tearDown(self):
clear_test_db()
@staticmethod
def cleanup_pgcontents_managers(managers):
for manager in managers:
manager.engine.dispose()
manager.checkpoints.engine.dispose()
def populate_users(self, user_ids):
"""
Create a `PostgresContentsManager` and notebooks for each user.
Notebooks are returned in a list in order of their creation.
"""
def encrypted_pgmanager(user_id):
return PostgresContentsManager(
user_id=user_id,
db_url=self.db_url,
crypto=self.crypto_factory(user_id),
create_user_on_startup=True,
)
managers = {user_id: encrypted_pgmanager(user_id)
for user_id in user_ids}
paths = [(user_id, path)
for user_id in user_ids
for path in populate(managers[user_id])]
# Create a text file for each user as well, which should be ignored by
# the notebook generators
model = {'content': 'text file contents', 'format': 'text'}
for manager in managers.values():
manager.new(model, path='text file.txt')
return (managers, paths)
def save_bad_notebook(self, manager):
"""
Save a notebook with non-notebook content. Trying to parse it should
cause `CorruptedFile` to be raised.
Returns the file id of the saved notebook.
"""
model = {
'type': 'file',
'content': 'bad notebook contents',
'format': 'text',
}
path = 'bad notebook.ipynb'
manager.new(model, path=path)
return manager.get_file_id(path)
def test_generate_files(self):
"""
Create files for three users; try fetching them using `generate_files`.
"""
user_ids = ['test_generate_files0',
'test_generate_files1',
'test_generate_files2']
(managers, paths) = self.populate_users(user_ids)
# Dispose of all engines created during this test to prevent leaked
# database connections.
self.addCleanup(self.cleanup_pgcontents_managers, managers.values())
# Since the bad notebook is saved last, it will be hit only when no
# max_dt is specified.
bad_notebook_id = self.save_bad_notebook(managers[user_ids[0]])
def get_file_dt(idx):
(user_id, path) = paths[idx]
return managers[user_id].get(path, content=False)['last_modified']
# Find three split datetimes
n = 3
split_idxs = [i * (len(paths) // (n + 1)) for i in range(1, n + 1)]
split_dts = [get_file_dt(idx) for idx in split_idxs]
def check_call(kwargs, expect_files, expect_warning=False):
"""
Call `generate_files`; check that all expected files are found,
with the correct content, in the correct order.
"""
file_record = []
logger = Logger('Generate Files Testing')
with mock.patch.object(logger, 'warning') as mock_warn:
for result in generate_files(self.engine, self.crypto_factory,
logger=logger, **kwargs):
manager = managers[result['user_id']]
# This recreates functionality from
# `manager._notebook_model_from_db` to match with the model
# returned by `manager.get`.
nb = result['content']
manager.mark_trusted_cells(nb, result['path'])
# Check that the content returned by the pgcontents manager
# matches that returned by `generate_files`
self.assertEqual(
nb,
manager.get(result['path'])['content']
)
file_record.append((result['user_id'], result['path']))
if expect_warning:
mock_warn.assert_called_once_with(
'Corrupted file with id %d in table files.'
% bad_notebook_id
)
mock_warn.reset_mock()
else:
mock_warn.assert_not_called()
# Make sure all files were found in the right order
self.assertEqual(file_record, expect_files)
# Expect all files given no `min_dt`/`max_dt`
check_call(
{},
paths,
expect_warning=True,
)
check_call(
{'min_dt': split_dts[1]},
paths[split_idxs[1]:],
expect_warning=True,
)
check_call(
{'max_dt': split_dts[1]},
paths[:split_idxs[1]],
expect_warning=False,
)
check_call(
{'min_dt': split_dts[0], 'max_dt': split_dts[2]},
paths[split_idxs[0]:split_idxs[2]],
expect_warning=False,
)
def test_generate_checkpoints(self):
"""
Create checkpoints in three stages; try fetching them with
`generate_checkpoints`.
"""
user_ids = ['test_generate_checkpoints0',
'test_generate_checkpoints1',
'test_generate_checkpoints2']
(managers, paths) = self.populate_users(user_ids)
# Dispose of all engines created during this test to prevent leaked
# database connections.
self.addCleanup(self.cleanup_pgcontents_managers, managers.values())
def update_content(user_id, path, text):
"""
Add a Markdown cell and save the notebook.
Returns the new notebook content.
"""
manager = managers[user_id]
model = manager.get(path)
model['content'].cells.append(
new_markdown_cell(text + ' on path: ' + path)
)
manager.save(model, path)
return manager.get(path)['content']
# Each of the next three steps creates a checkpoint for each notebook
# and stores the notebook content in a list, together with the user id,
# the path, and the datetime of the new checkpoint.
# Begin by making a checkpoint for the original notebook content.
beginning_checkpoints = []
for user_id, path in paths:
content = managers[user_id].get(path)['content']
dt = managers[user_id].create_checkpoint(path)['last_modified']
beginning_checkpoints.append((user_id, path, dt, content))
# Update each notebook and make a new checkpoint.
middle_checkpoints = []
middle_min_dt = None
for user_id, path in paths:
content = update_content(user_id, path, '1st addition')
dt = managers[user_id].create_checkpoint(path)['last_modified']
middle_checkpoints.append((user_id, path, dt, content))
if middle_min_dt is None:
middle_min_dt = dt
# Update each notebook again and make another checkpoint.
end_checkpoints = []
end_min_dt = None
for user_id, path in paths:
content = update_content(user_id, path, '2nd addition')
dt = managers[user_id].create_checkpoint(path)['last_modified']
end_checkpoints.append((user_id, path, dt, content))
if end_min_dt is None:
end_min_dt = dt
def concat_all(lists):
return sum(lists, [])
def check_call(kwargs, expect_checkpoints):
"""
Call `generate_checkpoints`; check that all expected checkpoints
are found, with the correct content, in the correct order.
"""
checkpoint_record = []
for result in generate_checkpoints(self.engine,
self.crypto_factory, **kwargs):
manager = managers[result['user_id']]
# This recreates functionality from
# `manager._notebook_model_from_db` to match with the model
# returned by `manager.get`.
nb = result['content']
manager.mark_trusted_cells(nb, result['path'])
checkpoint_record.append((result['user_id'], result['path'],
result['last_modified'], nb))
# Make sure all checkpoints were found in the right order
self.assertEqual(checkpoint_record, expect_checkpoints)
# No `min_dt`/`max_dt`
check_call({}, concat_all([beginning_checkpoints, middle_checkpoints,
end_checkpoints]))
# `min_dt` cuts off `beginning_checkpoints` checkpoints
check_call({'min_dt': middle_min_dt},
concat_all([middle_checkpoints, end_checkpoints]))
# `max_dt` cuts off `end_checkpoints` checkpoints
check_call({'max_dt': end_min_dt},
concat_all([beginning_checkpoints, middle_checkpoints]))
# `min_dt` and `max_dt` together isolate `middle_checkpoints`
check_call({'min_dt': middle_min_dt, 'max_dt': end_min_dt},
middle_checkpoints)
|
Shao-Feng/crosswalk-test-suite
|
refs/heads/master
|
apptools/apptools-windows-tests/apptools/comm.py
|
11
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import os
import sys
import stat
import shutil
import urllib2
import subprocess
import time
import re
import zipfile
from bs4 import BeautifulSoup
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
DEFAULT_CMD_TIMEOUT = 600
def setUp():
global XwalkPath, windowsCrosswalk, PackTools, HOST_PREFIX, SHELL_FLAG, cachedir, crosswalkversion
cachedir = os.environ.get('CROSSWALK_APP_TOOLS_CACHE_DIR')
HOST_PREFIX = "node "
SHELL_FLAG = "False"
PackTools = os.environ.get('CROSSWALK_APP_SRC')
if not PackTools:
PackTools = ConstPath + "/../tools/crosswalk-app-tools/src/"
XwalkPath = ConstPath + "/../tools/"
if not PackTools and "crosswalk-app-tools" not in os.listdir(XwalkPath):
print "Please check if the crosswalk-app-tools exists in " + ConstPath + "/../tools/"
sys.exit(1)
if not cachedir:
for i in range(len(os.listdir(XwalkPath))):
if os.listdir(XwalkPath)[i].startswith("crosswalk") and os.listdir(XwalkPath)[i].endswith(".zip"):
windowsCrosswalk = os.listdir(XwalkPath)[i]
else:
for i in range(len(os.listdir(cachedir))):
if os.listdir(cachedir)[i].startswith("crosswalk") and os.listdir(cachedir)[i].endswith(".zip"):
windowsCrosswalk = os.listdir(cachedir)[i]
crosswalkversion = windowsCrosswalk[windowsCrosswalk.index("-") + 1:windowsCrosswalk.index(".zip")].strip()
if not windowsCrosswalk:
print "Please check if the Crosswalk Binary exists in " + ConstPath + "/../tools/"
sys.exit(1)
def getstatusoutput(cmd, time_out=DEFAULT_CMD_TIMEOUT):
print cmd
pre_time = time.time()
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=SHELL_FLAG)
while True:
output_line = cmd_proc.stdout.read()
cmd_return_code = cmd_proc.poll()
elapsed_time = time.time() - pre_time
if cmd_return_code is None:
if elapsed_time >= time_out:
killProcesses(ppid=cmd_proc.pid)
return False
elif output_line == '' and cmd_return_code is not None:
break
sys.stdout.write(output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def clear(pkg):
os.chdir(XwalkPath)
if os.path.exists(ConstPath + "/../tools/" + pkg):
if os.path.exists(ConstPath + "/../tools/" + pkg + "/prj"):
shutil.rmtree(pkg + "/prj")
shutil.rmtree(pkg)
def create(self):
clear("org.xwalk.test")
setUp()
os.chdir(XwalkPath)
cmd = HOST_PREFIX + PackTools + \
"crosswalk-app create org.xwalk.test --platform=windows --windows-crosswalk=" + \
XwalkPath + windowsCrosswalk
return_code = os.system(cmd)
self.assertEquals(return_code, 0)
self.assertIn("org.xwalk.test", os.listdir(os.getcwd()))
def build(self, cmd):
return_code = os.system(cmd)
self.assertEquals(return_code, 0)
apks = os.listdir(os.getcwd())
apkLength = 0
for i in range(len(apks)):
if apks[i].endswith(".msi"):
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1][:apks[i].split('-')[1].index(".msi")].strip()
self.assertEquals(apkLength, 1)
return appVersion
def update(self, cmd):
(return_update_code, update_output) = getstatusoutput(cmd)
self.assertEquals(return_update_code, 0)
self.assertNotIn("ERROR:", update_output[0])
def check_crosswalk_version(self, channel):
htmlDoc = urllib2.urlopen(
'https://download.01.org/crosswalk/releases/crosswalk/windows/' +
channel +
'/').read()
soup = BeautifulSoup(htmlDoc)
alist = soup.find_all('a')
version = ''
for index in range(-1, -len(alist)-1, -1):
aEle = alist[index]
version = aEle['href'].strip('/')
if re.search('[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*', version):
break
return version
def unzip_dir(zipfilename, unzipdirname):
fullzipfilename = os.path.abspath(zipfilename)
fullunzipdirname = os.path.abspath(unzipdirname)
print "Start to unzip file %s to folder %s ..." % (zipfilename, unzipdirname)
#Check input ...
if not os.path.exists(fullzipfilename):
print "Dir/File %s is not exist,.." % fullzipfilename
return
#Start extract files ...
srcZip = zipfile.ZipFile(fullzipfilename, "r")
for eachfile in srcZip.namelist():
if not eachfile.endswith('/'):
print "Unzip file %s ..." % eachfile
eachfilename = os.path.join(fullunzipdirname, eachfile)
eachdirname = os.path.dirname(eachfilename)
if not os.path.exists(eachdirname):
os.makedirs(eachdirname)
f = file(eachfilename, 'wb')
f.write(srcZip.read(eachfile))
f.close()
srcZip.close()
print "Unzip file succeed!"
|
40123210/w17b_exam
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/weakref.py
|
769
|
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet"]
class WeakValueDictionary(collections.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # The referent was garbage collected after the weakref was
                # retrieved, so treat the key as missing.
return default
else:
return o
def items(self):
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.values():
yield wr
def values(self):
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return list(self.data.values())
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
class WeakKeyDictionary(collections.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
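# A minimal, self-contained sketch of how the two mappings behave; it only runs
# when this module is executed directly, and the "Value" class is a placeholder
# used purely for illustration.
if __name__ == "__main__":
    import gc
    class Value:
        pass
    wvd = WeakValueDictionary()
    v = Value()
    wvd["k"] = v
    assert "k" in wvd        # entry is kept while the value is strongly referenced
    del v                    # drop the only strong reference
    gc.collect()             # make collection deterministic for the demo
    assert "k" not in wvd    # entry has been discarded
    wkd = WeakKeyDictionary()
    k = Value()
    wkd[k] = 42
    assert wkd[k] == 42      # entry is kept while the key is strongly referenced
    del k
    gc.collect()
    assert len(wkd) == 0     # entry vanished together with its key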
|
tximikel/kuma
|
refs/heads/master
|
vendor/packages/translate/misc/wsgiserver/ssl_pyopenssl.py
|
51
|
"""A library for integrating pyOpenSSL with CherryPy.
The OpenSSL module must be importable for SSL functionality.
You can obtain it from http://pyopenssl.sourceforge.net/
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
SSLAdapter. There are two ways to use SSL:
Method One
----------
* ``ssl_adapter.context``: an instance of SSL.Context.
If this is not None, it is assumed to be an SSL.Context instance,
and will be passed to SSL.Connection on bind(). The developer is
responsible for forming a valid Context object. This approach is
to be preferred for more flexibility, e.g. if the cert and key are
streams instead of files, or need decryption, or SSL.SSLv3_METHOD
is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
the pyOpenSSL documentation for complete options.
Method Two (shortcut)
---------------------
* ``ssl_adapter.certificate``: the filename of the server SSL certificate.
* ``ssl_adapter.private_key``: the filename of the server's private key file.
Both are None by default. If ssl_adapter.context is None, but .private_key
and .certificate are both given and valid, they will be read, and the
context will be automatically created from them.
"""
import socket
import threading
import time
from cherrypy import wsgiserver
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
class SSL_fileobject(wsgiserver.CP_fileobject):
"""SSL file object attached to a socket object."""
ssl_timeout = 3
ssl_retry = .01
def _safe_call(self, is_reader, call, *args, **kwargs):
"""Wrap the given call with SSL error-trapping.
is_reader: if False EOF errors will be raised. If True, EOF errors
will return "" (to emulate normal sockets).
"""
start = time.time()
while True:
try:
return call(*args, **kwargs)
except SSL.WantReadError:
# Sleep and try again. This is dangerous, because it means
# the rest of the stack has no way of differentiating
# between a "new handshake" error and "client dropped".
# Note this isn't an endless loop: there's a timeout below.
time.sleep(self.ssl_retry)
except SSL.WantWriteError:
time.sleep(self.ssl_retry)
except SSL.SysCallError, e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
errnum = e.args[0]
if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
return ""
raise socket.error(errnum)
except SSL.Error, e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
thirdarg = None
try:
thirdarg = e.args[0][0][2]
except IndexError:
pass
if thirdarg == 'http request':
# The client is talking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError()
raise wsgiserver.FatalSSLAlert(*e.args)
except:
raise
if time.time() - start > self.ssl_timeout:
raise socket.timeout("timed out")
def recv(self, *args, **kwargs):
buf = []
r = super(SSL_fileobject, self).recv
while True:
data = self._safe_call(True, r, *args, **kwargs)
buf.append(data)
p = self._sock.pending()
if not p:
return "".join(buf)
def sendall(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).sendall,
*args, **kwargs)
def send(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).send,
*args, **kwargs)
class SSLConnection:
"""A thread-safe wrapper for an SSL.Connection.
``*args``: the arguments to create the wrapped ``SSL.Connection(*args)``.
"""
def __init__(self, *args):
self._ssl_conn = SSL.Connection(*args)
self._lock = threading.RLock()
for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
'renegotiate', 'bind', 'listen', 'connect', 'accept',
'setblocking', 'fileno', 'close', 'get_cipher_list',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'makefile', 'get_app_data', 'set_app_data', 'state_string',
'sock_shutdown', 'get_peer_certificate', 'want_read',
'want_write', 'set_connect_state', 'set_accept_state',
'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
exec("""def %s(self, *args):
self._lock.acquire()
try:
return self._ssl_conn.%s(*args)
finally:
self._lock.release()
""" % (f, f))
def shutdown(self, *args):
self._lock.acquire()
try:
# pyOpenSSL.socket.shutdown takes no args
return self._ssl_conn.shutdown()
finally:
self._lock.release()
class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating pyOpenSSL with CherryPy."""
context = None
"""An instance of SSL.Context."""
certificate = None
"""The filename of the server SSL certificate."""
private_key = None
"""The filename of the server's private key file."""
certificate_chain = None
"""Optional. The filename of CA's intermediate certificate bundle.
This is needed for cheaper "chained root" SSL certificates, and should be
left as None if not required."""
def __init__(self, certificate, private_key, certificate_chain=None):
if SSL is None:
raise ImportError("You must install pyOpenSSL to use HTTPS.")
self.context = None
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
self._environ = None
def bind(self, sock):
"""Wrap and return the given socket."""
if self.context is None:
self.context = self.get_context()
conn = SSLConnection(self.context, sock)
self._environ = self.get_environ()
return conn
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
return sock, self._environ.copy()
def get_context(self):
"""Return an SSL.Context from self attributes."""
# See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
c = SSL.Context(SSL.SSLv23_METHOD)
c.use_privatekey_file(self.private_key)
if self.certificate_chain:
c.load_verify_locations(self.certificate_chain)
c.use_certificate_file(self.certificate)
return c
def get_environ(self):
"""Return WSGI environ entries to be merged into each request."""
ssl_environ = {
"HTTPS": "on",
# pyOpenSSL doesn't provide access to any of these AFAICT
## 'SSL_PROTOCOL': 'SSLv2',
## SSL_CIPHER string The cipher specification name
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
if self.certificate:
# Server certificate attributes
cert = open(self.certificate, 'rb').read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
ssl_environ.update({
'SSL_SERVER_M_VERSION': cert.get_version(),
'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
## 'SSL_SERVER_V_START': Validity of server's certificate (start time),
## 'SSL_SERVER_V_END': Validity of server's certificate (end time),
})
for prefix, dn in [("I", cert.get_issuer()),
("S", cert.get_subject())]:
# X509Name objects don't seem to have a way to get the
# complete DN string. Use str() and slice it instead,
# because str(dn) == "<X509Name object '/C=US/ST=...'>"
dnstr = str(dn)[18:-2]
wsgikey = 'SSL_SERVER_%s_DN' % prefix
ssl_environ[wsgikey] = dnstr
# The DN should be of the form: /k1=v1/k2=v2, but we must allow
# for any value to contain slashes itself (in a URL).
while dnstr:
pos = dnstr.rfind("=")
dnstr, value = dnstr[:pos], dnstr[pos + 1:]
pos = dnstr.rfind("/")
dnstr, key = dnstr[:pos], dnstr[pos + 1:]
if key and value:
wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
ssl_environ[wsgikey] = value
return ssl_environ
def makefile(self, sock, mode='r', bufsize=-1):
if SSL and isinstance(sock, SSL.ConnectionType):
timeout = sock.gettimeout()
f = SSL_fileobject(sock, mode, bufsize)
f.ssl_timeout = timeout
return f
else:
return wsgiserver.CP_fileobject(sock, mode, bufsize)
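# A minimal sketch of "Method Two" from the module docstring: build the adapter
# from certificate/key file names and attach it to a CherryPy WSGI server. The
# file names, bind address and hello_app callable are placeholders, not part of
# the original module, and the block only runs when executed directly.
if __name__ == '__main__':
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello over HTTPS\n']
    server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 8443), hello_app)
    server.ssl_adapter = pyOpenSSLAdapter('server.crt', 'server.key')
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()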
|
sabi0/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/pt_BR/__init__.py
|
12133432
| |
j3parker/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/filters/__init__.py
|
12133432
| |
stefanfoulis/cmsplugin-filer
|
refs/heads/develop
|
cmsplugin_filer_file/migrations/__init__.py
|
12133432
| |
lunafeng/django
|
refs/heads/master
|
django/conf/locale/km/__init__.py
|
12133432
| |
pathomx/pathomx
|
refs/heads/master
|
pathomx/plugins/geo/__init__.py
|
12133432
| |
quickresolve/accel.ai
|
refs/heads/master
|
flask-aws/lib/python2.7/site-packages/botocore/config.py
|
6
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.compat import OrderedDict
from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
from botocore.exceptions import InvalidS3AddressingStyleError
class Config(object):
"""Advanced configuration for Botocore clients.
:type region_name: str
:param region_name: The region to use in instantiating the client
:type signature_version: str
:param signature_version: The signature version when signing requests.
:type user_agent: str
:param user_agent: The value to use in the User-Agent header.
:type user_agent_extra: str
:param user_agent_extra: The value to append to the current User-Agent
header value.
:type connect_timeout: int
:param connect_timeout: The time in seconds till a timeout exception is
thrown when attempting to make a connection. The default is 60
seconds.
:type read_timeout: int
:param read_timeout: The time in seconds till a timeout exception is
thrown when attempting to read from a connection. The default is
60 seconds.
:type parameter_validation: bool
:param parameter_validation: Whether parameter validation should occur
when serializing requests. The default is True. You can disable
parameter validation for performance reasons. Otherwise, it's
recommended to leave parameter validation enabled.
:type max_pool_connections: int
:param max_pool_connections: The maximum number of connections to
keep in a connection pool. If this value is not set, the default
value of 10 is used.
:type s3: dict
:param s3: A dictionary of s3 specific configurations.
Valid keys are:
* 'use_accelerate_endpoint' -- Refers to whether to use the S3
Accelerate endpoint. The value must be a boolean. If True, the
client will use the S3 Accelerate endpoint. If the S3 Accelerate
endpoint is being used then the addressing style will always
be virtual.
* 'payload_signing_enabled' -- Refers to whether or not to SHA256
sign sigv4 payloads. By default, this is disabled for streaming
uploads (UploadPart and PutObject).
* 'addressing_style' -- Refers to the style in which to address
s3 endpoints. Values must be a string that equals:
              * auto -- Addressing style is chosen for the user. Depending
                on the configuration of the client, the endpoint may be addressed in
the virtual or the path style. Note that this is the default
behavior if no style is specified.
* virtual -- Addressing style is always virtual. The name of the
bucket must be DNS compatible or an exception will be thrown.
Endpoints will be addressed as such: mybucket.s3.amazonaws.com
* path -- Addressing style is always by path. Endpoints will be
addressed as such: s3.amazonaws.com/mybucket
"""
OPTION_DEFAULTS = OrderedDict([
('region_name', None),
('signature_version', None),
('user_agent', None),
('user_agent_extra', None),
('connect_timeout', DEFAULT_TIMEOUT),
('read_timeout', DEFAULT_TIMEOUT),
('parameter_validation', True),
('max_pool_connections', MAX_POOL_CONNECTIONS),
('s3', None)
])
def __init__(self, *args, **kwargs):
self._user_provided_options = self._record_user_provided_options(
args, kwargs)
# Merge the user_provided options onto the default options
config_vars = copy.copy(self.OPTION_DEFAULTS)
config_vars.update(self._user_provided_options)
# Set the attributes based on the config_vars
for key, value in config_vars.items():
setattr(self, key, value)
# Validate the s3 options
self._validate_s3_configuration(self.s3)
def _record_user_provided_options(self, args, kwargs):
option_order = list(self.OPTION_DEFAULTS)
user_provided_options = {}
# Iterate through the kwargs passed through to the constructor and
# map valid keys to the dictionary
for key, value in kwargs.items():
if key in self.OPTION_DEFAULTS:
user_provided_options[key] = value
# The key must exist in the available options
else:
raise TypeError(
'Got unexpected keyword argument \'%s\'' % key)
# The number of args should not be longer than the allowed
# options
if len(args) > len(option_order):
raise TypeError(
'Takes at most %s arguments (%s given)' % (
len(option_order), len(args)))
# Iterate through the args passed through to the constructor and map
# them to appropriate keys.
for i, arg in enumerate(args):
            # If a kwarg was also specified for this arg, then error out
if option_order[i] in user_provided_options:
raise TypeError(
'Got multiple values for keyword argument \'%s\'' % (
option_order[i]))
user_provided_options[option_order[i]] = arg
return user_provided_options
def _validate_s3_configuration(self, s3):
if s3 is not None:
addressing_style = s3.get('addressing_style')
if addressing_style not in ['virtual', 'auto', 'path', None]:
raise InvalidS3AddressingStyleError(
s3_addressing_style=addressing_style)
def merge(self, other_config):
"""Merges the config object with another config object
This will merge in all non-default values from the provided config
and return a new config object
:type other_config: botocore.config.Config
        :param other_config: Another config object to merge with. The values
in the provided config object will take precedence in the merging
:returns: A config object built from the merged values of both
config objects.
"""
# Make a copy of the current attributes in the config object.
config_options = copy.copy(self._user_provided_options)
# Merge in the user provided options from the other config
config_options.update(other_config._user_provided_options)
# Return a new config object with the merged properties.
return Config(**config_options)
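# A minimal sketch of how Config objects are meant to be combined: merge()
# returns a new Config in which non-default values from the other object take
# precedence. The region, timeout and addressing-style values below are
# arbitrary examples, and the block only runs when executed directly.
if __name__ == '__main__':
    base = Config(region_name='us-east-1', connect_timeout=5)
    override = Config(s3={'addressing_style': 'path'}, connect_timeout=10)
    merged = base.merge(override)
    print(merged.region_name)      # 'us-east-1', kept from base
    print(merged.connect_timeout)  # 10, taken from override
    print(merged.s3)               # {'addressing_style': 'path'}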
|
tiagofrepereira2012/tensorflow
|
refs/heads/master
|
tensorflow/contrib/rnn/__init__.py
|
25
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN Cells and additional RNN operations.
See @{$python/contrib.rnn} guide.
# From core
@@RNNCell
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
@@LSTMStateTuple
@@DropoutWrapper
@@MultiRNNCell
@@DeviceWrapper
@@ResidualWrapper
# Used to be in core, but kept in contrib.
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
# Created in contrib, eventual plans to move to core.
@@LayerNormBasicLSTMCell
@@LSTMBlockWrapper
@@LSTMBlockCell
@@GRUBlockCell
@@FusedRNNCell
@@FusedRNNCellAdaptor
@@TimeReversedFusedRNN
@@LSTMBlockFusedCell
@@CoupledInputForgetGateLSTMCell
@@TimeFreqLSTMCell
@@GridLSTMCell
@@BidirectionalGridLSTMCell
@@NASCell
@@UGRNNCell
@@IntersectionRNNCell
@@PhasedLSTMCell
@@HighwayWrapper
@@GLSTMCell
# RNNCell wrappers
@@AttentionCellWrapper
@@CompiledWrapper
# RNN functions
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
@@stack_bidirectional_dynamic_rnn
@@stack_bidirectional_rnn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import EmbeddingWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import InputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import OutputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.fused_rnn_cell import *
from tensorflow.contrib.rnn.python.ops.gru_ops import *
from tensorflow.contrib.rnn.python.ops.lstm_ops import *
from tensorflow.contrib.rnn.python.ops.rnn import *
from tensorflow.contrib.rnn.python.ops.rnn_cell import *
from tensorflow.python.ops.rnn import static_bidirectional_rnn
from tensorflow.python.ops.rnn import static_rnn
from tensorflow.python.ops.rnn import static_state_saving_rnn
from tensorflow.python.ops.rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
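# A minimal graph-mode sketch of one of the cells re-exported above, assuming a
# TensorFlow 1.x runtime; the shapes and sizes are arbitrary placeholders and
# the block only runs when this file is executed directly.
if __name__ == '__main__':
    import tensorflow as tf
    inputs = tf.placeholder(tf.float32, [None, 20, 8])  # [batch, time, features]
    cell = tf.contrib.rnn.LSTMBlockCell(num_units=16)
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    print(outputs.shape)  # (?, 20, 16)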
|
rsms/smisk
|
refs/heads/master
|
lib/smisk/wsgi.py
|
1
|
# encoding: utf-8
# Copyright (c) 2008, Eric Moritz <eric@themoritzfamily.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# * copyright notice, this list of conditions and the following
# * disclaimer in the documentation and/or other materials provided
# * with the distribution. Neither the name of the <ORGANIZATION>
# * nor the names of its contributors may be used to endorse or
# * promote products derived from this software without specific
# * prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
This module provides a way to use Smisk as a WSGI backend.
Conforms to :pep:`333`
Example::
def hello_app(env, start_response):
start_response("200 OK", [])
return ["Hello, World"]
from smisk.wsgi import main
main(hello_app)
:author: Eric Moritz
:author: Rasmus Andersson
'''
import os, sys, smisk.core, logging
from smisk.util.main import *
from smisk.config import LOGGING_FORMAT, LOGGING_DATEFMT
__all__ = ['__version__', 'Request', 'Gateway', 'main']
__version__ = (0,1,0)
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
'''Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header'''
return header_name.lower() in _hop_headers
class Request(smisk.core.Request):
'''WSGI request'''
def prepare(self, app):
'''Set up the environment for one request'''
self.env['wsgi.input'] = self.input
self.env['wsgi.errors'] = self.errors
self.env['wsgi.version'] = app.wsgi_version
self.env['wsgi.run_once'] = app.wsgi_run_once
self.env['wsgi.url_scheme'] = app.request.url.scheme
self.env['wsgi.multithread'] = app.wsgi_multithread
self.env['wsgi.multiprocess'] = app.wsgi_multiprocess
# Put a reference of ourselves in the environment so that the user
# might reference other parts of the framework and discover if they
# are running in Smisk or not.
self.env['smisk.app'] = app
# Rebind our send_file to the real send_file
self.send_file = app.response.send_file
def send_file(self, path):
raise NotImplementedError('unprepared request does not have a valid send_file method')
class Gateway(smisk.core.Application):
'''WSGI adapter
'''
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = False
wsgi_multiprocess = True
wsgi_run_once = False
def __init__(self, wsgi_app):
super(Gateway, self).__init__()
self.request_class = Request
self.wsgi_app = wsgi_app
def start_response(self, status, headers, exc_info=None):
'''`start_response()` callable as specified by
`PEP 333 <http://www.python.org/dev/peps/pep-0333/>`__'''
if exc_info:
try:
if self.response.has_begun:
raise exc_info[0],exc_info[1],exc_info[2]
else:
# In this case of response not being initiated yet, this will replace
# both headers and any buffered body.
self.error(exc_info[0], exc_info[1], exc_info[2])
finally:
exc_info = None # Avoid circular ref.
elif len(self.response.headers) != 0:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
# Construct the headers
# Add the status to the headers
self.response.headers = ['Status: '+status]
# Append each of the headers provided by wsgi
self.response.headers += [": ".join(header) for header in headers]
# Add the X-Powered-By header to show off this extension
self.response.headers.append("X-Powered-By: smisk+wsgi/%d.%d.%d" % __version__)
# Return the write function as required by the WSGI spec
return self.response.write
def service(self):
self.request.prepare(self)
output = self.wsgi_app(self.request.env, self.start_response)
# Discussion about Content-Length:
# Output might be an iterable in which case we can not trust len()
    # but in a perfect world, we would know how many parts we got and if
# we only got _one_ we could also add a Content-length. But no.
# Instead, we rely on the host server splitting up things in nice
# chunks, using chunked transfer encoding, (If the server complies
# to HTTP/1.1 it is required to do so, so we are pretty safe) or
# simply rely on the host server setting the Content-Length header.
for data in output:
self.response.write(data)
# XXX TODO replace this main function with the stuff from smisk.util.main
def main(wsgi_app, appdir=None, bind=None, forks=None, handle_errors=True, cli=True):
'''Helper for setting up and running an application.
This is normally what you do in your top module ``__init__``::
from smisk.wsgi import main
from your.app import wsgi_app
main(wsgi_app)
Your module is now a runnable program which automatically configures and
runs your application. There is also a Command Line Interface if `cli`
evaluates to ``True``.
:Parameters:
wsgi_app : callable
A WSGI application
appdir : string
Path to the applications base directory.
bind : string
Bind to address (and port). Note that this overrides ``SMISK_BIND``.
forks : int
Number of child processes to spawn.
handle_errors : bool
Handle any errors by wrapping calls in `handle_errors_wrapper()`
cli : bool
Act as a *Command Line Interface*, parsing command line arguments and
options.
:rtype: None
'''
if cli:
appdir, bind, forks = main_cli_filter(appdir=appdir, bind=bind, forks=forks)
# Setup logging
# Calling basicConfig has no effect if logging is already configured.
logging.basicConfig(format=LOGGING_FORMAT, datefmt=LOGGING_DATEFMT)
# Bind
if bind is not None:
os.environ['SMISK_BIND'] = bind
if 'SMISK_BIND' in os.environ:
smisk.core.bind(os.environ['SMISK_BIND'])
log.info('Listening on %s', smisk.core.listening())
# Configure appdir
setup_appdir(appdir)
# Create the application
application = Gateway(wsgi_app=wsgi_app)
# Forks
if isinstance(forks, int) and forks > -1:
application.forks = forks
# Runloop
if handle_errors:
return handle_errors_wrapper(application.run)
else:
return application.run()
if __name__ == '__main__':
from wsgiref.validate import validator # Import the wsgi validator app
def hello_app(env, start_response):
start_response("200 OK", [('Content-Type', 'text/plain')])
return ["Hello, World"]
if len(sys.argv) != 2:
print "Usage: %s hostname:port" % (sys.argv[0])
print "This runs a sample fastcgi server under the hostname and"
print "port given in argv[1]"
smisk.core.bind(sys.argv[1])
app = validator(hello_app)
Gateway(app).run()
|
pyq881120/Veil-Evasion
|
refs/heads/master
|
tools/pyherion.py
|
12
|
#!/usr/bin/python
"""
PyHerion 1.0
By: @harmj0y
Python 'crypter' that builds a dynamic AES/base64-encoded launcher
(with a random key) that's decoded/decrypted in memory and then executed.
Standalone version of the same functionality integrated into Veil,
in ./modules/common/encryption.py
"""
from Crypto.Cipher import AES
import base64, random, string, sys
# crypto config stuff
BLOCK_SIZE = 32
PADDING = '{'
# used for separating out the import lines
imports = list()
output = list()
# check to make sure it's being called properly
if len(sys.argv) < 2 or len(sys.argv) > 3:
print "\nPyherion 1.0\n\n\tusage:\t./pyherion.py intputfile [outputfile]\n"
sys.exit()
# returns a random string/key of "bytes" length
def randKey(bytes):
return ''.join(random.choice(string.ascii_letters + string.digits + "{}!@#$^&()*&[]|,./?") for x in range(bytes))
# random 3 letter variable generator
def randVar():
return ''.join(random.choice(string.ascii_letters) for x in range(3)) + "_" + ''.join(random.choice("0123456789") for x in range(3))
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: str(s) + (BLOCK_SIZE - len(str(s)) % BLOCK_SIZE) * PADDING
# one-liner to encrypt a code block then base64 it
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
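# Hedged illustration (not executed): with a cipher built the same way as
# below, e.g. c = AES.new(key), the two lambdas are inverses as long as the
# plaintext does not itself end in the PADDING character:
#   DecodeAES(c, EncodeAES(c, "some python source")) == "some python source"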
# generate our key and initialization vector
key = randKey(32)
iv = randKey(16)
input = open(sys.argv[1]).readlines()
pieces = sys.argv[1].split(".")
# build our new filename, "payload.py" -> "payload_crypted.py"
outputName = ".".join(pieces[:-2]) + pieces[-2] + "_crypted." + pieces[-1]
# check if the output name was specified, otherwise use the one built above
if len(sys.argv) == 3:
outputName = sys.argv[2]
f = open(outputName, 'w')
# Detect if the passed argument is a python file
if pieces[-1] == "py":
# separate imports from code- this is because pyinstaller needs to
# know what imports to package with the .exe at compile time.
# Otherwise the imports in the exec() string won't work
for line in input:
if not line.startswith("#"): # ignore commented imports...
if "import" in line:
imports.append(line.strip())
else:
output.append(line)
# build our AES cipher
cipherEnc = AES.new(key)
# encrypt the input file (less the imports)
encrypted = EncodeAES(cipherEnc, "".join(output))
b64var = randVar()
aesvar = randVar()
# randomize our base64 and AES importing variable
imports.append("from base64 import b64decode as %s" %(b64var))
imports.append("from Crypto.Cipher import AES as %s" %(aesvar))
# shuffle up our imports
random.shuffle(imports)
f.write(";".join(imports) + "\n")
# build the exec() launcher
f.write("exec(%s(\"%s\"))" % (b64var,base64.b64encode("exec(%s.new(\"%s\").decrypt(%s(\"%s\")).rstrip('{'))\n" %(aesvar,key,b64var,encrypted))))
f.close()
else:
print "\nonly python files can be used as input files"
sys.exit()
print "\n\tCrypted output written to %s\n" % (outputName)
|
pointgaming/point-gaming-btc
|
refs/heads/master
|
authentication/allowall.py
|
1
|
# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2013 Luke Dashjr <luke-jr+eloipool@utopios.org>
# Written by Peter Leurs <kinlo@triplemining.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
_logger = logging.getLogger('authentication.allowall')
class allowall:
def __init__(self, **k):
pass
def checkAuthentication(self, user, password):
_logger.info("Authenticated user %s", user)
return True
|
mhvk/baseband
|
refs/heads/master
|
baseband/vdif/base.py
|
1
|
# Licensed under the GPLv3 - see LICENSE
import warnings
from collections import namedtuple
import numpy as np
import astropy.units as u
from astropy.utils import lazyproperty
from ..base.base import (
FileBase, VLBIFileReaderBase,
VLBIStreamReaderBase, StreamWriterBase,
FileOpener, FileInfo, HeaderNotFoundError)
from .header import VDIFHeader
from .payload import VDIFPayload
from .frame import VDIFFrame, VDIFFrameSet
from .file_info import VDIFFileReaderInfo
__all__ = ['VDIFFileReader', 'VDIFFileWriter',
'VDIFStreamBase', 'VDIFStreamReader', 'VDIFStreamWriter',
'open', 'info']
# Check code on 2015-MAY-30
# 00000000 77 2c db 00 00 00 00 1c 75 02 00 20 fc ff 01 04 # header 0 - 3
# 00000010 10 00 80 03 ed fe ab ac 00 00 40 33 83 15 03 f2 # header 4 - 7
# 00000020 2a 0a 7c 43 8b 69 9d 59 cb 99 6d 9a 99 96 5d 67 # data 0 - 3
# NOTE: thread_id = 1
# 2a = 00 10 10 10 = (lsb first) 1, 1, 1, -3
# 0a = 00 00 10 10 = 1, 1, -3, -3
# 7c = 01 11 11 00 = -3, 3, 3, -1
# m5d evn/Fd/GP052D_FD_No0006.m5a VDIF_5000-512-1-2 100
# Mark5 stream: 0x16cd140
# stream = File-1/1=evn/Fd/GP052D_FD_No0006.m5a
# format = VDIF_5000-512-1-2 = 3
# start mjd/sec = 56824 21367.000000000
# frame duration = 78125.00 ns
# framenum = 0
# sample rate = 256000000 Hz
# offset = 0
# framebytes = 5032 bytes
# datasize = 5000 bytes
# sample granularity = 4
# frame granularity = 1
# gframens = 78125
# payload offset = 32
# read position = 0
# data window size = 1048576 bytes
# 1 1 1 -3 1 1 -3 -3 -3 3 3 -1 -> OK
# fh = vdif.open('evn/Fd/GP052D_FD_No0006.m5a', 'rb')
# fs = fh.read_frameset()
# fs.data.astype(int)[1, :12, 0] # thread id = 1!!
# -> array([ 1, 1, 1, -3, 1, 1, -3, -3, -3, 3, 3, -1]) -> OK
# Also, next frame (thread #3)
# m5d evn/Fd/GP052D_FD_No0006.m5a VDIF_5000-512-1-2 12 5032
# -1 1 -1 1 -3 -1 3 -1 3 -3 1 3
# fs.data.astype(int)[3, :12, 0]
# -> array([-1, 1, -1, 1, -3, -1, 3, -1, 3, -3, 1, 3])
# And first thread #0
# m5d evn/Fd/GP052D_FD_No0006.m5a VDIF_5000-512-1-2 12 20128
# -1 -1 3 -1 1 -1 3 -1 1 3 -1 1
# fs.data.astype(int)[0, :12, 0]
# -> array([-1, -1, 3, -1, 1, -1, 3, -1, 1, 3, -1, 1])
# sanity check that we can read 12 samples with stream reader
# fh.close()
# fh = vdif.open('evn/Fd/GP052D_FD_No0006.m5a', 'rs')
# fh.read(12).astype(int)[:, 0]
# -> array([-1, -1, 3, -1, 1, -1, 3, -1, 1, 3, -1, 1])
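# A hedged low-level variant of the checks above, using the file reader
# directly; 'sample.vdif' is a placeholder name, not one of the test files.
# fh = vdif.open('sample.vdif', 'rb')          # binary mode gives a VDIFFileReader
# fh.read_header()                             # first VDIFHeader in the file
# fh.seek(0); fh.get_thread_ids()              # sorted thread ids, e.g. [0, 1]
# fh.seek(0); fh.read_frameset().data.shape    # (nthread, samples_per_frame, nchan)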
class VDIFFileReader(VLBIFileReaderBase):
"""Simple reader for VDIF files.
Wraps a binary filehandle, providing methods to help interpret the data,
such as `read_frame`, `read_frameset` and `get_frame_rate`.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw binary data file.
"""
info = VDIFFileReaderInfo()
def read_header(self, edv=None, verify=True):
"""Read a single header from the file.
Parameters
----------
edv : int, False, or None, optional
Extended data version. If `False`, a legacy header is used.
            If `None` (default), it is determined from the header. (Giving it
explicitly is mostly useful for a slight speed-up.)
verify : bool, optional
Whether to do basic verification of integrity. Default: `True`.
Returns
-------
header : `~baseband.vdif.VDIFHeader`
"""
return VDIFHeader.fromfile(self.fh_raw, edv=edv, verify=verify)
def read_frame(self, edv=None, verify=True):
"""Read a single frame (header plus payload).
Parameters
----------
edv : int, optional
The expected extended data version for the VDIF Header. If `None`,
use that of the first frame. (Passing it in slightly improves file
integrity checking.)
verify : bool, optional
Whether to do basic checks of frame integrity. Default: `True`.
Returns
-------
frame : `~baseband.vdif.VDIFFrame`
With ``.header`` and ``.data`` properties that return the
:class:`~baseband.vdif.VDIFHeader` and data encoded in the frame,
respectively.
"""
return VDIFFrame.fromfile(self.fh_raw, edv=edv, verify=verify)
def read_frameset(self, thread_ids=None, edv=None, verify=True):
"""Read a single frame (header plus payload).
Parameters
----------
thread_ids : list, optional
The thread ids that should be read. If `None` (default), read all
threads.
edv : int, optional
The expected extended data version for the VDIF Header. If `None`,
use that of the first frame. (Passing it in slightly improves file
integrity checking.)
verify : bool, optional
Whether to do basic checks of frame integrity. Default: `True`.
Returns
-------
frameset : :class:`~baseband.vdif.VDIFFrameSet`
With ``.headers`` and ``.data`` properties that return a list of
:class:`~baseband.vdif.VDIFHeader` and the data encoded in the
frame set, respectively.
"""
return VDIFFrameSet.fromfile(self.fh_raw, thread_ids, edv=edv,
verify=verify)
def get_frame_rate(self):
"""Determine the number of frames per second.
This method first tries to determine the frame rate by looking for
the highest frame number in the first second of data. If that fails,
it attempts to extract the sample rate from the header.
Returns
-------
frame_rate : `~astropy.units.Quantity`
Frames per second.
"""
try:
return super().get_frame_rate()
except Exception as exc:
with self.temporary_offset(0):
try:
header = self.read_header()
return (header.sample_rate
/ header.samples_per_frame).to(u.Hz).round()
except Exception:
pass
raise exc
def get_thread_ids(self, check=2):
"""Determine the number of threads in the VDIF file.
The file is presumed to be positioned at the start of a header.
Usually, it suffices to just seek to the start of the file, but
if not, use `~baseband.vdif.base.VDIFFileReader.find_header`.
Parameters
----------
check : int, optional
Number of extra frames to check. Frame sets are scanned until
the number of thread IDs found no longer increases for ``check``
frames.
Returns
-------
thread_ids : list
Sorted list of all thread ids encountered in the frames scanned.
"""
with self.temporary_offset():
header = header0 = self.read_header()
try:
thread_ids = set()
n_check = 1
while n_check > 0:
frame_nr = header['frame_nr']
n_thread = len(thread_ids)
while header['frame_nr'] == frame_nr:
thread_ids.add(header['thread_id'])
self.seek(header.payload_nbytes, 1)
header = self.read_header(edv=header0.edv)
assert header0.same_stream(header)
if len(thread_ids) > n_thread:
n_check = check
else:
n_check -= 1
except EOFError:
# Hack: let through very short files (like our samples).
if self.seek(0, 2) > (check * len(thread_ids)
* header0.frame_nbytes):
raise
return sorted(thread_ids)
def find_header(self, pattern=None, *, edv=None, mask=None,
frame_nbytes=None, offset=0,
forward=True, maximum=None, check=1):
"""Find the nearest header from the current position.
Search for a valid header at a given position which is consistent with
``pattern`` and/or with a header a frame size ahead. Note
        that the search is much slower if no pattern is given, as at every
        position an attempt is made to read a header and then to check for
        another one a frame ahead. It helps to pass in ``edv`` and ``frame_nbytes``
(if known).
If successful, the file pointer is left at the start of the header.
Parameters
----------
pattern : `~baseband.vdif.VDIFHeader`, array of byte, or compatible
If given, used for a direct search.
edv : int
EDV of the header, used if ``pattern`` is not given.
mask : array of byte, bytes, iterable of int, string or int
Bit mask for the pattern, with 1 indicating a given bit will
be used the comparison. Only used with ``pattern`` and not
needed if ``pattern`` is a header.
frame_nbytes : int, optional
Frame size in bytes. Defaults to the frame size in any header
passed in.
offset : int, optional
Offset from the frame start that the pattern occurs. Any
offsets inferred from masked entries are added to this (hence,
no offset needed when a header is passed in as ``pattern``,
nor is an offset needed for a full search).
forward : bool, optional
Seek forward if `True` (default), backward if `False`.
maximum : int, optional
Maximum number of bytes to search away from the present location.
Default: search twice the frame size if given, otherwise 10000
(extra bytes to avoid partial patterns will be added).
Use 0 to check only at the current position.
check : int or tuple of int, optional
Frame offsets where another header should be present.
Default: 1, i.e., a sync pattern should be present one
frame after the one found (independent of ``forward``),
thus helping to guarantee the frame is not corrupted.
Returns
-------
header : :class:`~baseband.vdif.VDIFHeader`
Retrieved VDIF header.
Raises
------
~baseband.base.base.HeaderNotFoundError
If no header could be located.
AssertionError
If the header did not pass verification.
"""
if pattern is not None:
locations = self.locate_frames(
pattern, mask=mask, frame_nbytes=frame_nbytes, offset=offset,
forward=forward, maximum=maximum, check=check)
if not locations:
                raise HeaderNotFoundError('could not locate a nearby frame.')
self.seek(locations[0])
with self.temporary_offset():
return self.read_header(edv=getattr(pattern, 'edv', None))
# Try reading headers at a set of locations.
if maximum is None:
maximum = 10000 if frame_nbytes is None else 2 * frame_nbytes
file_pos = self.tell()
# Generate file pointer positions to test.
if forward:
iterate = range(file_pos, file_pos+maximum+1)
else:
iterate = range(file_pos, max(file_pos-maximum-1, -1), -1)
# Loop over all of them to try to find the frame marker.
for frame in iterate:
self.seek(frame)
try:
header = self.read_header(edv=edv)
except Exception:
continue
if (frame_nbytes is not None
and frame_nbytes != header.frame_nbytes):
continue
# Possible hit! Try if there are other headers right around.
self.seek(frame)
try:
return self.find_header(header, maximum=0, check=check)
except Exception:
continue
self.seek(file_pos)
raise HeaderNotFoundError("could not locate a nearby header.")
class VDIFFileWriter(FileBase):
"""Simple writer for VDIF files.
Adds `write_frame` and `write_frameset` methods to the basic VLBI
binary file wrapper.
"""
def write_frame(self, data, header=None, **kwargs):
"""Write a single frame (header plus payload).
Parameters
----------
data : `~numpy.ndarray` or `~baseband.vdif.VDIFFrame`
If an array, a ``header`` should be given, which will be used to
get the information needed to encode the array, and to construct
the VDIF frame.
header : `~baseband.vdif.VDIFHeader`
Can instead give keyword arguments to construct a header. Ignored
if ``data`` is a `~baseband.vdif.VDIFFrame` instance.
**kwargs
If ``header`` is not given, these are used to initialize one.
"""
if not isinstance(data, VDIFFrame):
data = VDIFFrame.fromdata(data, header, **kwargs)
return data.tofile(self.fh_raw)
def write_frameset(self, data, header=None, **kwargs):
"""Write a single frame set (headers plus payloads).
Parameters
----------
data : `~numpy.ndarray` or :class:`~baseband.vdif.VDIFFrameSet`
If an array, a header should be given, which will be used to
get the information needed to encode the array, and to construct
the VDIF frame set.
header : :class:`~baseband.vdif.VDIFHeader`, list of same
Can instead give keyword arguments to construct a header. Ignored
if ``data`` is a :class:`~baseband.vdif.VDIFFrameSet` instance.
If a list, should have a length matching the number of threads in
``data``; if a single header, ``thread_ids`` corresponding
to the number of threads are generated automatically.
**kwargs
If ``header`` is not given, these are used to initialize one.
"""
if not isinstance(data, VDIFFrameSet):
data = VDIFFrameSet.fromdata(data, header, **kwargs)
return data.tofile(self.fh_raw)
class VDIFStreamBase:
"""Provides sample shape maker and fast time and index getting/setting."""
_sample_shape_maker = namedtuple('SampleShape', 'nthread, nchan')
def _get_time(self, header):
"""Get time from a header.
This passes on sample rate, since not all VDIF headers can calculate
it.
"""
return header.get_time(frame_rate=self._frame_rate)
def _set_time(self, header, time):
"""Set time in a header.
This passes on sample rate, which not all VDIF headers can calculate.
"""
header.update(time=time, frame_rate=self._frame_rate)
def _get_index(self, header):
# Override to avoid explicit time calculations.
return int(round((header['seconds'] - self.header0['seconds'])
* self._frame_rate.to_value(u.Hz)
+ header['frame_nr'] - self.header0['frame_nr']))
def _set_index(self, header, index):
# Override to avoid explicit time calculations.
dt, frame_nr = divmod(index + self.header0['frame_nr'],
int(round(self._frame_rate.to_value(u.Hz))))
seconds = self.header0['seconds'] + dt
header['seconds'] = seconds
header['frame_nr'] = frame_nr
def __repr__(self):
return ("<{s.__class__.__name__} name={s.name} offset={s.offset}\n"
" sample_rate={s.sample_rate},"
" samples_per_frame={s.samples_per_frame},\n"
" sample_shape={s.sample_shape},\n"
" bps={s.bps}, complex_data={s.complex_data},"
" edv={h.edv}, station={h.station},\n"
" {sub}start_time={s.start_time}>"
.format(s=self, h=self.header0,
sub=('subset={0}, '.format(self.subset)
if self.subset else '')))
class VDIFStreamReader(VDIFStreamBase, VLBIStreamReaderBase):
"""VLBI VDIF format reader.
Allows access to a VDIF file as a continuous series of samples.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw VDIF stream.
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each
channel in each thread is sampled. If `None` (default), will be
inferred from the header or by scanning one second of the file.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object or tuple of objects, optional
Specific components of the complete sample to decode (after possible
squeezing). If a single indexing object is passed, it selects threads.
If a tuple is passed, the first selects threads and the second selects
channels. If the tuple is empty (default), all components are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
        verify : bool or 'fix', optional
            Whether to do basic checks of frame integrity when reading. The first
            frameset of the stream is always checked. Default: 'fix', which
            additionally replaces missing or bad frames with invalid ones so
            reading can continue.
"""
def __init__(self, fh_raw, sample_rate=None, squeeze=True, subset=(),
fill_value=0., verify='fix'):
fh_raw = VDIFFileReader(fh_raw)
# We read the very first header, hoping this is the right one
# (in some VLBA files not all the headers have the right time).
header0 = fh_raw.read_header()
# Next, we determine how many threads there are, and use those
# to calculate the frameset size. We on purpose do *not* just read
# a frame set, since sometimes the first one is short (see gh-359).
fh_raw.seek(0)
thread_ids = fh_raw.get_thread_ids()
nthread = len(thread_ids)
super().__init__(
fh_raw, header0, sample_rate=sample_rate,
sample_shape=(nthread, header0.nchan), squeeze=squeeze,
subset=subset, fill_value=fill_value, verify=verify)
self._raw_offsets.frame_nbytes *= nthread
# Check whether we are reading only some threads. This is somewhat
# messy since normally we apply the whole subset to the whole data,
# but here we need to split it up in the part that selects specific
# threads, which we use to selectively read, and the rest, which we
# do post-decoding.
if self.subset and (nthread > 1 or not self.squeeze):
# Select the thread ids we want using first part of subset.
thread_ids = np.array(thread_ids)[self.subset[0]]
            # Use squeeze in case subset[0] uses broadcasting, and
# atleast_1d to ensure single threads get upgraded to a list,
# which is needed by the VDIFFrameSet reader.
self._thread_ids = np.atleast_1d(thread_ids.squeeze()).tolist()
# Since we have subset the threads already, we now need to
# determine a new subset that takes this into account.
if thread_ids.shape == ():
# If we indexed with a scalar, we're meant to remove that
# dimension. If we squeeze, this happens automatically, but if
# not, we need to do it explicitly (FrameSet does not do it).
new_subset0 = () if self.squeeze else (0,)
elif len(self._thread_ids) == 1 and self.squeeze:
# If we want a single remaining thread, undo the squeeze.
new_subset0 = (np.newaxis,)
else:
# Just pass on multiple threads or unsqueezed single ones.
new_subset0 = (slice(None),)
self._frameset_subset = new_subset0 + self.subset[1:]
else:
# We either have no subset or we have a single thread that
# will be squeezed away, so the subset is fine as is.
self._frameset_subset = self.subset
self._thread_ids = thread_ids
@lazyproperty
def _last_header(self):
"""Last header of the file."""
# Go to end of file.
maximum = 2 * self._raw_offsets.frame_nbytes
with self.fh_raw.temporary_offset(
-self.header0.frame_nbytes, 2) as fh_raw:
# Find first header with same thread_id going backward.
locations = fh_raw.locate_frames(self.header0, forward=False,
maximum=maximum, check=(-1, 1))
for location in locations:
fh_raw.seek(location)
try:
header = fh_raw.read_header(edv=self.header0.edv)
except Exception: # pragma: no cover
                    # If reading fails, just try the next one -- we would
                    # just lose a bit of the end of the file. Avoiding that
                    # would require an EDV verification stricter than the one
                    # locate_frames uses.
continue
if header['thread_id'] == self.header0['thread_id']:
return header
raise HeaderNotFoundError(
"corrupt VDIF? No thread_id={0} frame in last {1} bytes."
.format(self.header0['thread_id'], maximum))
def _squeeze_and_subset(self, data):
# Overwrite VLBIStreamReaderBase version, since the threads part of
# subset has already been used.
if self.squeeze:
data = data.reshape(data.shape[:1]
+ tuple(sh for sh in data.shape[1:] if sh > 1))
if self._frameset_subset:
data = data[(slice(None),) + self._frameset_subset]
return data
# Overrides to deal with framesets instead of frames.
def _fh_raw_read_frame(self):
return self.fh_raw.read_frameset(self._thread_ids,
edv=self.header0.edv,
verify=self.verify)
def _bad_frame(self, index, frameset, exc):
# Duplication of base class, but able to deal with missing
# frames inside a frame set.
if (frameset is not None and self._get_index(frameset) == index
and index == self._get_index(self._last_header)):
# If we got an exception because we're trying to read beyond the
# last frame, the frame is almost certainly OK, so keep it.
return frameset
if self.verify != 'fix':
raise exc
# If the frameset does contain the right number of frames, but all
# are invalid, assume not just the data but also the frame number
# and seconds might be wrong, i.e., just proceed with it.
# TODO: make this an option for a specific type of fixing!
if (frameset is not None
and len(frameset.frames) == len(self._thread_ids)
and not any(frame.valid for frame in frameset.frames)):
return frameset
msg = 'problem loading frame set {}.'.format(index)
# Where should we be?
raw_offset = self._seek_frame(index)
# See if we're in the right place. First ensure we have a header.
# Here, it is more important that it is a good one than that we go
# too far, so we insist on two consistent frames after it, as well
# as a good one before to guard against corruption of the start of
# the VDIF header.
self.fh_raw.seek(raw_offset)
try:
header = self.fh_raw.find_header(
self.header0, forward=True, check=(-1, 1, 2),
maximum=3*self.header0.frame_nbytes)
except HeaderNotFoundError:
exc.args += (msg + ' Cannot find header nearby.',)
raise exc
# Don't yet know how to deal with excess data.
header_index = self._get_index(header)
if header_index < index:
exc.args += (msg + ' There appears to be excess data.',)
raise exc
# Go backward until we find previous frame, storing offsets
# as we go. We again increase the maximum since we may need
# to jump over a bad bit. We slightly relax our search pattern.
while header_index >= index:
raw_pos = self.fh_raw.tell()
header1 = header
header1_index = header_index
if raw_pos <= 0:
break
self.fh_raw.seek(-1, 1)
try:
header = self.fh_raw.find_header(
self.header0, forward=False,
maximum=4*self.header0.frame_nbytes,
check=(-1, 1))
except HeaderNotFoundError:
exc.args += (msg + ' Could not find previous index.',)
raise exc
header_index = self._get_index(header)
if header_index < header1_index:
# While we are at it: if we pass an index boundary,
# update the list of known indices.
self._raw_offsets[header1_index] = raw_pos
# Move back to position of last good header (header1).
self.fh_raw.seek(raw_pos)
# Create the header we will use below for constructing the
# frameset. Usually, this is guaranteed to be from this set,
# but we also use it to create a new header for a completely
# messed up frameset below. It is copied to make it mutable
# without any risk of messing up possibly memory mapped data.
header = header1.copy()
if header1_index > index:
# Ouch, whole frame set missing!
msg += ' The frame set seems to be missing altogether.'
# Set up to construct a complete missing frame
# (after the very long else clause).
# TODO: just use the writer's _make_frame??
frames = {}
self._set_index(header, index)
else:
assert header1_index == index, \
'at this point, we should have a good header.'
# This header is the first one of its set.
if raw_pos != raw_offset:
msg += ' Stream off by {0} bytes.'.format(raw_offset
- raw_pos)
# Above, we should have added information about
# this index in our offset table.
assert raw_pos == self._raw_offsets[index]
# Try again to read it, however many threads there are.
# TODO: this somewhat duplicates FrameSet.fromfile; possibly
# move code there.
# TODO: Or keep track of header locations above.
# TODO: remove limitation that threads need to be together.
frames = {}
previous = False
frame_nr = header1['frame_nr']
while True:
raw_pos = self.fh_raw.tell()
try:
frame = self.fh_raw.read_frame(edv=self.header0.edv)
assert header.same_stream(frame.header)
                    # Check seconds as well, as a sanity check that this is
                    # a real header. (We do allow it at this point to be
                    # the next frame, hence the seconds can increase by 1.)
assert 0 <= (frame['seconds'] - header['seconds']) <= 1
except EOFError:
# End of file while reading a frame; we're done here.
next_header = None
break
except AssertionError:
# Frame is not OK.
assert previous is not False, \
('first frame should be readable if fully on disk,'
' since we found one correct header.')
# Go back to after previous payload and try finding
# next header. It can be before where we tried above,
# if some bytes in the previous payload were missing.
self.fh_raw.seek(raw_pos - header.payload_nbytes)
try:
next_header = self.fh_raw.find_header(self.header0)
# But sometimes a header is re-found even when
# there isn't really one (e.g., because one of the
# first bytes, defining seconds, is missing).
# Don't ever retry the same one!
if self.fh_raw.tell() == raw_pos:
self.fh_raw.seek(1, 1)
next_header = self.fh_raw.find_header(self.header0)
except HeaderNotFoundError:
# If no header was found, give up. The previous frame
# was likely bad too, so delete it.
if previous is not None:
del frames[previous]
next_header = None
break
# If the next header is not exactly a frame away from
# where we were trying to read, the previous frame was
# likely bad, so discard it.
if self.fh_raw.tell() != raw_pos + header.frame_nbytes:
if previous is not None:
del frames[previous]
previous = None
# Stop if the next header is from a different frame.
if next_header['frame_nr'] != frame_nr:
break
else:
# Successfully read frame. If not of the requested
# set, rewind and break out of the loop.
if frame['frame_nr'] != frame_nr:
next_header = frame.header
self.fh_raw.seek(raw_pos)
break
# Do we have a good frame, giving a new thread?
previous = frame['thread_id']
if previous in frames:
msg += (' Duplicate thread {0} found; discarding.'
.format(previous))
del frames[previous]
else:
# Looks like it, though may still be discarded
# if the next frame is not readable.
frames[previous] = frame
# If the next header is of the next frame, set up the raw
# offset (which likely will be needed, saving some time).
if (next_header is not None
and self._get_index(next_header) == index + 1):
self._raw_offsets[index+1] = self.fh_raw.tell()
        # Create an invalid frame template.
invalid_payload = VDIFPayload(
np.zeros(header.payload_nbytes // 4, '<u4'), header)
invalid_frame = VDIFFrame(header, invalid_payload, valid=False)
frame_list = []
missing = []
for thread in self._thread_ids:
if thread in frames:
frame_list.append(frames[thread])
else:
missing.append(thread)
invalid_frame.header['thread_id'] = thread
frame_list.append(invalid_frame)
if missing:
if frames == {}:
msg += ' All threads set to invalid.'
else:
msg += (' Thread(s) {0} missing; set to invalid.'
.format(missing))
warnings.warn(msg)
frameset = VDIFFrameSet(frame_list)
return frameset
class VDIFStreamWriter(VDIFStreamBase, StreamWriterBase):
"""VLBI VDIF format writer.
Encodes and writes sequences of samples to file.
Parameters
----------
fh_raw : filehandle
Which will write filled sets of frames to storage.
header0 : :class:`~baseband.vdif.VDIFHeader`
Header for the first frame, holding time information, etc.
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each
channel in each thread is sampled. For EDV 1 and 3, can
alternatively set ``sample_rate`` within the header.
nthread : int, optional
Number of threads (e.g., 2 for 2 polarisations). Default: 1.
squeeze : bool, optional
If `True` (default), `write` accepts squeezed arrays as input, and
adds any dimensions of length unity.
"""
def __init__(self, fh_raw, header0=None, sample_rate=None, nthread=1,
squeeze=True):
fh_raw = VDIFFileWriter(fh_raw)
# Get header sample rate
try:
header_sample_rate = header0.sample_rate
except AttributeError:
header_sample_rate = None
if sample_rate is None:
if header_sample_rate is None:
raise ValueError("the sample rate must be passed either "
"explicitly, or through the header if it "
"can be stored there.")
sample_rate = header_sample_rate
elif header_sample_rate is not None:
assert sample_rate == header_sample_rate, (
'sample_rate on header inconsistent with that passed in.')
super().__init__(fh_raw, header0, sample_rate=sample_rate,
sample_shape=(nthread, header0.nchan),
squeeze=squeeze)
self._frame = VDIFFrameSet.fromdata(
np.zeros((self.samples_per_frame, nthread, header0.nchan),
dtype=np.complex64 if self.complex_data else np.float32),
self.header0)
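# A minimal writer sketch (illustrative only, not part of the original module;
# the file name, start time and rates are hypothetical):
#
#     import numpy as np
#     import astropy.units as u
#     from astropy.time import Time
#     from baseband import vdif
#     fw = vdif.open('hypothetical.vdif', 'ws', nthread=2, nchan=1,
#                    sample_rate=32*u.MHz, samples_per_frame=20000,
#                    complex_data=False, bps=2, edv=3,
#                    time=Time('2018-01-01T00:00:00'))
#     fw.write(np.zeros((40000, 2), dtype=np.float32))
#     fw.close()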
open = FileOpener.create(globals(), doc="""
--- For reading a stream : (see :class:`~baseband.vdif.base.VDIFStreamReader`)
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each channel
in each thread is sampled. If `None` (default), will be inferred from the
header or by scanning one second of the file.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object or tuple of objects, optional
Specific components of the complete sample to decode (after possible
squeezing). If a single indexing object is passed, it selects threads.
If a tuple is passed, the first selects threads and the second selects
channels. If the tuple is empty (default), all components are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
verify : bool, optional
Whether to do basic checks of frame integrity when reading. The first
frameset of the stream is always checked. Default: `True`.
--- For writing a stream : (see :class:`~baseband.vdif.base.VDIFStreamWriter`)
header0 : `~baseband.vdif.VDIFHeader`
Header for the first frame, holding time information, etc. Can instead
give keyword arguments to construct a header (see ``**kwargs``).
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each
channel in each thread is sampled. For EDV 1 and 3, can alternatively set
``sample_rate`` within the header.
nthread : int, optional
Number of threads (e.g., 2 for 2 polarisations). Default: 1.
squeeze : bool, optional
If `True` (default), writer accepts squeezed arrays as input, and adds any
dimensions of length unity.
file_size : int or None, optional
When writing to a sequence of files, the maximum size of one file in bytes.
If `None` (default), the file size is unlimited, and only the first
file will be written to.
**kwargs
If no header is given, an attempt is made to construct one from these.
For a standard header, this would include the following.
--- Header keywords : (see :meth:`~baseband.vdif.VDIFHeader.fromvalues`)
time : `~astropy.time.Time`
Start time of the file. Can instead pass on ``ref_epoch`` and
``seconds``.
nchan : int, optional
    Number of channels (default: 1).  Note: different numbers of channels
    per thread are not supported.
complex_data : bool, optional
Whether data are complex. Default: `False`.
bps : int, optional
Bits per elementary sample, i.e. per real or imaginary component for
complex data. Default: 1.
samples_per_frame : int
Number of complete samples per frame. Can alternatively use
``frame_length``, the number of 8-byte words for header plus payload.
For some EDV, this number is fixed (e.g., ``frame_length=629`` for
``edv=3``, which corresponds to 20000 real 2-bit samples per frame).
station : 2 characters, optional
Station ID. Can also be an unsigned 2-byte integer. Default: 0.
edv : {`False`, 0, 1, 2, 3, 4, 0xab}
Extended Data Version.
Notes
-----
One can also pass to ``name`` a list, tuple, or subclass of
`~baseband.helpers.sequentialfile.FileNameSequencer`. For writing to multiple
files, the ``file_size`` keyword must be passed or only the first file will be
written to. One may also pass in a `~baseband.helpers.sequentialfile` object
(opened in 'rb' mode for reading or 'w+b' for writing), though for typical use
cases it is practically identical to passing in a list or template.
""")
info = FileInfo.create(globals())
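# A minimal reader sketch (illustrative only; the file name is hypothetical
# and assumes a valid VDIF file on disk):
#
#     from baseband import vdif
#     with vdif.open('hypothetical.vdif', 'rs') as fh:
#         data = fh.read(20000)   # decoded samples; length-1 axes squeezed
#         t0 = fh.start_time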
|
rcarrillocruz/ansible
|
refs/heads/devel
|
lib/ansible/modules/files/iso_extract.py
|
17
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# (c) 2016, Matt Robinson <git@nerdoftheherd.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Jeroen Hoekx (@jhoekx)
- Matt Robinson (@ribbons)
module: iso_extract
short_description: Extract files from an ISO image.
description:
- This module mounts an iso image in a temporary directory and extracts
files from there to a given destination.
version_added: "2.3"
options:
image:
description:
- The ISO image to extract files from.
required: true
aliases: ['path', 'src']
dest:
description:
- The destination directory to extract files to.
required: true
files:
description:
- A list of files to extract from the image.
- Extracting directories does not work.
required: true
notes:
- Only the file hash (content) is taken into account when deciding whether a
  file needs to be extracted from the ISO image.
'''
EXAMPLES = r'''
- name: Extract kernel and ramdisk from a LiveCD
iso_extract:
image: /tmp/rear-test.iso
dest: /tmp/virt-rear/
files:
- isolinux/kernel
- isolinux/initrd.cgz
'''
RETURN = r'''
#
'''
import os
import shutil
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def main():
module = AnsibleModule(
argument_spec = dict(
image = dict(required=True, type='path', aliases=['path', 'src']),
dest = dict(required=True, type='path'),
files = dict(required=True, type='list'),
),
supports_check_mode = True,
)
image = module.params['image']
dest = module.params['dest']
files = module.params['files']
changed = False
if not os.path.exists(dest):
module.fail_json(msg='Directory "%s" does not exist' % dest)
    if not os.path.exists(image):
module.fail_json(msg='ISO image "%s" does not exist' % image)
tmp_dir = tempfile.mkdtemp()
rc, out, err = module.run_command('mount -o loop,ro "%s" "%s"' % (image, tmp_dir))
if rc != 0:
os.rmdir(tmp_dir)
module.fail_json(msg='Failed to mount ISO image "%s"' % image)
e = None
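    # For each requested file, compare the SHA1 of the copy inside the mounted
    # image with any existing copy in dest; copy (outside check mode) only when
    # they differ, then always unmount and remove the temporary mount point.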
try:
for file in files:
tmp_src = os.path.join(tmp_dir, file)
src_hash = module.sha1(tmp_src)
dest_file = os.path.join(dest, os.path.basename(file))
if os.path.exists(dest_file):
dest_hash = module.sha1(dest_file)
else:
dest_hash = None
if src_hash != dest_hash:
if not module.check_mode:
shutil.copy(tmp_src, dest_file)
changed = True
finally:
module.run_command('umount "%s"' % tmp_dir)
os.rmdir(tmp_dir)
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
Lilykos/invenio
|
refs/heads/master
|
invenio/modules/oauth2server/errors.py
|
16
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""OAuth2Server errors."""
class OAuth2ServerError(Exception):
"""Base class for errors in oauth2server module."""
class ScopeDoesNotExists(OAuth2ServerError):
"""Scope is not registered it scopes registry."""
def __init__(self, scope, *args, **kwargs):
"""Initialize exception by storing invalid scope."""
super(ScopeDoesNotExists, self).__init__(*args, **kwargs)
self.scope = scope
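# A minimal usage sketch (illustrative only; the registry check is hypothetical):
#
#     def check_scope(scope, registry):
#         if scope not in registry:
#             raise ScopeDoesNotExists(scope)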
|
ThomasZh/legend-club-wxpub
|
refs/heads/master
|
foo/dao/evaluation_dao.py
|
1
|
#!/usr/bin/env python
# _*_ coding: utf-8_*_
#
# Copyright 2016 planc2c.com
# thomas@time2box.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pymongo
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))
from comm import singleton
from global_const import MONGO_HOST, MONGO_PORT, MONGO_USR, MONGO_PWD, MONGO_DB
# eval options
class evaluation_dao(singleton):
    _evaluation_collection = None
    def __init__(self):
        if self._evaluation_collection is None:
            conn = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
            db = conn[MONGO_DB]
            db.authenticate(MONGO_USR, MONGO_PWD)
            self._evaluation_collection = db.evaluation
        else:
            logging.info("evaluation_dao has already been initialized......")
    def create(self, json):
        self._evaluation_collection.insert(json)
        logging.info("create eval success......")
    def update(self, json):
        _id = json["_id"]
        self._evaluation_collection.update({"_id": _id}, {"$set": json})
        logging.info("update eval success......")
    def delete(self, _id):
        self._evaluation_collection.remove({"_id": _id})
        logging.info("delete eval success......")
def query_by_vendor(self, vendor_id):
cursor = self._evaluation_collection.find({"vendor_id":vendor_id})
array = []
for i in cursor:
array.append(i)
return array
def query(self, _id):
cursor = self._evaluation_collection.find({"_id":_id})
data = None
for i in cursor:
data = i
return data
def query_by_triprouter(self, trip_router_id):
cursor = self._evaluation_collection.find({"triprouter":trip_router_id})
array = []
for i in cursor:
array.append(i)
return array
def query_by_activity(self, activity_id):
cursor = self._evaluation_collection.find({"activity":activity_id})
array = []
for i in cursor:
array.append(i)
return array
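# A minimal usage sketch (illustrative only; document fields are hypothetical):
#
#     dao = evaluation_dao()
#     dao.create({"_id": "eval-001", "vendor_id": "vendor-42", "score": 5})
#     for ev in dao.query_by_vendor("vendor-42"):
#         print(ev["_id"])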
|
aagusti/o-sipkd
|
refs/heads/master
|
docs/apbd/config.py
|
4
|
db_url_dst = 'postgresql://aagusti:a@localhost/gaji_pns'
|
windygu/zhuaxia
|
refs/heads/master
|
zhuaxia/commander.py
|
3
|
# -*- coding:utf-8 -*-
import sys
import config, util, logging, log, downloader
import xiami as xm
import netease
import re
from threadpool import ThreadPool
from time import sleep
from os import path
from threadpool import Terminate_Watcher
from proxypool import ProxyPool
LOG = log.get_logger("zxLogger")
dl_songs = []
total = 0
done = 0
fmt_parsing = u'解析: "%s" ..... [%s] %s'
fmt_has_song_nm = u'包含%d首歌曲.'
fmt_single_song = u'[曲目] %s'
border = log.hl(u'%s'% ('='*90), 'cyan')
pat_xm = r'^https?://[^/.]*\.xiami\.com/'
pat_163 = r'^https?://music\.163\.com/'
#proxypool
ppool = None
def shall_I_begin(in_str, is_file=False, is_hq=False, need_proxy_pool = False):
#start terminate_watcher
Terminate_Watcher()
global ppool
if need_proxy_pool:
LOG.info(u'初始化proxy pool')
ppool = ProxyPool()
LOG.info(u'proxy pool:[%d] 初始完毕'%len(ppool.proxies))
#xiami obj
xiami_obj = xm.Xiami(config.XIAMI_LOGIN_EMAIL,\
config.XIAMI_LOGIN_PASSWORD, \
is_hq,proxies=ppool)
#netease obj
m163 = netease.Netease(is_hq, proxies=ppool)
if is_file:
from_file(xiami_obj, m163,in_str)
elif re.match(pat_xm, in_str):
from_url_xm(xiami_obj, in_str)
elif re.match(pat_163, in_str):
from_url_163(m163, in_str)
print border
if len(dl_songs):
LOG.info(u' 下载任务总数: %d \n 3秒后开始下载' % len(dl_songs))
sleep(3)
downloader.start_download(dl_songs)
else:
LOG.warning(u' 没有可下载任务,自动退出.')
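# shall_I_begin() dispatches on its input: a text file of URLs (is_file=True),
# a xiami.com URL, or a music.163.com URL, then hands the collected songs to
# downloader.start_download(). A minimal invocation sketch (the URL is
# hypothetical):
#
#     shall_I_begin('http://www.xiami.com/song/123456',
#                   is_file=False, is_hq=False, need_proxy_pool=False)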
def from_url_163(m163, url, verbose=True):
""" parse the input string (163 url), and do download"""
LOG.debug('processing 163 url: "%s"'% url)
msg = u''
if '/song?id=' in url:
song =netease.NeteaseSong(m163,url=url)
dl_songs.append(song)
msg = fmt_parsing % (m163_url_abbr(url),u'曲目', song.song_name)
elif '/album?id=' in url:
album = netease.NeteaseAlbum(m163, url)
dl_songs.extend(album.songs)
msgs = [fmt_parsing % (m163_url_abbr(url),u'专辑', album.artist_name+u' => '+album.album_name)]
if verbose:
for s in album.songs:
msgs.append(fmt_single_song %s.song_name)
msg = u'\n |-> '.join(msgs)
else:
msgs.append(fmt_has_song_nm % len(album.songs))
msg= u' => '.join(msgs)
elif '/playlist?id=' in url:
playlist = netease.NeteasePlayList(m163, url)
dl_songs.extend(playlist.songs)
msgs = [ fmt_parsing % (m163_url_abbr(url),u'歌单',playlist.playlist_name)]
if verbose:
for s in playlist.songs:
msgs.append( fmt_single_song % s.song_name)
msg = u'\n |-> '.join(msgs)
else:
msgs.append(fmt_has_song_nm % len(playlist.songs))
msg= u' => '.join(msgs)
elif '/artist?id=' in url:
topsong= netease.NeteaseTopSong(m163, url)
dl_songs.extend(topsong.songs)
msgs = [fmt_parsing % (m163_url_abbr(url), u'艺人热门歌曲',topsong.artist_name)]
if verbose:
for s in topsong.songs:
msgs.append(fmt_single_song %s.song_name)
msg = u'\n |-> '.join(msgs)
else:
msgs.append( fmt_has_song_nm % len(topsong.songs))
msg = u' => '.join(msgs)
global total, done
done +=1
pre = ('[%d/%d] ' % (done, total)) if not verbose else ''
if not msg:
#unknown url
LOG.error(u'%s [易]不能识别的url [%s].' % (pre,url))
else:
LOG.info(u'%s%s'% (pre,msg))
def from_url_xm(xm_obj, url, verbose=True):
""" parse the input string (xiami url), and do download"""
LOG.debug('processing xiami url: "%s"'% url)
msg = u''
if '/collect/' in url:
collect = xm.Collection(xm_obj, url)
dl_songs.extend(collect.songs)
msgs = [ fmt_parsing % (xiami_url_abbr(url),u'精选集',collect.collection_name)]
if verbose:
for s in collect.songs:
msgs.append( fmt_single_song % s.song_name)
msg = u'\n |-> '.join(msgs)
else:
msgs.append(fmt_has_song_nm % len(collect.songs))
msg= u' => '.join(msgs)
elif '/song/' in url:
song = xm.XiamiSong(xm_obj, url=url)
dl_songs.append(song)
msg = fmt_parsing % (xiami_url_abbr(url),u'曲目', song.song_name)
elif '/album/' in url:
album = xm.Album(xm_obj, url)
dl_songs.extend(album.songs)
msgs = [fmt_parsing % (xiami_url_abbr(url),u'专辑', album.artist_name+u' => '+album.album_name)]
if verbose:
for s in album.songs:
msgs.append(fmt_single_song %s.song_name)
msg = u'\n |-> '.join(msgs)
else:
msgs.append(fmt_has_song_nm % len(album.songs))
msg= u' => '.join(msgs)
elif '/lib-song/u/' in url:
if verbose:
LOG.warning(u'[虾]如用户收藏较多,解析歌曲需要较长时间,请耐心等待')
fav = xm.Favorite(xm_obj, url, verbose)
dl_songs.extend(fav.songs)
msgs = [fmt_parsing % (xiami_url_abbr(url), u'用户收藏','')]
if verbose:
for s in fav.songs:
msgs.append(fmt_single_song %s.song_name)
msg = u'\n |-> '.join(msgs)
else:
msgs.append( fmt_has_song_nm % len(fav.songs))
msg = u' => '.join(msgs)
elif re.search(r'/artist/top/id/\d+', url):
topsong=xm.TopSong(xm_obj, url)
dl_songs.extend(topsong.songs)
msgs = [fmt_parsing % (xiami_url_abbr(url), u'艺人热门歌曲',topsong.artist_name)]
if verbose:
for s in topsong.songs:
msgs.append(fmt_single_song %s.song_name)
msg = u'\n |-> '.join(msgs)
else:
msgs.append( fmt_has_song_nm % len(topsong.songs))
msg = u' => '.join(msgs)
global total, done
done +=1
pre = ('[%d/%d] ' % (done, total)) if not verbose else ''
if not msg:
#unknown url
LOG.error(u'%s [虾]不能识别的url [%s].' % (pre,url))
else:
LOG.info(u'%s%s'% (pre,msg))
def from_file(xm_obj,m163, infile):
""" download objects (songs, albums...) from an input file. """
urls = []
with open(infile) as f:
urls = f.readlines()
global total, done
total = len(urls)
print border
LOG.info(u' 文件包含链接总数: %d' % total)
print border
pool = ThreadPool(config.THREAD_POOL_SIZE)
for link in [u for u in urls if u]:
link = link.rstrip('\n')
if re.match(pat_xm, link):
pool.add_task(from_url_xm, xm_obj,link, verbose=False)
elif re.match(pat_163, link):
pool.add_task(from_url_163, m163,link, verbose=False)
else:
LOG.warning(u' 略过不能识别的url [%s].' % link)
pool.wait_completion()
def xiami_url_abbr(url):
return re.sub(pat_xm,u'[虾] ',url)
def m163_url_abbr(url):
return re.sub(pat_163,u'[易] ',url)
|
javierlgroba/Eventer-gapp
|
refs/heads/master
|
django/contrib/staticfiles/models.py
|
12133432
| |
philanthropy-u/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/credentials/tasks/__init__.py
|
12133432
| |
js0701/chromium-crosswalk
|
refs/heads/master
|
third_party/cython/src/Cython/Compiler/ModuleNode.py
|
87
|
#
# Module parse tree node
#
import cython
cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object,
error=object, warning=object, py_object_type=object, UtilityCode=object,
EncodedString=object)
import os
import operator
from PyrexTypes import CPtrType
import Future
import Annotate
import Code
import Naming
import Nodes
import Options
import TypeSlots
import Version
import PyrexTypes
from Errors import error, warning
from PyrexTypes import py_object_type
from Cython.Utils import open_new_file, replace_suffix, decode_filename
from Code import UtilityCode
from StringEncoding import EncodedString
def check_c_declarations_pxd(module_node):
module_node.scope.check_c_classes_pxd()
return module_node
def check_c_declarations(module_node):
module_node.scope.check_c_classes()
module_node.scope.check_c_functions()
return module_node
class ModuleNode(Nodes.Node, Nodes.BlockNode):
# doc string or None
# body StatListNode
#
# referenced_modules [ModuleScope]
# full_module_name string
#
# scope The module scope.
# compilation_source A CompilationSource (see Main)
# directives Top-level compiler directives
child_attrs = ["body"]
directives = None
def merge_in(self, tree, scope, merge_scope=False):
# Merges in the contents of another tree, and possibly scope. With the
# current implementation below, this must be done right prior
# to code generation.
#
# Note: This way of doing it seems strange -- I believe the
# right concept is to split ModuleNode into a ModuleNode and a
# CodeGenerator, and tell that CodeGenerator to generate code
# from multiple sources.
assert isinstance(self.body, Nodes.StatListNode)
if isinstance(tree, Nodes.StatListNode):
self.body.stats.extend(tree.stats)
else:
self.body.stats.append(tree)
self.scope.utility_code_list.extend(scope.utility_code_list)
def extend_if_not_in(L1, L2):
for x in L2:
if x not in L1:
L1.append(x)
extend_if_not_in(self.scope.include_files, scope.include_files)
extend_if_not_in(self.scope.included_files, scope.included_files)
extend_if_not_in(self.scope.python_include_files,
scope.python_include_files)
if merge_scope:
# Ensure that we don't generate import code for these entries!
for entry in scope.c_class_entries:
entry.type.module_name = self.full_module_name
entry.type.scope.directives["internal"] = True
self.scope.merge_in(scope)
def analyse_declarations(self, env):
if not Options.docstrings:
env.doc = self.doc = None
elif Options.embed_pos_in_docstring:
env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
if not self.doc is None:
env.doc = EncodedString(env.doc + u'\n' + self.doc)
env.doc.encoding = self.doc.encoding
else:
env.doc = self.doc
env.directives = self.directives
self.body.analyse_declarations(env)
def process_implementation(self, options, result):
env = self.scope
env.return_type = PyrexTypes.c_void_type
self.referenced_modules = []
self.find_referenced_modules(env, self.referenced_modules, {})
self.sort_cdef_classes(env)
self.generate_c_code(env, options, result)
self.generate_h_code(env, options, result)
self.generate_api_code(env, result)
def has_imported_c_functions(self):
for module in self.referenced_modules:
for entry in module.cfunc_entries:
if entry.defined_in_pxd:
return 1
return 0
def generate_h_code(self, env, options, result):
def h_entries(entries, api=0, pxd=0):
return [entry for entry in entries
if ((entry.visibility == 'public') or
(api and entry.api) or
(pxd and entry.defined_in_pxd))]
h_types = h_entries(env.type_entries, api=1)
h_vars = h_entries(env.var_entries)
h_funcs = h_entries(env.cfunc_entries)
h_extension_types = h_entries(env.c_class_entries)
if (h_types or h_vars or h_funcs or h_extension_types):
result.h_file = replace_suffix(result.c_file, ".h")
h_code = Code.CCodeWriter()
Code.GlobalState(h_code, self)
if options.generate_pxi:
result.i_file = replace_suffix(result.c_file, ".pxi")
i_code = Code.PyrexCodeWriter(result.i_file)
else:
i_code = None
h_guard = Naming.h_guard_prefix + self.api_name(env)
h_code.put_h_guard(h_guard)
h_code.putln("")
self.generate_type_header_code(h_types, h_code)
if options.capi_reexport_cincludes:
self.generate_includes(env, [], h_code)
h_code.putln("")
api_guard = Naming.api_guard_prefix + self.api_name(env)
h_code.putln("#ifndef %s" % api_guard)
h_code.putln("")
self.generate_extern_c_macro_definition(h_code)
if h_extension_types:
h_code.putln("")
for entry in h_extension_types:
self.generate_cclass_header_code(entry.type, h_code)
if i_code:
self.generate_cclass_include_code(entry.type, i_code)
if h_funcs:
h_code.putln("")
for entry in h_funcs:
self.generate_public_declaration(entry, h_code, i_code)
if h_vars:
h_code.putln("")
for entry in h_vars:
self.generate_public_declaration(entry, h_code, i_code)
h_code.putln("")
h_code.putln("#endif /* !%s */" % api_guard)
h_code.putln("")
h_code.putln("#if PY_MAJOR_VERSION < 3")
h_code.putln("PyMODINIT_FUNC init%s(void);" % env.module_name)
h_code.putln("#else")
h_code.putln("PyMODINIT_FUNC PyInit_%s(void);" % env.module_name)
h_code.putln("#endif")
h_code.putln("")
h_code.putln("#endif /* !%s */" % h_guard)
f = open_new_file(result.h_file)
try:
h_code.copyto(f)
finally:
f.close()
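    # The header written above is, roughly: an include guard, the public
    # struct/enum/typedef declarations, an API guard wrapping the extern "C"
    # declarations of public types, functions and variables, and the
    # PyMODINIT_FUNC prototypes for the Python 2 and Python 3 module init
    # entry points.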
def generate_public_declaration(self, entry, h_code, i_code):
h_code.putln("%s %s;" % (
Naming.extern_c_macro,
entry.type.declaration_code(
entry.cname, dll_linkage = "DL_IMPORT")))
if i_code:
i_code.putln("cdef extern %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
def api_name(self, env):
return env.qualified_name.replace(".", "__")
def generate_api_code(self, env, result):
def api_entries(entries, pxd=0):
return [entry for entry in entries
if entry.api or (pxd and entry.defined_in_pxd)]
api_vars = api_entries(env.var_entries)
api_funcs = api_entries(env.cfunc_entries)
api_extension_types = api_entries(env.c_class_entries)
if api_vars or api_funcs or api_extension_types:
result.api_file = replace_suffix(result.c_file, "_api.h")
h_code = Code.CCodeWriter()
Code.GlobalState(h_code, self)
api_guard = Naming.api_guard_prefix + self.api_name(env)
h_code.put_h_guard(api_guard)
h_code.putln('#include "Python.h"')
if result.h_file:
h_code.putln('#include "%s"' % os.path.basename(result.h_file))
if api_extension_types:
h_code.putln("")
for entry in api_extension_types:
type = entry.type
h_code.putln("static PyTypeObject *%s = 0;" % type.typeptr_cname)
h_code.putln("#define %s (*%s)" % (
type.typeobj_cname, type.typeptr_cname))
if api_funcs:
h_code.putln("")
for entry in api_funcs:
type = CPtrType(entry.type)
cname = env.mangle(Naming.func_prefix, entry.name)
h_code.putln("static %s = 0;" % type.declaration_code(cname))
h_code.putln("#define %s %s" % (entry.name, cname))
if api_vars:
h_code.putln("")
for entry in api_vars:
type = CPtrType(entry.type)
cname = env.mangle(Naming.varptr_prefix, entry.name)
h_code.putln("static %s = 0;" % type.declaration_code(cname))
h_code.putln("#define %s (*%s)" % (entry.name, cname))
h_code.put(UtilityCode.load_as_string("PyIdentifierFromString", "ImportExport.c")[0])
h_code.put(UtilityCode.load_as_string("ModuleImport", "ImportExport.c")[1])
if api_vars:
h_code.put(UtilityCode.load_as_string("VoidPtrImport", "ImportExport.c")[1])
if api_funcs:
h_code.put(UtilityCode.load_as_string("FunctionImport", "ImportExport.c")[1])
if api_extension_types:
h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[1])
h_code.putln("")
h_code.putln("static int import_%s(void) {" % self.api_name(env))
h_code.putln("PyObject *module = 0;")
h_code.putln('module = __Pyx_ImportModule("%s");' % env.qualified_name)
h_code.putln("if (!module) goto bad;")
for entry in api_funcs:
cname = env.mangle(Naming.func_prefix, entry.name)
sig = entry.type.signature_string()
h_code.putln(
'if (__Pyx_ImportFunction(module, "%s", (void (**)(void))&%s, "%s") < 0) goto bad;'
% (entry.name, cname, sig))
for entry in api_vars:
cname = env.mangle(Naming.varptr_prefix, entry.name)
sig = entry.type.declaration_code("")
h_code.putln(
'if (__Pyx_ImportVoidPtr(module, "%s", (void **)&%s, "%s") < 0) goto bad;'
% (entry.name, cname, sig))
h_code.putln("Py_DECREF(module); module = 0;")
for entry in api_extension_types:
self.generate_type_import_call(
entry.type, h_code,
"if (!%s) goto bad;" % entry.type.typeptr_cname)
h_code.putln("return 0;")
h_code.putln("bad:")
h_code.putln("Py_XDECREF(module);")
h_code.putln("return -1;")
h_code.putln("}")
h_code.putln("")
h_code.putln("#endif /* !%s */" % api_guard)
f = open_new_file(result.api_file)
try:
h_code.copyto(f)
finally:
f.close()
def generate_cclass_header_code(self, type, h_code):
h_code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
type.typeobj_cname))
def generate_cclass_include_code(self, type, i_code):
i_code.putln("cdef extern class %s.%s:" % (
type.module_name, type.name))
i_code.indent()
var_entries = type.scope.var_entries
if var_entries:
for entry in var_entries:
i_code.putln("cdef %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
else:
i_code.putln("pass")
i_code.dedent()
def generate_c_code(self, env, options, result):
modules = self.referenced_modules
if Options.annotate or options.annotate:
emit_linenums = False
rootwriter = Annotate.AnnotationCCodeWriter()
else:
emit_linenums = options.emit_linenums
rootwriter = Code.CCodeWriter(emit_linenums=emit_linenums, c_line_in_traceback=options.c_line_in_traceback)
globalstate = Code.GlobalState(rootwriter, self, emit_linenums, options.common_utility_include_dir)
globalstate.initialize_main_c_code()
h_code = globalstate['h_code']
self.generate_module_preamble(env, modules, h_code)
globalstate.module_pos = self.pos
globalstate.directives = self.directives
globalstate.use_utility_code(refnanny_utility_code)
code = globalstate['before_global_var']
code.putln('#define __Pyx_MODULE_NAME "%s"' % self.full_module_name)
code.putln("int %s%s = 0;" % (Naming.module_is_main, self.full_module_name.replace('.', '__')))
code.putln("")
code.putln("/* Implementation of '%s' */" % env.qualified_name)
code = globalstate['all_the_rest']
self.generate_cached_builtins_decls(env, code)
self.generate_lambda_definitions(env, code)
# generate normal variable and function definitions
self.generate_variable_definitions(env, code)
self.body.generate_function_definitions(env, code)
code.mark_pos(None)
self.generate_typeobj_definitions(env, code)
self.generate_method_table(env, code)
if env.has_import_star:
self.generate_import_star(env, code)
self.generate_pymoduledef_struct(env, code)
# init_globals is inserted before this
self.generate_module_init_func(modules[:-1], env, globalstate['init_module'])
self.generate_module_cleanup_func(env, globalstate['cleanup_module'])
if Options.embed:
self.generate_main_method(env, globalstate['main_method'])
self.generate_filename_table(globalstate['filename_table'])
self.generate_declarations_for_modules(env, modules, globalstate)
h_code.write('\n')
for utilcode in env.utility_code_list[:]:
globalstate.use_utility_code(utilcode)
globalstate.finalize_main_c_code()
f = open_new_file(result.c_file)
try:
rootwriter.copyto(f)
finally:
f.close()
result.c_file_generated = 1
if options.gdb_debug:
self._serialize_lineno_map(env, rootwriter)
if Options.annotate or options.annotate:
self._generate_annotations(rootwriter, result)
def _generate_annotations(self, rootwriter, result):
self.annotate(rootwriter)
rootwriter.save_annotation(result.main_source_file, result.c_file)
# if we included files, additionally generate one annotation file for each
if not self.scope.included_files:
return
search_include_file = self.scope.context.search_include_directories
target_dir = os.path.abspath(os.path.dirname(result.c_file))
for included_file in self.scope.included_files:
target_file = os.path.abspath(os.path.join(target_dir, included_file))
target_file_dir = os.path.dirname(target_file)
if not target_file_dir.startswith(target_dir):
# any other directories may not be writable => avoid trying
continue
source_file = search_include_file(included_file, "", self.pos, include=True)
if not source_file:
continue
if target_file_dir != target_dir and not os.path.exists(target_file_dir):
try:
os.makedirs(target_file_dir)
except OSError, e:
import errno
if e.errno != errno.EEXIST:
raise
rootwriter.save_annotation(source_file, target_file)
def _serialize_lineno_map(self, env, ccodewriter):
tb = env.context.gdb_debug_outputwriter
markers = ccodewriter.buffer.allmarkers()
d = {}
for c_lineno, cython_lineno in enumerate(markers):
if cython_lineno > 0:
d.setdefault(cython_lineno, []).append(c_lineno + 1)
tb.start('LineNumberMapping')
for cython_lineno, c_linenos in sorted(d.iteritems()):
attrs = {
'c_linenos': ' '.join(map(str, c_linenos)),
'cython_lineno': str(cython_lineno),
}
tb.start('LineNumber', attrs)
tb.end('LineNumber')
tb.end('LineNumberMapping')
tb.serialize()
def find_referenced_modules(self, env, module_list, modules_seen):
if env not in modules_seen:
modules_seen[env] = 1
for imported_module in env.cimported_modules:
self.find_referenced_modules(imported_module, module_list, modules_seen)
module_list.append(env)
def sort_types_by_inheritance(self, type_dict, type_order, getkey):
# copy the types into a list moving each parent type before
# its first child
type_list = []
for i, key in enumerate(type_order):
new_entry = type_dict[key]
# collect all base classes to check for children
hierarchy = set()
base = new_entry
while base:
base_type = base.type.base_type
if not base_type:
break
base_key = getkey(base_type)
hierarchy.add(base_key)
base = type_dict.get(base_key)
new_entry.base_keys = hierarchy
# find the first (sub-)subclass and insert before that
for j in range(i):
entry = type_list[j]
if key in entry.base_keys:
type_list.insert(j, new_entry)
break
else:
type_list.append(new_entry)
return type_list
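    # Illustrative only (hypothetical class names): for entries Base,
    # Mid(Base) and Leaf(Mid) encountered in the order [Leaf, Base, Mid],
    # each new entry is inserted before the first already-placed entry that
    # lists it among its base keys, so the result is [Base, Mid, Leaf],
    # i.e. every base class precedes its subclasses.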
def sort_type_hierarchy(self, module_list, env):
# poor developer's OrderedDict
vtab_dict, vtab_dict_order = {}, []
vtabslot_dict, vtabslot_dict_order = {}, []
for module in module_list:
for entry in module.c_class_entries:
if entry.used and not entry.in_cinclude:
type = entry.type
key = type.vtabstruct_cname
if not key:
continue
if key in vtab_dict:
# FIXME: this should *never* happen, but apparently it does
# for Cython generated utility code
from Cython.Compiler.UtilityCode import NonManglingModuleScope
assert isinstance(entry.scope, NonManglingModuleScope), str(entry.scope)
assert isinstance(vtab_dict[key].scope, NonManglingModuleScope), str(vtab_dict[key].scope)
else:
vtab_dict[key] = entry
vtab_dict_order.append(key)
all_defined_here = module is env
for entry in module.type_entries:
if entry.used and (all_defined_here or entry.defined_in_pxd):
type = entry.type
if type.is_extension_type and not entry.in_cinclude:
type = entry.type
key = type.objstruct_cname
assert key not in vtabslot_dict, key
vtabslot_dict[key] = entry
vtabslot_dict_order.append(key)
def vtabstruct_cname(entry_type):
return entry_type.vtabstruct_cname
vtab_list = self.sort_types_by_inheritance(
vtab_dict, vtab_dict_order, vtabstruct_cname)
def objstruct_cname(entry_type):
return entry_type.objstruct_cname
vtabslot_list = self.sort_types_by_inheritance(
vtabslot_dict, vtabslot_dict_order, objstruct_cname)
return (vtab_list, vtabslot_list)
def sort_cdef_classes(self, env):
key_func = operator.attrgetter('objstruct_cname')
entry_dict, entry_order = {}, []
for entry in env.c_class_entries:
key = key_func(entry.type)
assert key not in entry_dict, key
entry_dict[key] = entry
entry_order.append(key)
env.c_class_entries[:] = self.sort_types_by_inheritance(
entry_dict, entry_order, key_func)
def generate_type_definitions(self, env, modules, vtab_list, vtabslot_list, code):
# TODO: Why are these separated out?
for entry in vtabslot_list:
self.generate_objstruct_predeclaration(entry.type, code)
vtabslot_entries = set(vtabslot_list)
for module in modules:
definition = module is env
if definition:
type_entries = module.type_entries
else:
type_entries = []
for entry in module.type_entries:
if entry.defined_in_pxd:
type_entries.append(entry)
type_entries = [t for t in type_entries if t not in vtabslot_entries]
self.generate_type_header_code(type_entries, code)
for entry in vtabslot_list:
self.generate_objstruct_definition(entry.type, code)
self.generate_typeobj_predeclaration(entry, code)
for entry in vtab_list:
self.generate_typeobj_predeclaration(entry, code)
self.generate_exttype_vtable_struct(entry, code)
self.generate_exttype_vtabptr_declaration(entry, code)
self.generate_exttype_final_methods_declaration(entry, code)
def generate_declarations_for_modules(self, env, modules, globalstate):
typecode = globalstate['type_declarations']
typecode.putln("")
typecode.putln("/*--- Type declarations ---*/")
# This is to work around the fact that array.h isn't part of the C-API,
# but we need to declare it earlier than utility code.
if 'cpython.array' in [m.qualified_name for m in modules]:
typecode.putln('#ifndef _ARRAYARRAY_H')
typecode.putln('struct arrayobject;')
typecode.putln('typedef struct arrayobject arrayobject;')
typecode.putln('#endif')
vtab_list, vtabslot_list = self.sort_type_hierarchy(modules, env)
self.generate_type_definitions(
env, modules, vtab_list, vtabslot_list, typecode)
modulecode = globalstate['module_declarations']
for module in modules:
defined_here = module is env
modulecode.putln("")
modulecode.putln("/* Module declarations from '%s' */" % module.qualified_name)
self.generate_c_class_declarations(module, modulecode, defined_here)
self.generate_cvariable_declarations(module, modulecode, defined_here)
self.generate_cfunction_declarations(module, modulecode, defined_here)
def generate_module_preamble(self, env, cimported_modules, code):
code.putln("/* Generated by Cython %s */" % Version.watermark)
code.putln("")
code.putln("#define PY_SSIZE_T_CLEAN")
# sizeof(PyLongObject.ob_digit[0]) may have been determined dynamically
# at compile time in CPython, in which case we can't know the correct
# storage size for an installed system. We can rely on it only if
# pyconfig.h defines it statically, i.e. if it was set by "configure".
# Once we include "Python.h", it will come up with its own idea about
# a suitable value, which may or may not match the real one.
code.putln("#ifndef CYTHON_USE_PYLONG_INTERNALS")
code.putln("#ifdef PYLONG_BITS_IN_DIGIT")
# assume it's an incorrect left-over
code.putln("#define CYTHON_USE_PYLONG_INTERNALS 0")
code.putln("#else")
code.putln('#include "pyconfig.h"')
code.putln("#ifdef PYLONG_BITS_IN_DIGIT")
code.putln("#define CYTHON_USE_PYLONG_INTERNALS 1")
code.putln("#else")
code.putln("#define CYTHON_USE_PYLONG_INTERNALS 0")
code.putln("#endif")
code.putln("#endif")
code.putln("#endif")
for filename in env.python_include_files:
code.putln('#include "%s"' % filename)
code.putln("#ifndef Py_PYTHON_H")
code.putln(" #error Python headers needed to compile C extensions, please install development version of Python.")
code.putln("#elif PY_VERSION_HEX < 0x02040000")
code.putln(" #error Cython requires Python 2.4+.")
code.putln("#else")
code.globalstate["end"].putln("#endif /* Py_PYTHON_H */")
from Cython import __version__
code.putln('#define CYTHON_ABI "%s"' % __version__.replace('.', '_'))
code.put(UtilityCode.load_as_string("CModulePreamble", "ModuleSetupCode.c")[1])
code.put("""
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
""")
if Future.division in env.context.future_directives:
code.putln(" #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)")
code.putln(" #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)")
else:
code.putln(" #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)")
code.putln(" #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)")
code.putln("#endif")
code.putln("")
self.generate_extern_c_macro_definition(code)
code.putln("")
code.putln("#if defined(WIN32) || defined(MS_WINDOWS)")
code.putln("#define _USE_MATH_DEFINES")
code.putln("#endif")
code.putln("#include <math.h>")
code.putln("#define %s" % Naming.h_guard_prefix + self.api_name(env))
code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
self.generate_includes(env, cimported_modules, code)
code.putln("")
code.putln("#ifdef PYREX_WITHOUT_ASSERTIONS")
code.putln("#define CYTHON_WITHOUT_ASSERTIONS")
code.putln("#endif")
code.putln("")
if env.directives['ccomplex']:
code.putln("")
code.putln("#if !defined(CYTHON_CCOMPLEX)")
code.putln("#define CYTHON_CCOMPLEX 1")
code.putln("#endif")
code.putln("")
code.put(UtilityCode.load_as_string("UtilityFunctionPredeclarations", "ModuleSetupCode.c")[0])
c_string_type = env.directives['c_string_type']
c_string_encoding = env.directives['c_string_encoding']
if c_string_type not in ('bytes', 'bytearray') and not c_string_encoding:
error(self.pos, "a default encoding must be provided if c_string_type is not a byte type")
code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII %s' % int(c_string_encoding == 'ascii'))
if c_string_encoding == 'default':
code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 1')
else:
code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0')
code.putln('#define __PYX_DEFAULT_STRING_ENCODING "%s"' % c_string_encoding)
if c_string_type == 'bytearray':
c_string_func_name = 'ByteArray'
else:
c_string_func_name = c_string_type.title()
code.putln('#define __Pyx_PyObject_FromString __Pyx_Py%s_FromString' % c_string_func_name)
code.putln('#define __Pyx_PyObject_FromStringAndSize __Pyx_Py%s_FromStringAndSize' % c_string_func_name)
code.put(UtilityCode.load_as_string("TypeConversions", "TypeConversion.c")[0])
# These utility functions are assumed to exist and used elsewhere.
PyrexTypes.c_long_type.create_to_py_utility_code(env)
PyrexTypes.c_long_type.create_from_py_utility_code(env)
PyrexTypes.c_int_type.create_from_py_utility_code(env)
code.put(Nodes.branch_prediction_macros)
code.putln('')
code.putln('static PyObject *%s;' % env.module_cname)
code.putln('static PyObject *%s;' % env.module_dict_cname)
code.putln('static PyObject *%s;' % Naming.builtins_cname)
code.putln('static PyObject *%s;' % Naming.empty_tuple)
code.putln('static PyObject *%s;' % Naming.empty_bytes)
if Options.pre_import is not None:
code.putln('static PyObject *%s;' % Naming.preimport_cname)
code.putln('static int %s;' % Naming.lineno_cname)
code.putln('static int %s = 0;' % Naming.clineno_cname)
code.putln('static const char * %s= %s;' % (Naming.cfilenm_cname, Naming.file_c_macro))
code.putln('static const char *%s;' % Naming.filename_cname)
def generate_extern_c_macro_definition(self, code):
name = Naming.extern_c_macro
code.putln("#ifndef %s" % name)
code.putln(" #ifdef __cplusplus")
code.putln(' #define %s extern "C"' % name)
code.putln(" #else")
code.putln(" #define %s extern" % name)
code.putln(" #endif")
code.putln("#endif")
def generate_includes(self, env, cimported_modules, code):
includes = []
for filename in env.include_files:
byte_decoded_filenname = str(filename)
if byte_decoded_filenname[0] == '<' and byte_decoded_filenname[-1] == '>':
code.putln('#include %s' % byte_decoded_filenname)
else:
code.putln('#include "%s"' % byte_decoded_filenname)
code.putln_openmp("#include <omp.h>")
def generate_filename_table(self, code):
code.putln("")
code.putln("static const char *%s[] = {" % Naming.filetable_cname)
if code.globalstate.filename_list:
for source_desc in code.globalstate.filename_list:
filename = os.path.basename(source_desc.get_filenametable_entry())
escaped_filename = filename.replace("\\", "\\\\").replace('"', r'\"')
code.putln('"%s",' % escaped_filename)
else:
# Some C compilers don't like an empty array
code.putln("0")
code.putln("};")
def generate_type_predeclarations(self, env, code):
pass
def generate_type_header_code(self, type_entries, code):
# Generate definitions of structs/unions/enums/typedefs/objstructs.
#self.generate_gcc33_hack(env, code) # Is this still needed?
# Forward declarations
for entry in type_entries:
if not entry.in_cinclude:
#print "generate_type_header_code:", entry.name, repr(entry.type) ###
type = entry.type
if type.is_typedef: # Must test this first!
pass
elif type.is_struct_or_union or type.is_cpp_class:
self.generate_struct_union_predeclaration(entry, code)
elif type.is_extension_type:
self.generate_objstruct_predeclaration(type, code)
# Actual declarations
for entry in type_entries:
if not entry.in_cinclude:
#print "generate_type_header_code:", entry.name, repr(entry.type) ###
type = entry.type
if type.is_typedef: # Must test this first!
self.generate_typedef(entry, code)
elif type.is_enum:
self.generate_enum_definition(entry, code)
elif type.is_struct_or_union:
self.generate_struct_union_definition(entry, code)
elif type.is_cpp_class:
self.generate_cpp_class_definition(entry, code)
elif type.is_extension_type:
self.generate_objstruct_definition(type, code)
def generate_gcc33_hack(self, env, code):
# Workaround for spurious warning generation in gcc 3.3
code.putln("")
for entry in env.c_class_entries:
type = entry.type
if not type.typedef_flag:
name = type.objstruct_cname
if name.startswith("__pyx_"):
tail = name[6:]
else:
tail = name
code.putln("typedef struct %s __pyx_gcc33_%s;" % (
name, tail))
def generate_typedef(self, entry, code):
base_type = entry.type.typedef_base_type
if base_type.is_numeric:
try:
writer = code.globalstate['numeric_typedefs']
except KeyError:
writer = code
else:
writer = code
writer.mark_pos(entry.pos)
writer.putln("typedef %s;" % base_type.declaration_code(entry.cname))
def sue_predeclaration(self, type, kind, name):
if type.typedef_flag:
return "%s %s;\ntypedef %s %s %s;" % (
kind, name,
kind, name, name)
else:
return "%s %s;" % (kind, name)
def generate_struct_union_predeclaration(self, entry, code):
type = entry.type
if type.is_cpp_class and type.templates:
code.putln("template <typename %s>" % ", typename ".join([T.declaration_code("") for T in type.templates]))
code.putln(self.sue_predeclaration(type, type.kind, type.cname))
def sue_header_footer(self, type, kind, name):
header = "%s %s {" % (kind, name)
footer = "};"
return header, footer
def generate_struct_union_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
scope = type.scope
if scope:
kind = type.kind
packed = type.is_struct and type.packed
if packed:
kind = "%s %s" % (type.kind, "__Pyx_PACKED")
code.globalstate.use_utility_code(packed_struct_utility_code)
header, footer = \
self.sue_header_footer(type, kind, type.cname)
if packed:
code.putln("#if defined(__SUNPRO_C)")
code.putln(" #pragma pack(1)")
code.putln("#elif !defined(__GNUC__)")
code.putln(" #pragma pack(push, 1)")
code.putln("#endif")
code.putln(header)
var_entries = scope.var_entries
if not var_entries:
error(entry.pos,
"Empty struct or union definition not allowed outside a"
" 'cdef extern from' block")
for attr in var_entries:
code.putln(
"%s;" %
attr.type.declaration_code(attr.cname))
code.putln(footer)
if packed:
code.putln("#if defined(__SUNPRO_C)")
code.putln(" #pragma pack()")
code.putln("#elif !defined(__GNUC__)")
code.putln(" #pragma pack(pop)")
code.putln("#endif")
def generate_cpp_class_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
scope = type.scope
if scope:
if type.templates:
code.putln("template <class %s>" % ", class ".join([T.declaration_code("") for T in type.templates]))
# Just let everything be public.
code.put("struct %s" % type.cname)
if type.base_classes:
base_class_decl = ", public ".join(
[base_class.declaration_code("") for base_class in type.base_classes])
code.put(" : public %s" % base_class_decl)
code.putln(" {")
has_virtual_methods = False
has_destructor = False
for attr in scope.var_entries:
if attr.type.is_cfunction and attr.name != "<init>":
code.put("virtual ")
has_virtual_methods = True
if attr.cname[0] == '~':
has_destructor = True
code.putln(
"%s;" %
attr.type.declaration_code(attr.cname))
if has_virtual_methods and not has_destructor:
code.put("virtual ~%s() { }" % type.cname)
code.putln("};")
def generate_enum_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
name = entry.cname or entry.name or ""
header, footer = \
self.sue_header_footer(type, "enum", name)
code.putln(header)
enum_values = entry.enum_values
if not enum_values:
error(entry.pos,
"Empty enum definition not allowed outside a"
" 'cdef extern from' block")
else:
last_entry = enum_values[-1]
# this does not really generate code, just builds the result value
for value_entry in enum_values:
if value_entry.value_node is not None:
value_entry.value_node.generate_evaluation_code(code)
for value_entry in enum_values:
if value_entry.value_node is None:
value_code = value_entry.cname
else:
value_code = ("%s = %s" % (
value_entry.cname,
value_entry.value_node.result()))
if value_entry is not last_entry:
value_code += ","
code.putln(value_code)
code.putln(footer)
if entry.type.typedef_flag:
# Not pre-declared.
code.putln("typedef enum %s %s;" % (name, name))
def generate_typeobj_predeclaration(self, entry, code):
code.putln("")
name = entry.type.typeobj_cname
if name:
if entry.visibility == 'extern' and not entry.in_cinclude:
code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
name))
elif entry.visibility == 'public':
code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_EXPORT"),
name))
# ??? Do we really need the rest of this? ???
#else:
# code.putln("static PyTypeObject %s;" % name)
def generate_exttype_vtable_struct(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate struct declaration for an extension type's vtable.
type = entry.type
scope = type.scope
self.specialize_fused_types(scope)
if type.vtabstruct_cname:
code.putln("")
code.putln(
"struct %s {" %
type.vtabstruct_cname)
if type.base_type and type.base_type.vtabstruct_cname:
code.putln("struct %s %s;" % (
type.base_type.vtabstruct_cname,
Naming.obj_base_cname))
for method_entry in scope.cfunc_entries:
if not method_entry.is_inherited:
code.putln(
"%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.cname))
code.putln(
"};")
def generate_exttype_vtabptr_declaration(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate declaration of pointer to an extension type's vtable.
type = entry.type
if type.vtabptr_cname:
code.putln("static struct %s *%s;" % (
type.vtabstruct_cname,
type.vtabptr_cname))
def generate_exttype_final_methods_declaration(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate final methods prototypes
type = entry.type
for method_entry in entry.type.scope.cfunc_entries:
if not method_entry.is_inherited and method_entry.final_func_cname:
declaration = method_entry.type.declaration_code(
method_entry.final_func_cname)
modifiers = code.build_function_modifiers(method_entry.func_modifiers)
code.putln("static %s%s;" % (modifiers, declaration))
def generate_objstruct_predeclaration(self, type, code):
if not type.scope:
return
code.putln(self.sue_predeclaration(type, "struct", type.objstruct_cname))
def generate_objstruct_definition(self, type, code):
code.mark_pos(type.pos)
# Generate object struct definition for an
# extension type.
if not type.scope:
return # Forward declared but never defined
header, footer = \
self.sue_header_footer(type, "struct", type.objstruct_cname)
code.putln(header)
base_type = type.base_type
if base_type:
basestruct_cname = base_type.objstruct_cname
if basestruct_cname == "PyTypeObject":
# User-defined subclasses of type are heap allocated.
basestruct_cname = "PyHeapTypeObject"
code.putln(
"%s%s %s;" % (
("struct ", "")[base_type.typedef_flag],
basestruct_cname,
Naming.obj_base_cname))
else:
code.putln(
"PyObject_HEAD")
if type.vtabslot_cname and not (type.base_type and type.base_type.vtabslot_cname):
code.putln(
"struct %s *%s;" % (
type.vtabstruct_cname,
type.vtabslot_cname))
for attr in type.scope.var_entries:
if attr.is_declared_generic:
attr_type = py_object_type
else:
attr_type = attr.type
code.putln(
"%s;" %
attr_type.declaration_code(attr.cname))
code.putln(footer)
if type.objtypedef_cname is not None:
# Only for exposing public typedef name.
code.putln("typedef struct %s %s;" % (type.objstruct_cname, type.objtypedef_cname))
def generate_c_class_declarations(self, env, code, definition):
for entry in env.c_class_entries:
if definition or entry.defined_in_pxd:
code.putln("static PyTypeObject *%s = 0;" %
entry.type.typeptr_cname)
def generate_cvariable_declarations(self, env, code, definition):
if env.is_cython_builtin:
return
for entry in env.var_entries:
if (entry.in_cinclude or entry.in_closure or
(entry.visibility == 'private' and
not (entry.defined_in_pxd or entry.used))):
continue
storage_class = None
dll_linkage = None
cname = None
init = None
if entry.visibility == 'extern':
storage_class = Naming.extern_c_macro
dll_linkage = "DL_IMPORT"
elif entry.visibility == 'public':
storage_class = Naming.extern_c_macro
if definition:
dll_linkage = "DL_EXPORT"
else:
dll_linkage = "DL_IMPORT"
elif entry.visibility == 'private':
storage_class = "static"
dll_linkage = None
if entry.init is not None:
init = entry.type.literal_code(entry.init)
type = entry.type
cname = entry.cname
if entry.defined_in_pxd and not definition:
storage_class = "static"
dll_linkage = None
type = CPtrType(type)
cname = env.mangle(Naming.varptr_prefix, entry.name)
init = 0
if storage_class:
code.put("%s " % storage_class)
code.put(type.declaration_code(
cname, dll_linkage = dll_linkage))
if init is not None:
code.put_safe(" = %s" % init)
code.putln(";")
if entry.cname != cname:
code.putln("#define %s (*%s)" % (entry.cname, cname))
def generate_cfunction_declarations(self, env, code, definition):
for entry in env.cfunc_entries:
if entry.used or (entry.visibility == 'public' or entry.api):
generate_cfunction_declaration(entry, env, code, definition)
def generate_variable_definitions(self, env, code):
for entry in env.var_entries:
if (not entry.in_cinclude and
entry.visibility == "public"):
code.put(entry.type.declaration_code(entry.cname))
if entry.init is not None:
init = entry.type.literal_code(entry.init)
code.put_safe(" = %s" % init)
code.putln(";")
def generate_typeobj_definitions(self, env, code):
full_module_name = env.qualified_name
for entry in env.c_class_entries:
#print "generate_typeobj_definitions:", entry.name
#print "...visibility =", entry.visibility
if entry.visibility != 'extern':
type = entry.type
scope = type.scope
if scope: # could be None if there was an error
self.generate_exttype_vtable(scope, code)
self.generate_new_function(scope, code, entry)
self.generate_dealloc_function(scope, code)
if scope.needs_gc():
self.generate_traverse_function(scope, code, entry)
if scope.needs_tp_clear():
self.generate_clear_function(scope, code, entry)
if scope.defines_any(["__getitem__"]):
self.generate_getitem_int_function(scope, code)
if scope.defines_any(["__setitem__", "__delitem__"]):
self.generate_ass_subscript_function(scope, code)
if scope.defines_any(["__getslice__", "__setslice__", "__delslice__"]):
warning(self.pos, "__getslice__, __setslice__, and __delslice__ are not supported by Python 3, use __getitem__, __setitem__, and __delitem__ instead", 1)
code.putln("#if PY_MAJOR_VERSION >= 3")
code.putln("#error __getslice__, __setslice__, and __delslice__ not supported in Python 3.")
code.putln("#endif")
if scope.defines_any(["__setslice__", "__delslice__"]):
self.generate_ass_slice_function(scope, code)
if scope.defines_any(["__getattr__","__getattribute__"]):
self.generate_getattro_function(scope, code)
if scope.defines_any(["__setattr__", "__delattr__"]):
self.generate_setattro_function(scope, code)
if scope.defines_any(["__get__"]):
self.generate_descr_get_function(scope, code)
if scope.defines_any(["__set__", "__delete__"]):
self.generate_descr_set_function(scope, code)
self.generate_property_accessors(scope, code)
self.generate_method_table(scope, code)
self.generate_getset_table(scope, code)
self.generate_typeobj_definition(full_module_name, entry, code)
def generate_exttype_vtable(self, scope, code):
# Generate the definition of an extension type's vtable.
type = scope.parent_type
if type.vtable_cname:
code.putln("static struct %s %s;" % (
type.vtabstruct_cname,
type.vtable_cname))
def generate_self_cast(self, scope, code):
type = scope.parent_type
code.putln(
"%s = (%s)o;" % (
type.declaration_code("p"),
type.declaration_code("")))
def generate_new_function(self, scope, code, cclass_entry):
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func = scope.mangle_internal("tp_new")
type = scope.parent_type
base_type = type.base_type
have_entries, (py_attrs, py_buffers, memoryview_slices) = \
scope.get_refcounted_entries()
is_final_type = scope.parent_type.is_final_type
if scope.is_internal:
# internal classes (should) never need None inits, normal zeroing will do
py_attrs = []
cpp_class_attrs = [entry for entry in scope.var_entries
if entry.type.is_cpp_class]
new_func_entry = scope.lookup_here("__new__")
if base_type or (new_func_entry and new_func_entry.is_special
and not new_func_entry.trivial_signature):
unused_marker = ''
else:
unused_marker = 'CYTHON_UNUSED '
if base_type:
freelist_size = 0 # not currently supported
else:
freelist_size = scope.directives.get('freelist', 0)
freelist_name = scope.mangle_internal(Naming.freelist_name)
freecount_name = scope.mangle_internal(Naming.freecount_name)
decls = code.globalstate['decls']
decls.putln("static PyObject *%s(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/" %
slot_func)
code.putln("")
if freelist_size:
code.putln("static %s[%d];" % (
scope.parent_type.declaration_code(freelist_name),
freelist_size))
code.putln("static int %s = 0;" % freecount_name)
code.putln("")
code.putln(
"static PyObject *%s(PyTypeObject *t, %sPyObject *a, %sPyObject *k) {"
% (slot_func, unused_marker, unused_marker))
need_self_cast = (type.vtabslot_cname or
(py_buffers or memoryview_slices or py_attrs) or
cpp_class_attrs)
if need_self_cast:
code.putln("%s;" % scope.parent_type.declaration_code("p"))
if base_type:
tp_new = TypeSlots.get_base_slot_function(scope, tp_slot)
if tp_new is None:
tp_new = "%s->tp_new" % base_type.typeptr_cname
code.putln("PyObject *o = %s(t, a, k);" % tp_new)
else:
code.putln("PyObject *o;")
if freelist_size:
code.globalstate.use_utility_code(
UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
if is_final_type:
type_safety_check = ''
else:
type_safety_check = ' & ((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)'
obj_struct = type.declaration_code("", deref=True)
code.putln("if (CYTHON_COMPILING_IN_CPYTHON && likely((%s > 0) & (t->tp_basicsize == sizeof(%s))%s)) {" % (
freecount_name, obj_struct, type_safety_check))
code.putln("o = (PyObject*)%s[--%s];" % (
freelist_name, freecount_name))
code.putln("memset(o, 0, sizeof(%s));" % obj_struct)
code.putln("(void) PyObject_INIT(o, t);")
if scope.needs_gc():
code.putln("PyObject_GC_Track(o);")
code.putln("} else {")
if not is_final_type:
code.putln("if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {")
code.putln("o = (*t->tp_alloc)(t, 0);")
if not is_final_type:
code.putln("} else {")
code.putln("o = (PyObject *) PyBaseObject_Type.tp_new(t, %s, 0);" % Naming.empty_tuple)
code.putln("}")
code.putln("if (unlikely(!o)) return 0;")
if freelist_size and not base_type:
code.putln('}')
if need_self_cast:
code.putln("p = %s;" % type.cast_code("o"))
#if need_self_cast:
# self.generate_self_cast(scope, code)
if type.vtabslot_cname:
vtab_base_type = type
while vtab_base_type.base_type and vtab_base_type.base_type.vtabstruct_cname:
vtab_base_type = vtab_base_type.base_type
if vtab_base_type is not type:
struct_type_cast = "(struct %s*)" % vtab_base_type.vtabstruct_cname
else:
struct_type_cast = ""
code.putln("p->%s = %s%s;" % (
type.vtabslot_cname,
struct_type_cast, type.vtabptr_cname))
for entry in cpp_class_attrs:
code.putln("new((void*)&(p->%s)) %s();" %
(entry.cname, entry.type.declaration_code("")))
for entry in py_attrs:
code.put_init_var_to_py_none(entry, "p->%s", nanny=False)
for entry in memoryview_slices:
code.putln("p->%s.data = NULL;" % entry.cname)
code.putln("p->%s.memview = NULL;" % entry.cname)
for entry in py_buffers:
code.putln("p->%s.obj = NULL;" % entry.cname)
if cclass_entry.cname == '__pyx_memoryviewslice':
code.putln("p->from_slice.memview = NULL;")
if new_func_entry and new_func_entry.is_special:
if new_func_entry.trivial_signature:
cinit_args = "o, %s, NULL" % Naming.empty_tuple
else:
cinit_args = "o, a, k"
code.putln(
"if (unlikely(%s(%s) < 0)) {" %
(new_func_entry.func_cname, cinit_args))
code.put_decref_clear("o", py_object_type, nanny=False)
code.putln(
"}")
code.putln(
"return o;")
code.putln(
"}")
def generate_dealloc_function(self, scope, code):
tp_slot = TypeSlots.ConstructorSlot("tp_dealloc", '__dealloc__')
slot_func = scope.mangle_internal("tp_dealloc")
base_type = scope.parent_type.base_type
if tp_slot.slot_code(scope) != slot_func:
return # never used
slot_func_cname = scope.mangle_internal("tp_dealloc")
code.putln("")
code.putln(
"static void %s(PyObject *o) {" % slot_func_cname)
is_final_type = scope.parent_type.is_final_type
needs_gc = scope.needs_gc()
weakref_slot = scope.lookup_here("__weakref__")
if weakref_slot not in scope.var_entries:
weakref_slot = None
_, (py_attrs, _, memoryview_slices) = scope.get_refcounted_entries()
cpp_class_attrs = [entry for entry in scope.var_entries
if entry.type.is_cpp_class]
if py_attrs or cpp_class_attrs or memoryview_slices or weakref_slot:
self.generate_self_cast(scope, code)
if not is_final_type:
# in Py3.4+, call tp_finalize() as early as possible
code.putln("#if PY_VERSION_HEX >= 0x030400a1")
if needs_gc:
finalised_check = '!_PyGC_FINALIZED(o)'
else:
finalised_check = (
'(!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))')
code.putln("if (unlikely(Py_TYPE(o)->tp_finalize) && %s) {" %
finalised_check)
# if instance was resurrected by finaliser, return
code.putln("if (PyObject_CallFinalizerFromDealloc(o)) return;")
code.putln("}")
code.putln("#endif")
if needs_gc:
            # We must mark this object as (gc) untracked while tearing
            # it down, lest garbage collection be invoked while this
            # destructor is running.
code.putln("PyObject_GC_UnTrack(o);")
# call the user's __dealloc__
self.generate_usr_dealloc_call(scope, code)
if weakref_slot:
code.putln("if (p->__weakref__) PyObject_ClearWeakRefs(o);")
for entry in cpp_class_attrs:
code.putln("__Pyx_call_destructor(&p->%s);" % entry.cname)
for entry in py_attrs:
code.put_xdecref_clear("p->%s" % entry.cname, entry.type, nanny=False,
clear_before_decref=True)
for entry in memoryview_slices:
code.put_xdecref_memoryviewslice("p->%s" % entry.cname,
have_gil=True)
if base_type:
if needs_gc:
# The base class deallocator probably expects this to be tracked,
# so undo the untracking above.
if base_type.scope and base_type.scope.needs_gc():
code.putln("PyObject_GC_Track(o);")
else:
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln("if (PyType_IS_GC(Py_TYPE(o)->tp_base))")
code.putln("#endif")
code.putln("PyObject_GC_Track(o);")
tp_dealloc = TypeSlots.get_base_slot_function(scope, tp_slot)
if tp_dealloc is not None:
code.putln("%s(o);" % tp_dealloc)
elif base_type.is_builtin_type:
code.putln("%s->tp_dealloc(o);" % base_type.typeptr_cname)
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
base_cname = base_type.typeptr_cname
code.putln("if (likely(%s)) %s->tp_dealloc(o); "
"else __Pyx_call_next_tp_dealloc(o, %s);" % (
base_cname, base_cname, slot_func_cname))
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpDealloc", "ExtensionTypes.c"))
else:
freelist_size = scope.directives.get('freelist', 0)
if freelist_size:
freelist_name = scope.mangle_internal(Naming.freelist_name)
freecount_name = scope.mangle_internal(Naming.freecount_name)
if is_final_type:
type_safety_check = ''
else:
type_safety_check = (
' & ((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)')
type = scope.parent_type
code.putln("if (CYTHON_COMPILING_IN_CPYTHON && ((%s < %d) & (Py_TYPE(o)->tp_basicsize == sizeof(%s))%s)) {" % (
freecount_name, freelist_size, type.declaration_code("", deref=True),
type_safety_check))
code.putln("%s[%s++] = %s;" % (
freelist_name, freecount_name, type.cast_code("o")))
code.putln("} else {")
code.putln("(*Py_TYPE(o)->tp_free)(o);")
if freelist_size:
code.putln("}")
code.putln(
"}")
def generate_usr_dealloc_call(self, scope, code):
entry = scope.lookup_here("__dealloc__")
if not entry:
return
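        # The block emitted below saves any pending exception, temporarily bumps the
        # refcount so code run from the user's __dealloc__ cannot re-trigger
        # deallocation of the half-torn-down object, then restores both.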
code.putln("{")
code.putln("PyObject *etype, *eval, *etb;")
code.putln("PyErr_Fetch(&etype, &eval, &etb);")
code.putln("++Py_REFCNT(o);")
code.putln("%s(o);" % entry.func_cname)
code.putln("--Py_REFCNT(o);")
code.putln("PyErr_Restore(etype, eval, etb);")
code.putln("}")
def generate_traverse_function(self, scope, code, cclass_entry):
tp_slot = TypeSlots.GCDependentSlot("tp_traverse")
slot_func = scope.mangle_internal("tp_traverse")
base_type = scope.parent_type.base_type
if tp_slot.slot_code(scope) != slot_func:
return # never used
code.putln("")
code.putln(
"static int %s(PyObject *o, visitproc v, void *a) {"
% slot_func)
have_entries, (py_attrs, py_buffers, memoryview_slices) = (
scope.get_refcounted_entries(include_gc_simple=False))
if base_type or py_attrs:
code.putln("int e;")
if py_attrs or py_buffers:
self.generate_self_cast(scope, code)
if base_type:
# want to call it explicitly if possible so inlining can be performed
static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
if static_call:
code.putln("e = %s(o, v, a); if (e) return e;" % static_call)
elif base_type.is_builtin_type:
base_cname = base_type.typeptr_cname
code.putln("if (!%s->tp_traverse); else { e = %s->tp_traverse(o,v,a); if (e) return e; }" % (
base_cname, base_cname))
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
base_cname = base_type.typeptr_cname
code.putln("e = ((likely(%s)) ? ((%s->tp_traverse) ? %s->tp_traverse(o, v, a) : 0) : __Pyx_call_next_tp_traverse(o, v, a, %s)); if (e) return e;" % (
base_cname, base_cname, base_cname, slot_func))
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpTraverse", "ExtensionTypes.c"))
for entry in py_attrs:
var_code = "p->%s" % entry.cname
code.putln(
"if (%s) {"
% var_code)
if entry.type.is_extension_type:
var_code = "((PyObject*)%s)" % var_code
code.putln(
"e = (*v)(%s, a); if (e) return e;"
% var_code)
code.putln(
"}")
# Traverse buffer exporting objects.
# Note: not traversing memoryview attributes of memoryview slices!
        # Visiting them when triggered by the GC would cause multiple visits
        # (gc_refs subtractions that are not matched by the reference count!)
for entry in py_buffers:
cname = entry.cname + ".obj"
code.putln("if (p->%s) {" % cname)
code.putln( "e = (*v)(p->%s, a); if (e) return e;" % cname)
code.putln("}")
code.putln(
"return 0;")
code.putln(
"}")
def generate_clear_function(self, scope, code, cclass_entry):
tp_slot = TypeSlots.GCDependentSlot("tp_clear")
slot_func = scope.mangle_internal("tp_clear")
base_type = scope.parent_type.base_type
if tp_slot.slot_code(scope) != slot_func:
return # never used
have_entries, (py_attrs, py_buffers, memoryview_slices) = (
scope.get_refcounted_entries(include_gc_simple=False))
if py_attrs or py_buffers or base_type:
unused = ''
else:
unused = 'CYTHON_UNUSED '
code.putln("")
code.putln("static int %s(%sPyObject *o) {" % (slot_func, unused))
if py_attrs and Options.clear_to_none:
code.putln("PyObject* tmp;")
if py_attrs or py_buffers:
self.generate_self_cast(scope, code)
if base_type:
# want to call it explicitly if possible so inlining can be performed
static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
if static_call:
code.putln("%s(o);" % static_call)
elif base_type.is_builtin_type:
base_cname = base_type.typeptr_cname
code.putln("if (!%s->tp_clear); else %s->tp_clear(o);" % (
base_cname, base_cname))
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
base_cname = base_type.typeptr_cname
code.putln("if (likely(%s)) { if (%s->tp_clear) %s->tp_clear(o); } else __Pyx_call_next_tp_clear(o, %s);" % (
base_cname, base_cname, base_cname, slot_func))
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpClear", "ExtensionTypes.c"))
if Options.clear_to_none:
for entry in py_attrs:
name = "p->%s" % entry.cname
code.putln("tmp = ((PyObject*)%s);" % name)
if entry.is_declared_generic:
code.put_init_to_py_none(name, py_object_type, nanny=False)
else:
code.put_init_to_py_none(name, entry.type, nanny=False)
code.putln("Py_XDECREF(tmp);")
else:
for entry in py_attrs:
code.putln("Py_CLEAR(p->%s);" % entry.cname)
for entry in py_buffers:
# Note: shouldn't this call __Pyx_ReleaseBuffer ??
code.putln("Py_CLEAR(p->%s.obj);" % entry.cname)
if cclass_entry.cname == '__pyx_memoryviewslice':
code.putln("__PYX_XDEC_MEMVIEW(&p->from_slice, 1);")
code.putln(
"return 0;")
code.putln(
"}")
def generate_getitem_int_function(self, scope, code):
# This function is put into the sq_item slot when
# a __getitem__ method is present. It converts its
# argument to a Python integer and calls mp_subscript.
code.putln(
"static PyObject *%s(PyObject *o, Py_ssize_t i) {" %
scope.mangle_internal("sq_item"))
code.putln(
"PyObject *r;")
code.putln(
"PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;")
code.putln(
"r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);")
code.putln(
"Py_DECREF(x);")
code.putln(
"return r;")
code.putln(
"}")
def generate_ass_subscript_function(self, scope, code):
# Setting and deleting an item are both done through
# the ass_subscript method, so we dispatch to user's __setitem__
# or __delitem__, or raise an exception.
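        # Sketch of the generated slot when only __setitem__ is defined
        # (names illustrative):
        #   static int __pyx_mp_ass_subscript(PyObject *o, PyObject *i, PyObject *v) {
        #       if (v) { return __pyx_pw_..___setitem__(o, i, v); }
        #       else   { /* no __delitem__: try the base type, else raise NotImplementedError */ }
        #   }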
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setitem__")
del_entry = scope.lookup_here("__delitem__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *i, PyObject *v) {" %
scope.mangle_internal("mp_ass_subscript"))
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, i, v);" %
set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "Subscript assignment not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, i);" %
del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_guarded_basetype_call(
self, base_type, substructure, slot, args, code):
if base_type:
base_tpname = base_type.typeptr_cname
if substructure:
code.putln(
"if (%s->%s && %s->%s->%s)" % (
base_tpname, substructure, base_tpname, substructure, slot))
code.putln(
" return %s->%s->%s(%s);" % (
base_tpname, substructure, slot, args))
else:
code.putln(
"if (%s->%s)" % (
base_tpname, slot))
code.putln(
" return %s->%s(%s);" % (
base_tpname, slot, args))
def generate_ass_slice_function(self, scope, code):
# Setting and deleting a slice are both done through
# the ass_slice method, so we dispatch to user's __setslice__
# or __delslice__, or raise an exception.
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setslice__")
del_entry = scope.lookup_here("__delslice__")
code.putln("")
code.putln(
"static int %s(PyObject *o, Py_ssize_t i, Py_ssize_t j, PyObject *v) {" %
scope.mangle_internal("sq_ass_slice"))
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, i, j, v);" %
set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "2-element slice assignment not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, i, j);" %
del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "2-element slice deletion not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_getattro_function(self, scope, code):
# First try to get the attribute using __getattribute__, if defined, or
# PyObject_GenericGetAttr.
#
# If that raises an AttributeError, call the __getattr__ if defined.
#
        # In both cases, the method may be defined in this class or in any base class.
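        # Roughly, when both methods are defined the generated slot reads
        # (names illustrative):
        #   PyObject *v = <__getattribute__ wrapper>(o, n);
        #   if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        #       PyErr_Clear();
        #       v = <__getattr__ wrapper>(o, n);
        #   }
        #   return v;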
def lookup_here_or_base(n,type=None):
# Recursive lookup
if type is None:
type = scope.parent_type
r = type.scope.lookup_here(n)
if r is None and \
type.base_type is not None:
return lookup_here_or_base(n,type.base_type)
else:
return r
getattr_entry = lookup_here_or_base("__getattr__")
getattribute_entry = lookup_here_or_base("__getattribute__")
code.putln("")
code.putln(
"static PyObject *%s(PyObject *o, PyObject *n) {"
% scope.mangle_internal("tp_getattro"))
if getattribute_entry is not None:
code.putln(
"PyObject *v = %s(o, n);" %
getattribute_entry.func_cname)
else:
code.putln(
"PyObject *v = PyObject_GenericGetAttr(o, n);")
if getattr_entry is not None:
code.putln(
"if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
code.putln(
"PyErr_Clear();")
code.putln(
"v = %s(o, n);" %
getattr_entry.func_cname)
code.putln(
"}")
code.putln(
"return v;")
code.putln(
"}")
def generate_setattro_function(self, scope, code):
# Setting and deleting an attribute are both done through
# the setattro method, so we dispatch to user's __setattr__
# or __delattr__ or fall back on PyObject_GenericSetAttr.
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setattr__")
del_entry = scope.lookup_here("__delattr__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *n, PyObject *v) {" %
scope.mangle_internal("tp_setattro"))
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, n, v);" %
set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_setattro", "o, n, v", code)
code.putln(
"return PyObject_GenericSetAttr(o, n, v);")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, n);" %
del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_setattro", "o, n, v", code)
code.putln(
"return PyObject_GenericSetAttr(o, n, 0);")
code.putln(
"}")
code.putln(
"}")
def generate_descr_get_function(self, scope, code):
# The __get__ function of a descriptor object can be
# called with NULL for the second or third arguments
# under some circumstances, so we replace them with
# None in that case.
user_get_entry = scope.lookup_here("__get__")
code.putln("")
code.putln(
"static PyObject *%s(PyObject *o, PyObject *i, PyObject *c) {" %
scope.mangle_internal("tp_descr_get"))
code.putln(
"PyObject *r = 0;")
code.putln(
"if (!i) i = Py_None;")
code.putln(
"if (!c) c = Py_None;")
#code.put_incref("i", py_object_type)
#code.put_incref("c", py_object_type)
code.putln(
"r = %s(o, i, c);" %
user_get_entry.func_cname)
#code.put_decref("i", py_object_type)
#code.put_decref("c", py_object_type)
code.putln(
"return r;")
code.putln(
"}")
def generate_descr_set_function(self, scope, code):
# Setting and deleting are both done through the __set__
# method of a descriptor, so we dispatch to user's __set__
# or __delete__ or raise an exception.
base_type = scope.parent_type.base_type
user_set_entry = scope.lookup_here("__set__")
user_del_entry = scope.lookup_here("__delete__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *i, PyObject *v) {" %
scope.mangle_internal("tp_descr_set"))
code.putln(
"if (v) {")
if user_set_entry:
code.putln(
"return %s(o, i, v);" %
user_set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_descr_set", "o, i, v", code)
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if user_del_entry:
code.putln(
"return %s(o, i);" %
user_del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_descr_set", "o, i, v", code)
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__delete__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_property_accessors(self, cclass_scope, code):
for entry in cclass_scope.property_entries:
property_scope = entry.scope
if property_scope.defines_any(["__get__"]):
self.generate_property_get_function(entry, code)
if property_scope.defines_any(["__set__", "__del__"]):
self.generate_property_set_function(entry, code)
def generate_property_get_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.getter_cname = property_scope.parent_scope.mangle(
Naming.prop_get_prefix, property_entry.name)
get_entry = property_scope.lookup_here("__get__")
code.putln("")
code.putln(
"static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" %
property_entry.getter_cname)
code.putln(
"return %s(o);" %
get_entry.func_cname)
code.putln(
"}")
def generate_property_set_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.setter_cname = property_scope.parent_scope.mangle(
Naming.prop_set_prefix, property_entry.name)
set_entry = property_scope.lookup_here("__set__")
del_entry = property_scope.lookup_here("__del__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {" %
property_entry.setter_cname)
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, v);" %
set_entry.func_cname)
else:
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o);" %
del_entry.func_cname)
else:
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__del__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_typeobj_definition(self, modname, entry, code):
type = entry.type
scope = type.scope
for suite in TypeSlots.substructures:
suite.generate_substructure(scope, code)
code.putln("")
if entry.visibility == 'public':
header = "DL_EXPORT(PyTypeObject) %s = {"
else:
header = "static PyTypeObject %s = {"
#code.putln(header % scope.parent_type.typeobj_cname)
code.putln(header % type.typeobj_cname)
code.putln(
"PyVarObject_HEAD_INIT(0, 0)")
code.putln(
'__Pyx_NAMESTR("%s.%s"), /*tp_name*/' % (
self.full_module_name, scope.class_name))
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
objstruct = "struct %s" % type.objstruct_cname
code.putln(
"sizeof(%s), /*tp_basicsize*/" %
objstruct)
code.putln(
"0, /*tp_itemsize*/")
for slot in TypeSlots.slot_table:
slot.generate(scope, code)
code.putln(
"};")
def generate_method_table(self, env, code):
if env.is_c_class_scope and not env.pyfunc_entries:
return
code.putln("")
code.putln(
"static PyMethodDef %s[] = {" %
env.method_table_cname)
for entry in env.pyfunc_entries:
if not entry.fused_cfunction:
code.put_pymethoddef(entry, ",")
code.putln(
"{0, 0, 0, 0}")
code.putln(
"};")
def generate_getset_table(self, env, code):
if env.property_entries:
code.putln("")
code.putln(
"static struct PyGetSetDef %s[] = {" %
env.getset_table_cname)
for entry in env.property_entries:
if entry.doc:
doc_code = "__Pyx_DOCSTR(%s)" % code.get_string_const(entry.doc)
else:
doc_code = "0"
code.putln(
'{(char *)"%s", %s, %s, %s, 0},' % (
entry.name,
entry.getter_cname or "0",
entry.setter_cname or "0",
doc_code))
code.putln(
"{0, 0, 0, 0, 0}")
code.putln(
"};")
def generate_import_star(self, env, code):
env.use_utility_code(streq_utility_code)
code.putln()
code.putln("static char* %s_type_names[] = {" % Naming.import_star)
for name, entry in sorted(env.entries.items()):
if entry.is_type:
code.putln('"%s",' % name)
code.putln("0")
code.putln("};")
code.putln()
code.enter_cfunc_scope() # as we need labels
code.putln("static int %s(PyObject *o, PyObject* py_name, char *name) {" % Naming.import_star_set)
code.putln("char** type_name = %s_type_names;" % Naming.import_star)
code.putln("while (*type_name) {")
code.putln("if (__Pyx_StrEq(name, *type_name)) {")
code.putln('PyErr_Format(PyExc_TypeError, "Cannot overwrite C type %s", name);')
code.putln('goto bad;')
code.putln("}")
code.putln("type_name++;")
code.putln("}")
old_error_label = code.new_error_label()
code.putln("if (0);") # so the first one can be "else if"
for name, entry in env.entries.items():
if entry.is_cglobal and entry.used:
code.putln('else if (__Pyx_StrEq(name, "%s")) {' % name)
if entry.type.is_pyobject:
if entry.type.is_extension_type or entry.type.is_builtin_type:
code.putln("if (!(%s)) %s;" % (
entry.type.type_test_code("o"),
code.error_goto(entry.pos)))
code.putln("Py_INCREF(o);")
code.put_decref(entry.cname, entry.type, nanny=False)
code.putln("%s = %s;" % (
entry.cname,
PyrexTypes.typecast(entry.type, py_object_type, "o")))
elif entry.type.from_py_function:
rhs = "%s(o)" % entry.type.from_py_function
if entry.type.is_enum:
rhs = PyrexTypes.typecast(entry.type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; if (%s) %s;" % (
entry.cname,
rhs,
entry.type.error_condition(entry.cname),
code.error_goto(entry.pos)))
else:
code.putln('PyErr_Format(PyExc_TypeError, "Cannot convert Python object %s to %s");' % (name, entry.type))
code.putln(code.error_goto(entry.pos))
code.putln("}")
code.putln("else {")
code.putln("if (PyObject_SetAttr(%s, py_name, o) < 0) goto bad;" % Naming.module_cname)
code.putln("}")
code.putln("return 0;")
if code.label_used(code.error_label):
code.put_label(code.error_label)
# This helps locate the offending name.
code.put_add_traceback(self.full_module_name)
code.error_label = old_error_label
code.putln("bad:")
code.putln("return -1;")
code.putln("}")
code.putln(import_star_utility_code)
code.exit_cfunc_scope() # done with labels
def generate_module_init_func(self, imported_modules, env, code):
code.enter_cfunc_scope()
code.putln("")
header2 = "PyMODINIT_FUNC init%s(void)" % env.module_name
header3 = "PyMODINIT_FUNC PyInit_%s(void)" % env.module_name
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln("%s; /*proto*/" % header2)
code.putln(header2)
code.putln("#else")
code.putln("%s; /*proto*/" % header3)
code.putln(header3)
code.putln("#endif")
code.putln("{")
tempdecl_code = code.insertion_point()
code.put_declare_refcount_context()
code.putln("#if CYTHON_REFNANNY")
code.putln("__Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"refnanny\");")
code.putln("if (!__Pyx_RefNanny) {")
code.putln(" PyErr_Clear();")
code.putln(" __Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"Cython.Runtime.refnanny\");")
code.putln(" if (!__Pyx_RefNanny)")
code.putln(" Py_FatalError(\"failed to import 'refnanny' module\");")
code.putln("}")
code.putln("#endif")
code.put_setup_refcount_context(header3)
env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c"))
code.putln("if ( __Pyx_check_binary_version() < 0) %s" % code.error_goto(self.pos))
code.putln("%s = PyTuple_New(0); %s" % (Naming.empty_tuple, code.error_goto_if_null(Naming.empty_tuple, self.pos)))
code.putln("%s = PyBytes_FromStringAndSize(\"\", 0); %s" % (Naming.empty_bytes, code.error_goto_if_null(Naming.empty_bytes, self.pos)))
code.putln("#ifdef __Pyx_CyFunction_USED")
code.putln("if (__Pyx_CyFunction_init() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
code.putln("#ifdef __Pyx_FusedFunction_USED")
code.putln("if (__pyx_FusedFunction_init() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
code.putln("#ifdef __Pyx_Generator_USED")
code.putln("if (__pyx_Generator_init() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
code.putln("/*--- Library function declarations ---*/")
env.generate_library_function_declarations(code)
code.putln("/*--- Threads initialization code ---*/")
code.putln("#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS")
code.putln("#ifdef WITH_THREAD /* Python build with threading support? */")
code.putln("PyEval_InitThreads();")
code.putln("#endif")
code.putln("#endif")
code.putln("/*--- Module creation code ---*/")
self.generate_module_creation_code(env, code)
code.putln("/*--- Initialize various global constants etc. ---*/")
code.putln(code.error_goto_if_neg("__Pyx_InitGlobals()", self.pos))
code.putln("#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)")
code.putln("if (__Pyx_init_sys_getdefaultencoding_params() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
__main__name = code.globalstate.get_py_string_const(
EncodedString("__main__"), identifier=True)
code.putln("if (%s%s) {" % (Naming.module_is_main, self.full_module_name.replace('.', '__')))
code.putln(
'if (__Pyx_SetAttrString(%s, "__name__", %s) < 0) %s;' % (
env.module_cname,
__main__name.cname,
code.error_goto(self.pos)))
code.putln("}")
# set up __file__ and __path__, then add the module to sys.modules
self.generate_module_import_setup(env, code)
if Options.cache_builtins:
code.putln("/*--- Builtin init code ---*/")
code.putln(code.error_goto_if_neg("__Pyx_InitCachedBuiltins()", self.pos))
code.putln("/*--- Constants init code ---*/")
code.putln(code.error_goto_if_neg("__Pyx_InitCachedConstants()", self.pos))
code.putln("/*--- Global init code ---*/")
self.generate_global_init_code(env, code)
code.putln("/*--- Variable export code ---*/")
self.generate_c_variable_export_code(env, code)
code.putln("/*--- Function export code ---*/")
self.generate_c_function_export_code(env, code)
code.putln("/*--- Type init code ---*/")
self.generate_type_init_code(env, code)
code.putln("/*--- Type import code ---*/")
for module in imported_modules:
self.generate_type_import_code_for_module(module, env, code)
code.putln("/*--- Variable import code ---*/")
for module in imported_modules:
self.generate_c_variable_import_code_for_module(module, env, code)
code.putln("/*--- Function import code ---*/")
for module in imported_modules:
self.specialize_fused_types(module)
self.generate_c_function_import_code_for_module(module, env, code)
code.putln("/*--- Execution code ---*/")
code.mark_pos(None)
self.body.generate_execution_code(code)
if Options.generate_cleanup_code:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RegisterModuleCleanup", "ModuleSetupCode.c"))
code.putln("if (__Pyx_RegisterCleanup()) %s;" % code.error_goto(self.pos))
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.putln('if (%s) {' % env.module_cname)
code.put_add_traceback("init %s" % env.qualified_name)
env.use_utility_code(Nodes.traceback_utility_code)
code.put_decref_clear(env.module_cname, py_object_type, nanny=False)
code.putln('} else if (!PyErr_Occurred()) {')
code.putln('PyErr_SetString(PyExc_ImportError, "init %s");' % env.qualified_name)
code.putln('}')
code.put_label(code.return_label)
code.put_finish_refcount_context()
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln("return;")
code.putln("#else")
code.putln("return %s;" % env.module_cname)
code.putln("#endif")
code.putln('}')
tempdecl_code.put_temp_declarations(code.funcstate)
code.exit_cfunc_scope()
def generate_module_import_setup(self, env, code):
module_path = env.directives['set_initial_path']
if module_path == 'SOURCEFILE':
module_path = self.pos[0].filename
if module_path:
code.putln('if (__Pyx_SetAttrString(%s, "__file__", %s) < 0) %s;' % (
env.module_cname,
code.globalstate.get_py_string_const(
EncodedString(decode_filename(module_path))).cname,
code.error_goto(self.pos)))
if env.is_package:
# set __path__ to mark the module as package
temp = code.funcstate.allocate_temp(py_object_type, True)
code.putln('%s = Py_BuildValue("[O]", %s); %s' % (
temp,
code.globalstate.get_py_string_const(
EncodedString(decode_filename(
os.path.dirname(module_path)))).cname,
code.error_goto_if_null(temp, self.pos)))
code.put_gotref(temp)
code.putln(
'if (__Pyx_SetAttrString(%s, "__path__", %s) < 0) %s;' % (
env.module_cname, temp, code.error_goto(self.pos)))
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
elif env.is_package:
# packages require __path__, so all we can do is try to figure
# out the module path at runtime by rerunning the import lookup
package_name, _ = self.full_module_name.rsplit('.', 1)
if '.' in package_name:
parent_name = '"%s"' % (package_name.rsplit('.', 1)[0],)
else:
parent_name = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load(
"SetPackagePathFromImportLib", "ImportExport.c"))
code.putln(code.error_goto_if_neg(
'__Pyx_SetPackagePathFromImportLib(%s, %s)' % (
parent_name,
code.globalstate.get_py_string_const(
EncodedString(env.module_name)).cname),
self.pos))
# CPython may not have put us into sys.modules yet, but relative imports and reimports require it
fq_module_name = self.full_module_name
if fq_module_name.endswith('.__init__'):
fq_module_name = fq_module_name[:-len('.__init__')]
code.putln("#if PY_MAJOR_VERSION >= 3")
code.putln("{")
code.putln("PyObject *modules = PyImport_GetModuleDict(); %s" %
code.error_goto_if_null("modules", self.pos))
code.putln('if (!PyDict_GetItemString(modules, "%s")) {' % fq_module_name)
code.putln(code.error_goto_if_neg('PyDict_SetItemString(modules, "%s", %s)' % (
fq_module_name, env.module_cname), self.pos))
code.putln("}")
code.putln("}")
code.putln("#endif")
def generate_module_cleanup_func(self, env, code):
if not Options.generate_cleanup_code:
return
code.putln('static void %s(CYTHON_UNUSED PyObject *self) {' %
Naming.cleanup_cname)
if Options.generate_cleanup_code >= 2:
code.putln("/*--- Global cleanup code ---*/")
rev_entries = list(env.var_entries)
rev_entries.reverse()
for entry in rev_entries:
if entry.visibility != 'extern':
if entry.type.is_pyobject and entry.used:
code.put_xdecref_clear(
entry.cname, entry.type,
clear_before_decref=True,
nanny=False)
code.putln("__Pyx_CleanupGlobals();")
if Options.generate_cleanup_code >= 3:
code.putln("/*--- Type import cleanup code ---*/")
for ext_type in sorted(env.types_imported, key=operator.attrgetter('typeptr_cname')):
code.put_xdecref_clear(
ext_type.typeptr_cname, ext_type,
clear_before_decref=True,
nanny=False)
if Options.cache_builtins:
code.putln("/*--- Builtin cleanup code ---*/")
for entry in env.cached_builtins:
code.put_xdecref_clear(
entry.cname, PyrexTypes.py_object_type,
clear_before_decref=True,
nanny=False)
code.putln("/*--- Intern cleanup code ---*/")
code.put_decref_clear(Naming.empty_tuple,
PyrexTypes.py_object_type,
clear_before_decref=True,
nanny=False)
for entry in env.c_class_entries:
cclass_type = entry.type
if cclass_type.is_external or cclass_type.base_type:
continue
if cclass_type.scope.directives.get('freelist', 0):
scope = cclass_type.scope
freelist_name = scope.mangle_internal(Naming.freelist_name)
freecount_name = scope.mangle_internal(Naming.freecount_name)
code.putln("while (%s > 0) {" % freecount_name)
code.putln("PyObject* o = (PyObject*)%s[--%s];" % (
freelist_name, freecount_name))
code.putln("(*Py_TYPE(o)->tp_free)(o);")
code.putln("}")
# for entry in env.pynum_entries:
# code.put_decref_clear(entry.cname,
# PyrexTypes.py_object_type,
# nanny=False)
# for entry in env.all_pystring_entries:
# if entry.is_interned:
# code.put_decref_clear(entry.pystring_cname,
# PyrexTypes.py_object_type,
# nanny=False)
# for entry in env.default_entries:
# if entry.type.is_pyobject and entry.used:
# code.putln("Py_DECREF(%s); %s = 0;" % (
# code.entry_as_pyobject(entry), entry.cname))
code.putln('#if CYTHON_COMPILING_IN_PYPY')
code.putln('Py_CLEAR(%s);' % Naming.builtins_cname)
code.putln('#endif')
code.put_decref_clear(env.module_dict_cname, py_object_type,
nanny=False, clear_before_decref=True)
def generate_main_method(self, env, code):
module_is_main = "%s%s" % (Naming.module_is_main, self.full_module_name.replace('.', '__'))
if Options.embed == "main":
wmain = "wmain"
else:
wmain = Options.embed
code.globalstate.use_utility_code(
main_method.specialize(
module_name = env.module_name,
module_is_main = module_is_main,
main_method = Options.embed,
wmain_method = wmain))
def generate_pymoduledef_struct(self, env, code):
if env.doc:
doc = "__Pyx_DOCSTR(%s)" % code.get_string_const(env.doc)
else:
doc = "0"
if Options.generate_cleanup_code:
cleanup_func = "(freefunc)%s" % Naming.cleanup_cname
else:
cleanup_func = 'NULL'
code.putln("")
code.putln("#if PY_MAJOR_VERSION >= 3")
code.putln("static struct PyModuleDef %s = {" % Naming.pymoduledef_cname)
code.putln("#if PY_VERSION_HEX < 0x03020000")
# fix C compiler warnings due to missing initialisers
code.putln(" { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },")
code.putln("#else")
code.putln(" PyModuleDef_HEAD_INIT,")
code.putln("#endif")
code.putln(' __Pyx_NAMESTR("%s"),' % env.module_name)
code.putln(" %s, /* m_doc */" % doc)
code.putln(" -1, /* m_size */")
code.putln(" %s /* m_methods */," % env.method_table_cname)
code.putln(" NULL, /* m_reload */")
code.putln(" NULL, /* m_traverse */")
code.putln(" NULL, /* m_clear */")
code.putln(" %s /* m_free */" % cleanup_func)
code.putln("};")
code.putln("#endif")
def generate_module_creation_code(self, env, code):
# Generate code to create the module object and
# install the builtins.
if env.doc:
doc = "__Pyx_DOCSTR(%s)" % code.get_string_const(env.doc)
else:
doc = "0"
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln(
'%s = Py_InitModule4(__Pyx_NAMESTR("%s"), %s, %s, 0, PYTHON_API_VERSION); Py_XINCREF(%s);' % (
env.module_cname,
env.module_name,
env.method_table_cname,
doc,
env.module_cname))
code.putln("#else")
code.putln(
"%s = PyModule_Create(&%s);" % (
env.module_cname,
Naming.pymoduledef_cname))
code.putln("#endif")
code.putln(code.error_goto_if_null(env.module_cname, self.pos))
code.putln(
"%s = PyModule_GetDict(%s); %s" % (
env.module_dict_cname, env.module_cname,
code.error_goto_if_null(env.module_dict_cname, self.pos)))
code.put_incref(env.module_dict_cname, py_object_type, nanny=False)
code.putln(
'%s = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); %s' % (
Naming.builtins_cname,
code.error_goto_if_null(Naming.builtins_cname, self.pos)))
code.putln('#if CYTHON_COMPILING_IN_PYPY')
code.putln('Py_INCREF(%s);' % Naming.builtins_cname)
code.putln('#endif')
code.putln(
'if (__Pyx_SetAttrString(%s, "__builtins__", %s) < 0) %s;' % (
env.module_cname,
Naming.builtins_cname,
code.error_goto(self.pos)))
if Options.pre_import is not None:
code.putln(
'%s = PyImport_AddModule(__Pyx_NAMESTR("%s")); %s' % (
Naming.preimport_cname,
Options.pre_import,
code.error_goto_if_null(Naming.preimport_cname, self.pos)))
def generate_global_init_code(self, env, code):
# Generate code to initialise global PyObject *
# variables to None.
for entry in env.var_entries:
if entry.visibility != 'extern':
if entry.used:
entry.type.global_init_code(entry, code)
def generate_c_variable_export_code(self, env, code):
# Generate code to create PyCFunction wrappers for exported C functions.
entries = []
for entry in env.var_entries:
if (entry.api
or entry.defined_in_pxd
or (Options.cimport_from_pyx and not entry.visibility == 'extern')):
entries.append(entry)
if entries:
env.use_utility_code(UtilityCode.load_cached("VoidPtrExport", "ImportExport.c"))
for entry in entries:
signature = entry.type.declaration_code("")
name = code.intern_identifier(entry.name)
code.putln('if (__Pyx_ExportVoidPtr(%s, (void *)&%s, "%s") < 0) %s' % (
name, entry.cname, signature,
code.error_goto(self.pos)))
def generate_c_function_export_code(self, env, code):
# Generate code to create PyCFunction wrappers for exported C functions.
entries = []
for entry in env.cfunc_entries:
if (entry.api
or entry.defined_in_pxd
or (Options.cimport_from_pyx and not entry.visibility == 'extern')):
entries.append(entry)
if entries:
env.use_utility_code(
UtilityCode.load_cached("FunctionExport", "ImportExport.c"))
for entry in entries:
signature = entry.type.signature_string()
code.putln('if (__Pyx_ExportFunction("%s", (void (*)(void))%s, "%s") < 0) %s' % (
entry.name,
entry.cname,
signature,
code.error_goto(self.pos)))
def generate_type_import_code_for_module(self, module, env, code):
# Generate type import code for all exported extension types in
# an imported module.
#if module.c_class_entries:
for entry in module.c_class_entries:
if entry.defined_in_pxd:
self.generate_type_import_code(env, entry.type, entry.pos, code)
def specialize_fused_types(self, pxd_env):
"""
If fused c(p)def functions are defined in an imported pxd, but not
used in this implementation file, we still have fused entries and
not specialized ones. This method replaces any fused entries with their
specialized ones.
"""
for entry in pxd_env.cfunc_entries[:]:
if entry.type.is_fused:
# This call modifies the cfunc_entries in-place
entry.type.get_all_specialized_function_types()
def generate_c_variable_import_code_for_module(self, module, env, code):
# Generate import code for all exported C functions in a cimported module.
entries = []
for entry in module.var_entries:
if entry.defined_in_pxd:
entries.append(entry)
if entries:
env.use_utility_code(
UtilityCode.load_cached("ModuleImport", "ImportExport.c"))
env.use_utility_code(
UtilityCode.load_cached("VoidPtrImport", "ImportExport.c"))
temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
'%s = __Pyx_ImportModule("%s"); if (!%s) %s' % (
temp,
module.qualified_name,
temp,
code.error_goto(self.pos)))
for entry in entries:
if env is module:
cname = entry.cname
else:
cname = module.mangle(Naming.varptr_prefix, entry.name)
signature = entry.type.declaration_code("")
code.putln(
'if (__Pyx_ImportVoidPtr(%s, "%s", (void **)&%s, "%s") < 0) %s' % (
temp, entry.name, cname, signature,
code.error_goto(self.pos)))
code.putln("Py_DECREF(%s); %s = 0;" % (temp, temp))
def generate_c_function_import_code_for_module(self, module, env, code):
# Generate import code for all exported C functions in a cimported module.
entries = []
for entry in module.cfunc_entries:
if entry.defined_in_pxd and entry.used:
entries.append(entry)
if entries:
env.use_utility_code(
UtilityCode.load_cached("ModuleImport", "ImportExport.c"))
env.use_utility_code(
UtilityCode.load_cached("FunctionImport", "ImportExport.c"))
temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
'%s = __Pyx_ImportModule("%s"); if (!%s) %s' % (
temp,
module.qualified_name,
temp,
code.error_goto(self.pos)))
for entry in entries:
code.putln(
'if (__Pyx_ImportFunction(%s, "%s", (void (**)(void))&%s, "%s") < 0) %s' % (
temp,
entry.name,
entry.cname,
entry.type.signature_string(),
code.error_goto(self.pos)))
code.putln("Py_DECREF(%s); %s = 0;" % (temp, temp))
def generate_type_init_code(self, env, code):
# Generate type import code for extern extension types
# and type ready code for non-extern ones.
for entry in env.c_class_entries:
if entry.visibility == 'extern' and not entry.utility_code_definition:
self.generate_type_import_code(env, entry.type, entry.pos, code)
else:
self.generate_base_type_import_code(env, entry, code)
self.generate_exttype_vtable_init_code(entry, code)
self.generate_type_ready_code(env, entry, code)
self.generate_typeptr_assignment_code(entry, code)
def generate_base_type_import_code(self, env, entry, code):
base_type = entry.type.base_type
if (base_type and base_type.module_name != env.qualified_name and not
base_type.is_builtin_type and not entry.utility_code_definition):
self.generate_type_import_code(env, base_type, self.pos, code)
def generate_type_import_code(self, env, type, pos, code):
# If not already done, generate code to import the typeobject of an
# extension type defined in another module, and extract its C method
# table pointer if any.
if type in env.types_imported:
return
env.use_utility_code(UtilityCode.load_cached("TypeImport", "ImportExport.c"))
self.generate_type_import_call(type, code,
code.error_goto_if_null(type.typeptr_cname, pos))
if type.vtabptr_cname:
code.globalstate.use_utility_code(
UtilityCode.load_cached('GetVTable', 'ImportExport.c'))
code.putln("%s = (struct %s*)__Pyx_GetVtable(%s->tp_dict); %s" % (
type.vtabptr_cname,
type.vtabstruct_cname,
type.typeptr_cname,
code.error_goto_if_null(type.vtabptr_cname, pos)))
env.types_imported.add(type)
py3_type_name_map = {'str' : 'bytes', 'unicode' : 'str'}
def generate_type_import_call(self, type, code, error_code):
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
objstruct = "struct %s" % type.objstruct_cname
sizeof_objstruct = objstruct
module_name = type.module_name
condition = replacement = None
if module_name not in ('__builtin__', 'builtins'):
module_name = '"%s"' % module_name
else:
module_name = '__Pyx_BUILTIN_MODULE_NAME'
if type.name in Code.non_portable_builtins_map:
condition, replacement = Code.non_portable_builtins_map[type.name]
if objstruct in Code.basicsize_builtins_map:
# Some builtin types have a tp_basicsize which differs from sizeof(...):
sizeof_objstruct = Code.basicsize_builtins_map[objstruct]
code.put('%s = __Pyx_ImportType(%s,' % (
type.typeptr_cname,
module_name))
if condition and replacement:
code.putln("") # start in new line
code.putln("#if %s" % condition)
code.putln('"%s",' % replacement)
code.putln("#else")
code.putln('"%s",' % type.name)
code.putln("#endif")
else:
code.put(' "%s", ' % type.name)
if sizeof_objstruct != objstruct:
if not condition:
code.putln("") # start in new line
code.putln("#if CYTHON_COMPILING_IN_PYPY")
code.putln('sizeof(%s),' % objstruct)
code.putln("#else")
code.putln('sizeof(%s),' % sizeof_objstruct)
code.putln("#endif")
else:
code.put('sizeof(%s), ' % objstruct)
code.putln('%i); %s' % (
not type.is_external or type.is_subclassed,
error_code))
def generate_type_ready_code(self, env, entry, code):
# Generate a call to PyType_Ready for an extension
# type defined in this module.
type = entry.type
typeobj_cname = type.typeobj_cname
scope = type.scope
if scope: # could be None if there was an error
if entry.visibility != 'extern':
for slot in TypeSlots.slot_table:
slot.generate_dynamic_init_code(scope, code)
code.putln(
"if (PyType_Ready(&%s) < 0) %s" % (
typeobj_cname,
code.error_goto(entry.pos)))
# Don't inherit tp_print from builtin types, restoring the
# behavior of using tp_repr or tp_str instead.
code.putln("%s.tp_print = 0;" % typeobj_cname)
# Fix special method docstrings. This is a bit of a hack, but
# unless we let PyType_Ready create the slot wrappers we have
# a significant performance hit. (See trac #561.)
for func in entry.type.scope.pyfunc_entries:
is_buffer = func.name in ('__getbuffer__',
'__releasebuffer__')
if (func.is_special and Options.docstrings and
func.wrapperbase_cname and not is_buffer):
slot = TypeSlots.method_name_to_slot[func.name]
preprocessor_guard = slot.preprocessor_guard_code()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("{")
code.putln(
'PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&%s, "%s"); %s' % (
typeobj_cname,
func.name,
code.error_goto_if_null('wrapper', entry.pos)))
code.putln(
"if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
code.putln(
"%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
func.wrapperbase_cname))
code.putln(
"%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
code.putln(
"((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
func.wrapperbase_cname))
code.putln("}")
code.putln("}")
code.putln('#endif')
if preprocessor_guard:
code.putln('#endif')
if type.vtable_cname:
code.putln(
"if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
typeobj_cname,
type.vtabptr_cname,
code.error_goto(entry.pos)))
code.globalstate.use_utility_code(
UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
if not type.scope.is_internal and not type.scope.directives['internal']:
# scope.is_internal is set for types defined by
# Cython (such as closures), the 'internal'
# directive is set by users
code.putln(
'if (__Pyx_SetAttrString(%s, "%s", (PyObject *)&%s) < 0) %s' % (
Naming.module_cname,
scope.class_name,
typeobj_cname,
code.error_goto(entry.pos)))
weakref_entry = scope.lookup_here("__weakref__")
if weakref_entry:
if weakref_entry.type is py_object_type:
tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
objstruct = "struct %s" % type.objstruct_cname
code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
tp_weaklistoffset,
tp_weaklistoffset,
objstruct,
weakref_entry.cname))
else:
error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
# extension type.
type = entry.type
if type.vtable_cname:
code.putln(
"%s = &%s;" % (
type.vtabptr_cname,
type.vtable_cname))
if type.base_type and type.base_type.vtabptr_cname:
code.putln(
"%s.%s = *%s;" % (
type.vtable_cname,
Naming.obj_base_cname,
type.base_type.vtabptr_cname))
c_method_entries = [
entry for entry in type.scope.cfunc_entries
if entry.func_cname ]
if c_method_entries:
for meth_entry in c_method_entries:
cast = meth_entry.type.signature_cast_string()
code.putln(
"%s.%s = %s%s;" % (
type.vtable_cname,
meth_entry.cname,
cast,
meth_entry.func_cname))
def generate_typeptr_assignment_code(self, entry, code):
# Generate code to initialise the typeptr of an extension
# type defined in this module to point to its type object.
type = entry.type
if type.typeobj_cname:
code.putln(
"%s = &%s;" % (
type.typeptr_cname, type.typeobj_cname))
def generate_cfunction_declaration(entry, env, code, definition):
from_cy_utility = entry.used and entry.utility_code_definition
if entry.used and entry.inline_func_in_pxd or (not entry.in_cinclude and (definition
or entry.defined_in_pxd or entry.visibility == 'extern' or from_cy_utility)):
if entry.visibility == 'extern':
storage_class = Naming.extern_c_macro
dll_linkage = "DL_IMPORT"
elif entry.visibility == 'public':
storage_class = Naming.extern_c_macro
dll_linkage = "DL_EXPORT"
elif entry.visibility == 'private':
storage_class = "static"
dll_linkage = None
else:
storage_class = "static"
dll_linkage = None
type = entry.type
if entry.defined_in_pxd and not definition:
storage_class = "static"
dll_linkage = None
type = CPtrType(type)
header = type.declaration_code(
entry.cname, dll_linkage = dll_linkage)
modifiers = code.build_function_modifiers(entry.func_modifiers)
code.putln("%s %s%s; /*proto*/" % (
storage_class,
modifiers,
header))
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
streq_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *); /*proto*/
""",
impl = """
static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) {
while (*s1 != '\\0' && *s1 == *s2) { s1++; s2++; }
return *s1 == *s2;
}
""")
#------------------------------------------------------------------------------------
import_star_utility_code = """
/* import_all_from is an unexposed function from ceval.c */
static int
__Pyx_import_all_from(PyObject *locals, PyObject *v)
{
PyObject *all = __Pyx_GetAttrString(v, "__all__");
PyObject *dict, *name, *value;
int skip_leading_underscores = 0;
int pos, err;
if (all == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1; /* Unexpected error */
PyErr_Clear();
dict = __Pyx_GetAttrString(v, "__dict__");
if (dict == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1;
PyErr_SetString(PyExc_ImportError,
"from-import-* object has no __dict__ and no __all__");
return -1;
}
#if PY_MAJOR_VERSION < 3
all = PyObject_CallMethod(dict, (char *)"keys", NULL);
#else
all = PyMapping_Keys(dict);
#endif
Py_DECREF(dict);
if (all == NULL)
return -1;
skip_leading_underscores = 1;
}
for (pos = 0, err = 0; ; pos++) {
name = PySequence_GetItem(all, pos);
if (name == NULL) {
if (!PyErr_ExceptionMatches(PyExc_IndexError))
err = -1;
else
PyErr_Clear();
break;
}
if (skip_leading_underscores &&
#if PY_MAJOR_VERSION < 3
PyString_Check(name) &&
PyString_AS_STRING(name)[0] == '_')
#else
PyUnicode_Check(name) &&
PyUnicode_AS_UNICODE(name)[0] == '_')
#endif
{
Py_DECREF(name);
continue;
}
value = PyObject_GetAttr(v, name);
if (value == NULL)
err = -1;
else if (PyDict_CheckExact(locals))
err = PyDict_SetItem(locals, name, value);
else
err = PyObject_SetItem(locals, name, value);
Py_DECREF(name);
Py_XDECREF(value);
if (err != 0)
break;
}
Py_DECREF(all);
return err;
}
static int %(IMPORT_STAR)s(PyObject* m) {
int i;
int ret = -1;
char* s;
PyObject *locals = 0;
PyObject *list = 0;
#if PY_MAJOR_VERSION >= 3
PyObject *utf8_name = 0;
#endif
PyObject *name;
PyObject *item;
locals = PyDict_New(); if (!locals) goto bad;
if (__Pyx_import_all_from(locals, m) < 0) goto bad;
list = PyDict_Items(locals); if (!list) goto bad;
for(i=0; i<PyList_GET_SIZE(list); i++) {
name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0);
item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1);
#if PY_MAJOR_VERSION >= 3
utf8_name = PyUnicode_AsUTF8String(name);
if (!utf8_name) goto bad;
s = PyBytes_AS_STRING(utf8_name);
if (%(IMPORT_STAR_SET)s(item, name, s) < 0) goto bad;
Py_DECREF(utf8_name); utf8_name = 0;
#else
s = PyString_AsString(name);
if (!s) goto bad;
if (%(IMPORT_STAR_SET)s(item, name, s) < 0) goto bad;
#endif
}
ret = 0;
bad:
Py_XDECREF(locals);
Py_XDECREF(list);
#if PY_MAJOR_VERSION >= 3
Py_XDECREF(utf8_name);
#endif
return ret;
}
""" % {'IMPORT_STAR' : Naming.import_star,
'IMPORT_STAR_SET' : Naming.import_star_set }
refnanny_utility_code = UtilityCode.load_cached("Refnanny", "ModuleSetupCode.c")
main_method = UtilityCode.load("MainFunction", "Embed.c")
packed_struct_utility_code = UtilityCode(proto="""
#if defined(__GNUC__)
#define __Pyx_PACKED __attribute__((__packed__))
#else
#define __Pyx_PACKED
#endif
""", impl="", proto_block='utility_code_proto_before_types')
capsule_utility_code = UtilityCode.load("Capsule")
|
shaunwbell/FOCI_Analysis
|
refs/heads/master
|
ReanalysisRetreival_orig/Chuckchi_Winds/Chuckchi_WindsSFCtemp_NARR_model_prep.py
|
1
|
#!/usr/bin/env python
"""
Chuckchi_WindsSFCtemp_NARR_model_prep.py
Retrieve NARR winds and surface air temperature for one location
Icy Cape Line, station C2
Latitude = 71.2162 Longitude = 164.3008
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V, and surface air temperature
Save in EPIC NetCDF standard
"""
#System Stack
import datetime
import csv                 # used by csvread()
import sys                 # used by date2pydate() on an unrecognized time flag
#Science Stack
import numpy as np
from netCDF4 import Dataset
# User Stack
import utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','station_1','3hr filtered', 'U,V','Winds', 'Chuckchi'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def csvread(ifile):
date, time, uwnd, vwnd, atemp, bpress = [], [], [], [], [], []
with open(ifile, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader) #skip header
""" DAT TIM WU WV AT BP """
for row in csv_reader:
try:
r0,r1,r2,r3,r4,r5,r6 = row[0].strip().split()
except ValueError:
r0,r1,r2,r3,r4,r5 = row[0].strip().split()
date.append(r0)
time.append(r1)
uwnd.append(r2)
vwnd.append(r3)
return {'DAT': np.array(date, int), 'TIM':np.array(time, float), 'WU':np.array(uwnd, float),\
'WV':np.array(vwnd, float)}
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=10. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.add_data('AT_21', data[2])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
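        # (EPIC convention: 'time' is the true Julian day number -- JD 2440000
        #  falls on 1968-05-23 -- and 'time2' is milliseconds since 00:00 GMT,
        #  hence the constant offset between Python ordinals and Julian days.)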
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
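        # e.g. file_time = 24.0 (one day worth of hours) maps to the ordinal of 1800-01-02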
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
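# Sanity check (sketch): feeding the EPIC reference date back through recovers the
# reference Julian day, e.g.
#   pydate2EPIC(datetime.datetime(1968, 5, 23).toordinal())  ->  (2440000.0, 0.0)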
"---"
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
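# Example (sketch): a unit impulse is spread across three samples by the
# (1/4, 1/2, 1/4) weights --
#   triangle_smoothing(np.array([0., 0., 1., 0., 0.]))  ->  [0., 0.25, 0.5, 0.25, 0.]
# 'same' keeps the output length equal to the input; the first and last points are
# computed with implicit zero padding (the "edge effects" noted above).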
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Volumes/WDC_internal/Users/bell/in_and_outbox/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
infile = [NARR + 'uwnd.10m.1994.nc'] #used just to get grid sections
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
"""
C1 70.8305, 163.1195
C2 71.2162, 164.3008
C3 71.8191, 165.9820
"""
station_name = [ 'C2',]
sta_lat = [71.2162,]
sta_long = [164.3008,]
#Find NARR nearest point to moorings - haversine formula
station_1 = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
station_1_modelpt = [lat_lon['lat'][station_1[3],station_1[4]],lat_lon['lon'][station_1[3],station_1[4]]]
print "station_1 nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], station_1_modelpt[0], station_1_modelpt[1])
#loop over all requested data
years = range(2015,2016,1)
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
station_1u_f = triangle_smoothing(station_1_data['uwnd'])
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
#filter data
station_1v_f = triangle_smoothing(station_1_data['vwnd'])
# retrieve only these location's data
# sfc air temp
infile = NARR + 'air.2m.'+ str(yy) + '.nc'
print "Working on file " + infile
station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
    station_1at = station_1_data['air'] - 273.15 #Kelvin -> Celsius
#convert to EPIC time
pydate = date2pydate(station_1_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_' + station_name[0] + '_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[0], [epic_time, epic_time1], station_1_modelpt, [station_1u_f, station_1v_f, station_1at])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=75,llcrnrlon=-180,urcrnrlon=-145, lat_ts=60)
# Mooring Data
x_moor, y_moor = m([-1. * sta_long[0],],sta_lat)
x_close, y_close = m([station_1_modelpt[1],], [station_1_modelpt[0],])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
    CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyles='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(60,75,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-180,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/Chuckchi_region.png', bbox_inches='tight', dpi = (100))
plt.close()
|
cnewcome/sos
|
refs/heads/master
|
sos/plugins/dlm.py
|
4
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
import os.path
import re
class Dlm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""DLM (Distributed lock manager)"""
plugin_name = "dlm"
profiles = ("cluster", )
packages = ["cman", "dlm", "pacemaker"]
option_list = [
("lockdump", "capture lock dumps for DLM", "slow", False),
]
debugfs_path = "/sys/kernel/debug"
_debugfs_cleanup = False
def setup(self):
self.add_copy_spec([
"/etc/sysconfig/dlm"
])
self.add_cmd_output([
"dlm_tool log_plock",
"dlm_tool dump",
"dlm_tool ls -n"
])
if self.get_option("lockdump"):
self.do_lockdump()
def do_lockdump(self):
if self._mount_debug():
dlm_tool = "dlm_tool ls"
result = self.call_ext_prog(dlm_tool)
if result["status"] != 0:
return
lock_exp = r'^name\s+([^\s]+)$'
lock_re = re.compile(lock_exp, re.MULTILINE)
for lockspace in lock_re.findall(result["output"]):
self.add_cmd_output(
"dlm_tool lockdebug -svw '%s'" % lockspace,
suggest_filename="dlm_locks_%s" % lockspace
)
def _mount_debug(self):
if not os.path.ismount(self.debugfs_path):
self._debugfs_cleanup = True
r = self.call_ext_prog("mount -t debugfs debugfs %s"
% self.debugfs_path)
if r["status"] != 0:
self._log_error("debugfs not mounted and mount attempt failed")
self._debugfs_cleanup = False
return os.path.ismount(self.debugfs_path)
def postproc(self):
if self._debugfs_cleanup and os.path.ismount(self.debugfs_path):
r = self.call_ext_prog("umount %s" % self.debugfs_path)
if r["status"] != 0:
self._log_error("could not unmount %s" % self.debugfs_path)
return
# vim: et ts=4 sw=4
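# Illustrative invocation (assumed sosreport CLI semantics, not defined in this file):
#   sosreport -o dlm -k dlm.lockdump=on
# would enable the slow per-lockspace dumps collected in do_lockdump() above;
# by default only the dlm_tool summaries gathered in setup() are captured.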
|
gtko/Sick-Beard
|
refs/heads/development
|
lib/hachoir_core/field/byte_field.py
|
90
|
"""
Very basic field: raw content with a size in bytes. Use this class for
unknown content.
"""
from lib.hachoir_core.field import Field, FieldError
from lib.hachoir_core.tools import makePrintable
from lib.hachoir_core.bits import str2hex
from lib.hachoir_core import config
MAX_LENGTH = (2**64)
class RawBytes(Field):
"""
Byte vector of unknown content
@see: L{Bytes}
"""
static_size = staticmethod(lambda *args, **kw: args[1]*8)
def __init__(self, parent, name, length, description="Raw data"):
assert issubclass(parent.__class__, Field)
if not(0 < length <= MAX_LENGTH):
raise FieldError("Invalid RawBytes length (%s)!" % length)
Field.__init__(self, parent, name, length*8, description)
self._display = None
def _createDisplay(self, human):
max_bytes = config.max_byte_length
if type(self._getValue) is type(lambda: None):
display = self.value[:max_bytes]
else:
if self._display is None:
address = self.absolute_address
length = min(self._size / 8, max_bytes)
self._display = self._parent.stream.readBytes(address, length)
display = self._display
truncated = (8 * len(display) < self._size)
if human:
if truncated:
display += "(...)"
return makePrintable(display, "latin-1", quote='"', to_unicode=True)
else:
display = str2hex(display, format=r"\x%02x")
if truncated:
return '"%s(...)"' % display
else:
return '"%s"' % display
def createDisplay(self):
return self._createDisplay(True)
def createRawDisplay(self):
return self._createDisplay(False)
def hasValue(self):
return True
def createValue(self):
assert (self._size % 8) == 0
if self._display:
self._display = None
return self._parent.stream.readBytes(
self.absolute_address, self._size / 8)
class Bytes(RawBytes):
"""
Byte vector: can be used for magic number or GUID/UUID for example.
@see: L{RawBytes}
"""
pass
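# Usage sketch (illustrative only, assuming the usual createFields protocol of a
# hachoir FieldSet defined elsewhere in this package):
#     def createFields(self):
#         yield Bytes(self, "signature", 4, "Magic number")
#         yield RawBytes(self, "payload", (self.size // 8) - 4, "Unparsed payload")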
|