| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
import json
import numpy as np
from glob import glob
inputs = {
'xml_file_path' : "./data/single_wavelength_copy",
'file_set' : {'p38' : glob( "./data/single_wavelength_copy/*.xml")},
'section' : '280_480_TOP_120',
'ligand_order' : ['Bosutinib','Bosutinib Isomer','Erlotinib','Gefitinib','Ponatinib','Lapatinib','Saracatinib','Vandetanib'],
'Lstated' : np.array([20.0e-6,14.0e-6,9.82e-6,6.88e-6,4.82e-6,3.38e-6,2.37e-6,1.66e-6,1.16e-6,0.815e-6,0.571e-6,0.4e-6,0.28e-6,0.196e-6,0.138e-6,0.0964e-6,0.0676e-6,0.0474e-6,0.0320e-6,0.0240e-6,0.0160e-6,0.0120e-6,0.008e-6,0.0], np.float64), # ligand concentration, M
'Pstated' : 0.5e-6 * np.ones([24],np.float64), # protein concentration, M
'assay_volume' : 50e-6, # assay volume, L
'well_area' : 0.1369, # well area, cm^2 for 4ti-0203 [http://4ti.co.uk/files/3113/4217/2464/4ti-0201.pdf]
}
inputs['Lstated'] = inputs['Lstated'].tolist()
inputs['Pstated'] = inputs['Pstated'].tolist()
with open('inputs.json', 'w') as fp:
json.dump(inputs, fp)
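# Editor's sketch (not part of the original script): the .tolist() calls
# above exist because json cannot serialize numpy arrays. On load, the
# arrays can be restored like this:
with open('inputs.json', 'r') as fp:
    loaded = json.load(fp)
loaded['Lstated'] = np.array(loaded['Lstated'], np.float64)
loaded['Pstated'] = np.array(loaded['Pstated'], np.float64)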
| choderalab/assaytools | examples/direct-fluorescence-assay/inputs_p38_singlet.py | Python | lgpl-2.1 | 1,067 | 0.045923 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('browser', '0009_splitevent'),
]
operations = [
migrations.AddField(
model_name='affiliation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='attendance',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='course',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='coursegroup',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalaffiliation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalattendance',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalcourse',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalcoursegroup',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalinstitution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalinvestigator',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalknowninstitution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalknownlocation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalknownperson',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicallocalization',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicallocation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalpartof',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='historicalperson',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='institution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='investigator',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='knowninstitution',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='knownlocation',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='knownperson',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='localization',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='location',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='partof',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='person',
name='validated_on',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AlterField(
model_name='affiliation',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='attendance',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='course',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='coursegroup',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalaffiliation',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalattendance',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalcourse',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalcoursegroup',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalinstitution',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalinvestigator',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalknowninstitution',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalknownlocation',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalknownperson',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicallocalization',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicallocation',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalpartof',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='historicalperson',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='institution',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='investigator',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='knowninstitution',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='knownlocation',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='knownperson',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='localization',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='location',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='partof',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
migrations.AlterField(
model_name='person',
name='validated',
field=models.BooleanField(default=False, help_text=b'Indicates that a record has been examined for accuracy. This does not necessarily mean that the record has been disambiguated with respect to an authority record.'),
),
]
| erickpeirson/mbl-browser | browser/migrations/0010_auto_20160804_1644.py | Python | gpl-3.0 | 13,851 | 0.001877 |
# sqlalchemy/events.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and :class:`.SchemaEvent`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`.Column` is associated
with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
is associated with a :class:`.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
See also:
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
"""Called before CREATE statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_create(self, target, connection, **kw):
"""Called after CREATE statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_drop(self, target, connection, **kw):
"""Called before DROP statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_drop(self, target, connection, **kw):
"""Called after DROP statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
This because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
e.g.::
from sqlalchemy import event
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
"handle an on checkout event"
event.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`.Pool` class and
:class:`.Pool` instances, :class:`.PoolEvents` also accepts
:class:`.Engine` objects and the :class:`.Engine` class as
targets, which will be resolved to the ``.pool`` attribute of the
given engine or the :class:`.Pool` class::
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
event.listen(engine, 'checkout', my_on_checkout)
"""
_target_class_doc = "SomeEngineOrPool"
_dispatch_target = Pool
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Pool
elif issubclass(target, Pool):
return target
elif isinstance(target, Engine):
return target.pool
else:
return target
def connect(self, dbapi_connection, connection_record):
"""Called once for each new DB-API connection or Pool's ``creator()``.
:param dbapi_con:
A newly connected raw DB-API connection (not a SQLAlchemy
``Connection`` wrapper).
:param con_record:
The ``_ConnectionRecord`` that persistently manages the connection
"""
def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first DB-API connection.
:param dbapi_con:
A newly connected raw DB-API connection (not a SQLAlchemy
``Connection`` wrapper).
:param con_record:
The ``_ConnectionRecord`` that persistently manages the connection
"""
def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_con:
A raw DB-API connection
:param con_record:
The ``_ConnectionRecord`` that persistently manages the connection
:param con_proxy:
The ``_ConnectionFairy`` which manages the connection for the span of
the current checkout.
If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
.. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event
which occurs upon creation of a new :class:`.Connection`.
"""
def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_con:
A raw DB-API connection
:param con_record:
The ``_ConnectionRecord`` that persistently manages the connection
"""
def reset(self, dbapi_con, con_record):
"""Called before the "reset" action occurs for a pooled connection.
This event represents
when the ``rollback()`` method is called on the DBAPI connection
before it is returned to the pool. The behavior of "reset" can
be controlled, including disabled, using the ``reset_on_return``
pool argument.
The :meth:`.PoolEvents.reset` event is usually followed by the
:meth:`.PoolEvents.checkin` event, except in those
cases where the connection is discarded immediately after reset.
:param dbapi_con:
A raw DB-API connection
:param con_record:
The ``_ConnectionRecord`` that persistently manages the connection
.. versionadded:: 0.8
.. seealso::
:meth:`.ConnectionEvents.rollback`
:meth:`.ConnectionEvents.commit`
"""
class ConnectionEvents(event.Events):
"""Available events for :class:`.Connectable`, which includes
:class:`.Connection` and :class:`.Engine`.
The methods here define the name of an event as well as the names of
members that are passed to listener functions.
An event listener can be associated with any :class:`.Connectable`
class or instance, such as an :class:`.Engine`, e.g.::
from sqlalchemy import event, create_engine
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
log.info("Received statement: %s" % statement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
event.listen(engine, "before_cursor_execute", before_cursor_execute)
or with a specific :class:`.Connection`::
with engine.begin() as conn:
@event.listens_for(conn, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters,
context, executemany):
log.info("Received statement: %s" % statement)
The :meth:`.before_execute` and :meth:`.before_cursor_execute`
events can also be established with the ``retval=True`` flag, which
allows modification of the statement and parameters to be sent
to the database. The :meth:`.before_cursor_execute` event is
particularly useful here to add ad-hoc string transformations, such
as comments, to all executions::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def comment_sql_calls(conn, cursor, statement, parameters,
context, executemany):
statement = statement + " -- some comment"
return statement, parameters
.. note:: :class:`.ConnectionEvents` can be established on any
combination of :class:`.Engine`, :class:`.Connection`, as well
as instances of each of those classes. Events across all
four scopes will fire off for a given instance of
:class:`.Connection`. However, for performance reasons, the
:class:`.Connection` object determines at instantiation time
whether or not its parent :class:`.Engine` has event listeners
established. Event listeners added to the :class:`.Engine`
class or to an instance of :class:`.Engine` *after* the instantiation
of a dependent :class:`.Connection` instance will usually
*not* be available on that :class:`.Connection` instance. The newly
added listeners will instead take effect for :class:`.Connection`
instances created subsequent to those event listeners being
established on the parent :class:`.Engine` class or instance.
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
.. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated
with any :class:`.Connectable` including :class:`.Connection`,
in addition to the existing support for :class:`.Engine`.
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Connectable
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
target._has_events = True
if not retval:
if identifier == 'before_execute':
orig_fn = fn
def wrap_before_execute(conn, clauseelement,
multiparams, params):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap_before_execute
elif identifier == 'before_cursor_execute':
orig_fn = fn
def wrap_before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
orig_fn(conn, cursor, statement,
parameters, context, executemany)
return statement, parameters
fn = wrap_before_cursor_execute
elif retval and \
identifier not in ('before_execute', 'before_cursor_execute'):
raise exc.ArgumentError(
"Only the 'before_execute' and "
"'before_cursor_execute' engine "
"event listeners accept the 'retval=True' "
"argument.")
event_key.with_wrapper(fn).base_listen()
def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events, receiving uncompiled
SQL constructs and other objects prior to rendering into SQL.
This event is good for debugging SQL compilation issues as well
as early manipulation of the parameters being sent to the database,
as the parameter lists will be in a consistent format here.
This event can be optionally established with the ``retval=True``
flag. The ``clauseelement``, ``multiparams``, and ``params``
arguments should be returned as a three-tuple in this case::
@event.listens_for(Engine, "before_execute", retval=True)
def before_execute(conn, clauseelement, multiparams, params):
# do something with clauseelement, multiparams, params
return clauseelement, multiparams, params
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
See also:
:meth:`.before_cursor_execute`
"""
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events after execute.
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
:param result: :class:`.ResultProxy` generated by the execution.
"""
def before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events before execution,
receiving the string
SQL statement and DBAPI-specific parameter list to be invoked
against a cursor.
This event is a good choice for logging as well as late modifications
to the SQL string. It's less ideal for parameter modifications except
for those which are specific to a target backend.
This event can be optionally established with the ``retval=True``
flag. The ``statement`` and ``parameters`` arguments should be
returned as a two-tuple in this case::
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
# do something with statement, parameters
return statement, parameters
See the example at :class:`.ConnectionEvents`.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
See also:
:meth:`.before_execute`
:meth:`.after_cursor_execute`
"""
def after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events after execution.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object. Will have results pending
if the statement was a SELECT, but these should not be consumed
as they will be needed by the :class:`.ResultProxy`.
:param statement: string SQL statement
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
"""
def dbapi_error(self, conn, cursor, statement, parameters,
context, exception):
"""Intercept a raw DBAPI error.
This event is called with the DBAPI exception instance
received from the DBAPI itself, *before* SQLAlchemy wraps the
exception with its own exception wrappers, and before any
other operations are performed on the DBAPI cursor; the
existing transaction remains in effect as well as any state
on the cursor.
The use case here is to inject low-level exception handling
into an :class:`.Engine`, typically for logging and
debugging purposes. In general, user code should **not** modify
any state or throw any exceptions here as this will
interfere with SQLAlchemy's cleanup and error handling
routines.
Subsequent to this hook, SQLAlchemy may attempt any
number of operations on the connection/cursor, including
closing the cursor, rolling back of the transaction in the
case of connectionless execution, and disposing of the entire
connection pool if a "disconnect" was detected. The
exception is then wrapped in a SQLAlchemy DBAPI exception
wrapper and re-thrown.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param exception: The **unwrapped** exception emitted directly from the
DBAPI. The class here is specific to the DBAPI module in use.
.. versionadded:: 0.7.7
"""
def engine_connect(self, conn, branch):
"""Intercept the creation of a new :class:`.Connection`.
This event is called typically as the direct result of calling
the :meth:`.Engine.connect` method.
It differs from the :meth:`.PoolEvents.connect` method, which
refers to the actual connection to a database at the DBAPI level;
a DBAPI connection may be pooled and reused for many operations.
In contrast, this event refers only to the production of a higher level
:class:`.Connection` wrapper around such a DBAPI connection.
It also differs from the :meth:`.PoolEvents.checkout` event
in that it is specific to the :class:`.Connection` object, not the
DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although
this DBAPI connection is available here via the :attr:`.Connection.connection`
attribute. But note there can in fact
be multiple :meth:`.PoolEvents.checkout` events within the lifespan
of a single :class:`.Connection` object, if that :class:`.Connection`
is invalidated and re-established. There can also be multiple
:class:`.Connection` objects generated for the same already-checked-out
DBAPI connection, in the case that a "branch" of a :class:`.Connection`
is produced.
:param conn: :class:`.Connection` object.
:param branch: if True, this is a "branch" of an existing
:class:`.Connection`. A branch is generated within the course
of a statement execution to invoke supplemental statements, most
typically to pre-execute a SELECT of a default value for the purposes
of an INSERT statement.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.PoolEvents.checkout` the lower-level pool checkout event
for an individual DBAPI connection
:meth:`.ConnectionEvents.set_connection_execution_options` - a copy of a
:class:`.Connection` is also made when the
:meth:`.Connection.execution_options` method is called.
"""
def set_connection_execution_options(self, conn, opts):
"""Intercept when the :meth:`.Connection.execution_options`
method is called.
This method is called after the new :class:`.Connection` has been
produced, with the newly updated execution options collection, but
before the :class:`.Dialect` has acted upon any of those new options.
Note that this method is not called when a new :class:`.Connection`
is produced which is inheriting execution options from its parent
:class:`.Engine`; to intercept this condition, use the
:meth:`.ConnectionEvents.engine_connect` event.
:param conn: The newly copied :class:`.Connection` object
:param opts: dictionary of options that were passed to the
:meth:`.Connection.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_engine_execution_options` - event
which is called when :meth:`.Engine.execution_options` is called.
"""
def set_engine_execution_options(self, engine, opts):
"""Intercept when the :meth:`.Engine.execution_options`
method is called.
The :meth:`.Engine.execution_options` method produces a shallow
copy of the :class:`.Engine` which stores the new options. That new
:class:`.Engine` is passed here. A particular application of this
method is to add a :meth:`.ConnectionEvents.engine_connect` event
handler to the given :class:`.Engine` which will perform some per-
:class:`.Connection` task specific to these execution options.
:param engine: The newly copied :class:`.Engine` object
:param opts: dictionary of options that were passed to the
:meth:`.Engine.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_connection_execution_options` - event
which is called when :meth:`.Connection.execution_options` is called.
"""
def begin(self, conn):
"""Intercept begin() events.
:param conn: :class:`.Connection` object
"""
def rollback(self, conn):
"""Intercept rollback() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` also "auto-rolls back"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to its default value of ``'rollback'``.
To intercept this
rollback, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
.. seealso::
:meth:`.PoolEvents.reset`
"""
def commit(self, conn):
"""Intercept commit() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` may also "auto-commit"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to the value ``'commit'``. To intercept this
commit, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
"""
def savepoint(self, conn, name):
"""Intercept savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
"""
def rollback_savepoint(self, conn, name, context):
"""Intercept rollback_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def release_savepoint(self, conn, name, context):
"""Intercept release_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def begin_twophase(self, conn, xid):
"""Intercept begin_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def prepare_twophase(self, conn, xid):
"""Intercept prepare_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def rollback_twophase(self, conn, xid, is_prepared):
"""Intercept rollback_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
def commit_twophase(self, conn, xid, is_prepared):
"""Intercept commit_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
| michaelgugino/web_keyer | sqlalchemy/events.py | Python | gpl-3.0 | 31,080 | 0.000611 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urlparse
from ceilometerclient import client as ceilometer
from cinderclient import client as cinder
import glanceclient as glance
from heatclient import client as heat
from keystoneclient import exceptions as keystone_exceptions
from keystoneclient.v2_0 import client as keystone
from neutronclient.neutron import client as neutron
from novaclient import client as nova
from oslo.config import cfg
from rally import exceptions
CONF = cfg.CONF
CONF.register_opts([
cfg.FloatOpt("openstack_client_http_timeout", default=30.0,
help="HTTP timeout for any of OpenStack service in seconds"),
cfg.BoolOpt("https_insecure", default=False,
help="Use SSL for all OpenStack API interfaces"),
cfg.StrOpt("https_cacert", default=None,
help="Path to CA server cetrificate for SSL")
])
# NOTE(boris-42): super dirty hack to make nova python client 2.17 thread-safe
nova._adapter_pool = lambda x: nova.adapters.HTTPAdapter()
class Clients(object):
"""This class simplify and unify work with openstack python clients."""
def __init__(self, endpoint):
self.endpoint = endpoint
self.cache = {}
def clear(self):
"""Remove all cached client handles."""
self.cache = {}
def memoize(name):
"""Cache client handles."""
def decorate(func):
def wrapper(self, *args, **kwargs):
key = '{0}{1}{2}'.format(func.__name__,
str(args) if args else '',
str(kwargs) if kwargs else '')
if key in self.cache:
return self.cache[key]
self.cache[key] = func(self, *args, **kwargs)
return self.cache[key]
return wrapper
return decorate
@memoize('keystone')
def keystone(self):
"""Return keystone client."""
new_kw = {
"endpoint": self._change_port(self.endpoint.auth_url, "35357"),
"timeout": CONF.openstack_client_http_timeout,
"insecure": CONF.https_insecure, "cacert": CONF.https_cacert
}
kw = dict(self.endpoint.to_dict().items() + new_kw.items())
client = keystone.Client(**kw)
client.authenticate()
return client
def verified_keystone(self):
"""Ensure keystone endpoints are valid and then authenticate
:returns: Keystone Client
"""
try:
# Ensure that user is admin
client = self.keystone()
roles = client.auth_ref['user']['roles']
if not any('admin' == role['name'] for role in roles):
raise exceptions.InvalidAdminException(
username=self.endpoint.username)
except keystone_exceptions.Unauthorized:
raise exceptions.InvalidEndpointsException()
except keystone_exceptions.AuthorizationFailure:
raise exceptions.HostUnreachableException(
url=self.endpoint.auth_url)
return client
@memoize('nova')
def nova(self, version='2'):
"""Returns nova client."""
client = nova.Client(version,
self.endpoint.username,
self.endpoint.password,
self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url,
service_type='compute',
http_log_debug=CONF.debug,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('neutron')
def neutron(self, version='2.0'):
"""Returns neutron client."""
client = neutron.Client(version,
username=self.endpoint.username,
password=self.endpoint.password,
tenant_name=self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('glance')
def glance(self, version='1'):
"""Returns glance client."""
kc = self.keystone()
endpoint = kc.service_catalog.get_endpoints()['image'][0]
client = glance.Client(version,
endpoint=endpoint['publicURL'],
token=kc.auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('heat')
def heat(self, version='1'):
"""Returns heat client."""
kc = self.keystone()
endpoint = kc.service_catalog.get_endpoints()['orchestration'][0]
client = heat.Client(version,
endpoint=endpoint['publicURL'],
token=kc.auth_token,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('cinder')
def cinder(self, version='1'):
"""Returns cinder client."""
client = cinder.Client(version,
self.endpoint.username,
self.endpoint.password,
self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url,
service_type='volume',
http_log_debug=CONF.debug,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
@memoize('ceilometer')
def ceilometer(self, version='1'):
"""Returns ceilometer client."""
client = ceilometer.Client(version,
username=self.endpoint.username,
password=self.endpoint.password,
tenant_name=self.endpoint.tenant_name,
endpoint=self.endpoint.auth_url,
timeout=CONF.openstack_client_http_timeout,
insecure=CONF.https_insecure,
cacert=CONF.https_cacert)
return client
def _change_port(self, url, new_port):
"""Change the port of a given url.
:param url: URL string
:param new_port: The new port
:returns: URL string
"""
url_obj = urlparse.urlparse(url)
new_url = "%(scheme)s://%(hostname)s:%(port)s%(path)s" % {
"scheme": url_obj.scheme, "hostname": url_obj.hostname,
"port": new_port, "path": url_obj.path}
return new_url
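# Editor's sketch (assumed usage; `endpoint` is a rally Endpoint object,
# not constructed in this module): handles are built once and then served
# from self.cache by the @memoize decorator.
#
#     clients = Clients(endpoint)
#     nova = clients.nova()          # builds and caches the nova client
#     assert clients.nova() is nova  # second call hits the cache
#     clients.clear()                # drops all cached handles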
| ytsarev/rally | rally/osclients.py | Python | apache-2.0 | 7,886 | 0.000127 |
from s3v1 import *
def filter_col_by_string(data_sample, field, filter_condition):
filtered_rows = [] # create a new list
col = int(data_sample[0].index(field)) # find the index of the given field name in the header row and assign it to col as an integer
filtered_rows.append(data_sample[0]) # add the header row to the new list
for item in data_sample[1:]:
if item[col] == filter_condition:
filtered_rows.append(item)
return filtered_rows
def filter_col_by_float(data_sample, field, direction, filter_condition):
filtered_rows = []
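# note: unlike filter_col_by_string above, this function does not re-append the header row to filtered_rows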
col = int(data_sample[0].index(field)) # you must use integers to access indexes. So this is just to be sure it's not a float or a string.
cond = float(filter_condition)
for row in data_sample[1:]:
element = float(row[col])
if direction == "<":
if element < cond:
filtered_rows.append(row)
elif direction == "<=":
if element <= cond:
filtered_rows.append(row)
elif direction == ">":
if element > cond:
filtered_rows.append(row)
elif direction == ">=":
if element >= cond:
filtered_rows.append(row)
elif direction == "==":
if element == cond:
filtered_rows.append(row)
else:
pass # the pass statement does nothing. It can be used when a statement is required syntactically but the program requires no action.
return filtered_rows
under_20_bucks = filter_col_by_float(data_from_csv, 'priceLabel', "<=", 20)
# print("Found {} ties under $20".format(number_of_records(under_20_bucks)))
silk_ties = filter_col_by_string(data_from_csv, "material", "_silk")
wool_ties = filter_col_by_string(data_from_csv, "material", "_wool")
cotton_ties = filter_col_by_string(data_from_csv, "material", "_cotton")
gucci_ties = filter_col_by_string(data_from_csv, "brandName", "Gucci") # this search term is case-sensitive: it came back with 1 tie when I used "gucci" and 171 ties when I used "Gucci". I went looking for the one lowercase-"gucci" tie in the dataset (manual search), but it didn't come up. What's going on with the 1 "gucci" tie?
falfkafj_ties = filter_col_by_string(data_from_csv, "brandName", "falfkafj") # this is a test with a non-existent brand to see whether the lowercase-"gucci" result was a global error... like every request returning a minimum of one record even when zero exist.
# print("Found {} silk ties".format(number_of_records(silk_ties)))
# print("Found {} wool ties".format(number_of_records(wool_ties)))
# print("Found {} cotton ties".format(number_of_records(cotton_ties)))
# print("Found {} Gucci ties".format(number_of_records(gucci_ties)))
# print("Found {} falfkafj ties".format(number_of_records(falfkafj_ties)))
# print("Found {} falfkafj ties".format(number_of_records(falfkafj_ties[1:])))
# print("Found {} falfkafj ties".format(number_of_records_ignore_header(falfkafj_ties)))
| alexmilesyounger/ds_basics | s3v2.py | Python | mit | 2,936 | 0.016349 |
import pytz
from pyrfc3339.utils import timezone, timedelta_seconds
def generate(dt, utc=True, accept_naive=False, microseconds=False):
'''
Generate an :RFC:`3339`-formatted timestamp from a
:class:`datetime.datetime`.
>>> from datetime import datetime
>>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc))
'2009-01-01T12:59:59Z'
The timestamp will use UTC unless `utc=False` is specified, in which case
it will use the timezone from the :class:`datetime.datetime`'s
:attr:`tzinfo` parameter.
>>> eastern = pytz.timezone('US/Eastern')
>>> dt = eastern.localize(datetime(2009,1,1,12,59,59))
>>> generate(dt)
'2009-01-01T17:59:59Z'
>>> generate(dt, utc=False)
'2009-01-01T12:59:59-05:00'
Unless `accept_naive=True` is specified, the `datetime` must not be naive.
>>> generate(datetime(2009,1,1,12,59,59,0))
Traceback (most recent call last):
...
ValueError: naive datetime and accept_naive is False
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True)
'2009-01-01T12:59:59Z'
If `accept_naive=True` is specified, the `datetime` is assumed to be UTC.
Attempting to generate a local timestamp from a naive datetime will result
in an error.
>>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False)
Traceback (most recent call last):
...
ValueError: cannot generate a local timestamp from a naive datetime
'''
if dt.tzinfo is None:
if accept_naive is True:
if utc is True:
dt = dt.replace(tzinfo=pytz.utc)
else:
raise ValueError("cannot generate a local timestamp from " +
"a naive datetime")
else:
raise ValueError("naive datetime and accept_naive is False")
if utc is True:
dt = dt.astimezone(pytz.utc)
timestamp = dt.strftime('%Y-%m-%dT%H:%M:%S')
if microseconds is True:
timestamp += dt.strftime('.%f')
if dt.tzinfo is pytz.utc:
timestamp += 'Z'
else:
timestamp += timezone(timedelta_seconds(dt.tzinfo.utcoffset(dt)))
return timestamp
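# Editor's note (sketch, not an original doctest): with microseconds=True the
# fractional part comes from strftime('.%f') and is always six digits:
#
#     >>> generate(datetime(2009, 1, 1, 12, 59, 59, 500, pytz.utc),
#     ...          microseconds=True)
#     '2009-01-01T12:59:59.000500Z'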
| kurtraschke/pyRFC3339 | pyrfc3339/generator.py | Python | mit | 2,170 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Corwin Brown <corwin@corwinbrown.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: win_uri
version_added: "2.1"
short_description: Interacts with web services.
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE HTTP authentication mechanisms.
options:
url:
description:
- HTTP or HTTPS URL in the form of (http|https)://host.domain:port/path
method:
description:
- The HTTP Method of the request or response.
default: GET
choices:
- GET
- POST
- PUT
- HEAD
- DELETE
- OPTIONS
- PATCH
- TRACE
- CONNECT
- REFRESH
content_type:
description:
- Sets the "Content-Type" header.
body:
description:
- The body of the HTTP request/response to the web service.
headers:
description:
- 'Key Value pairs for headers. Example "Host: www.somesite.com"'
use_basic_parsing:
description:
- This module relies upon 'Invoke-WebRequest', which by default uses the Internet Explorer Engine to parse a webpage. There's an edge case where, if a user hasn't run IE before, this will fail. The only advantage to using the Internet Explorer parser is that you can traverse the DOM in a powershell script. That isn't useful for Ansible, so by default we toggle 'UseBasicParsing'. However, you can toggle that off here.
choices:
- True
- False
default: True
author: Corwin Brown (@blakfeld)
"""
EXAMPLES = """
# Send a GET request and store the output:
---
- name: Perform a GET and Store Output
win_uri:
url: http://www.somesite.com/myendpoint
register: http_output
# Set a HOST header to hit an internal webserver:
---
- name: Hit a Specific Host on the Server
win_uri:
url: http://my.internal.server.com
method: GET
headers:
host: "www.somesite.com"
# Do a HEAD request on an endpoint
---
- name: Perform a HEAD on an Endpoint
win_uri:
url: http://www.somesite.com
method: HEAD
# Post a body to an endpoint
---
- name: POST a Body to an Endpoint
win_uri:
url: http://www.somesite.com
method: POST
body: "{ 'some': 'json' }"
"""
RETURN = """
url:
description: The Target URL
returned: always
type: string
sample: "https://www.ansible.com"
method:
description: The HTTP method used.
returned: always
type: string
sample: "GET"
content_type:
description: The "content-type" header used.
returned: always
type: string
sample: "application/json"
use_basic_parsing:
description: The state of the "use_basic_parsing" flag.
returned: always
type: bool
sample: True
body:
description: The content of the body used
returned: when body is specified
type: string
sample: '{"id":1}'
version_added: "2.3"
status_code:
description: The HTTP Status Code of the response.
returned: success
type: int
sample: 200
status_description:
description: A summary of the status.
returned: success
type: string
stample: "OK"
raw_content:
description: The raw content of the HTTP response.
returned: success
type: string
sample: 'HTTP/1.1 200 OK\nX-XSS-Protection: 1; mode=block\nX-Frame-Options: SAMEORIGIN\nAlternate-Protocol: 443:quic,p=1\nAlt-Svc: quic="www.google.com:443"; ma=2592000; v="30,29,28,27,26,25",quic=":443"; ma=2...'
headers:
description: The Headers of the response.
returned: success
type: dict
sample: {"Content-Type": "application/json"}
raw_content_length:
description: The byte size of the response.
returned: success
type: int
sample: 54447
"""
| gundalow/ansible-modules-extras | windows/win_uri.py | Python | gpl-3.0 | 4,484 | 0.000669 |
"""private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from lib.settings_base import CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING
from .. import splitstrip
import private_base as private
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['sa_pool_key'] = 'master'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.MemcachedCache',
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
## Celery
BROKER_URL = private.BROKER_URL
CELERY_ALWAYS_EAGER = True
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTION_ICONS_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
HERA = []
LOGGING['loggers'].update({
'z.task': { 'level': logging.DEBUG },
'z.hera': { 'level': logging.INFO },
'z.redis': { 'level': logging.DEBUG },
'z.pool': { 'level': logging.ERROR },
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = ('https://www.google.com/recaptcha/api/challenge?k=%s' % RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = 20
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'addons-landfill'
BUILDER_SECRET_KEY = private.BUILDER_SECRET_KEY
BUILDER_VERSIONS_URL = "https://builder-addons-dev.allizom.org/repackage/sdk-versions/"
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_landfill' % v) for k, v in ES_INDEXES.items())
BUILDER_UPGRADE_URL = "https://builder-addons-dev.allizom.org/repackage/rebuild/"
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = True
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
PERF_TEST_URL = 'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi'
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
ALLOW_SELF_REVIEWS = True
AES_KEYS = private.AES_KEYS
|
anaran/olympia
|
sites/landfill/settings_base.py
|
Python
|
bsd-3-clause
| 5,402
| 0.002962
|
#
# Copyright (C) 2000 Stefan Seefeld
# Copyright (C) 2000 Stephen Davies
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
"""Abstract Syntax Tree classes.
This file contains classes which encapsulate nodes in the ASG. The base class
is the Declaration class that encapsulates a named declaration. All names used
are scoped tuples.
Also defined in module scope are the constants DEFAULT, PUBLIC, PROTECTED and
PRIVATE.
"""
# Accessibility constants
DEFAULT = 0
PUBLIC = 1
PROTECTED = 2
PRIVATE = 3
def ccmp(a,b):
"""Compares classes of two objects"""
return cmp(type(a),type(b)) or cmp(a.__class__,b.__class__)
class Error(Exception):
"""Exception class used by ASG internals."""
def __init__(self, err):
self.err = err
def __repr__(self):
return self.err
class Debugger(type):
"""Wrap the object's 'accept' method, printing out the visitor's type.
Useful for tracing visitors visiting declarations."""
def __init__(cls, name, bases, dict):
accept = dict['accept']
"The original instancemethod."
def accept_wrapper(self, visitor):
"The wrapper. The original 'accept' method is part of its closure."
print '%s accepting %s.%s'%(self.__class__.__name__,
visitor.__module__,
visitor.__class__.__name__)
accept(self, visitor)
setattr(cls, 'accept', accept_wrapper)
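# Usage sketch: opt a declaration class into tracing by setting
# '__metaclass__ = Debugger' on it (see the commented-out line in
# Declaration below); each 'accept' call then prints the visiting visitor.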
class TypeId(object):
"""Type-id abstract class."""
def __init__(self, language):
self.language = language
def accept(self, visitor):
"""visitor pattern accept. @see Visitor"""
pass
def __cmp__(self, other):
"Comparison operator"
return cmp(id(self),id(other))
class NamedTypeId(TypeId):
"""Named type abstract class"""
def __init__(self, language, name):
super(NamedTypeId, self).__init__(language)
self.name = name
class BuiltinTypeId(NamedTypeId):
"""Class for builtin type-ids"""
def __init__(self, language, name):
super(BuiltinTypeId, self).__init__(language, name)
def accept(self, visitor): visitor.visit_builtin_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class DependentTypeId(NamedTypeId):
"""Class for template dependent type-ids"""
def __init__(self, language, name):
super(DependentTypeId, self).__init__(language, name)
def accept(self, visitor): visitor.visit_dependent_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class UnknownTypeId(NamedTypeId):
"""Class for not (yet) known type-ids."""
base = TypeId
def __init__(self, language, name):
super(UnknownTypeId, self).__init__(language, name)
self.link = name
def resolve(self, language, name, link):
"""Associate this type-id with an external reference, instead of a declaration."""
self.base.language = language
self.name = name
self.link = link
def accept(self, visitor): visitor.visit_unknown_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class DeclaredTypeId(NamedTypeId):
"""Class for declared types"""
def __init__(self, language, name, declaration):
super(DeclaredTypeId, self).__init__(language, name)
self.declaration = declaration
def accept(self, visitor): visitor.visit_declared_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.name,other.name)
def __str__(self): return str(self.name)
class TemplateId(DeclaredTypeId):
"""Class for template-ids."""
def __init__(self, language, name, declaration, parameters):
super(TemplateId, self).__init__(language, name, declaration)
self.parameters = parameters
def accept(self, visitor): visitor.visit_template_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.parameters,other.parameters)
def __str__(self):
return "template<%s>%s"%(','.join(str(self.parameters)), str(self.name))
class ModifierTypeId(TypeId):
"""Class for alias types with modifiers (such as 'const', '&', etc.)"""
def __init__(self, language, alias, premod, postmod):
super(ModifierTypeId, self).__init__(language)
self.alias = alias
self.premod = premod
self.postmod = postmod
def accept(self, visitor): visitor.visit_modifier_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return (ccmp(self,other)
or cmp(self.alias,other.alias)
or cmp(self.premod,other.premod)
or cmp(self.postmod,other.postmod))
def __str__(self):
return "%s%s%s"%(''.join(['%s '%s for s in self.premod]),
str(self.alias),
''.join(self.postmod))
class ArrayTypeId(TypeId):
"""A modifier that adds array dimensions to a type-id."""
def __init__(self, language, alias, sizes):
super(ArrayTypeId, self).__init__(language)
self.alias = alias
self.sizes = sizes
def accept(self, visitor): visitor.visit_array_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return (ccmp(self,other)
or cmp(self.alias,other.alias)
or cmp(self.sizes,other.sizes))
def __str__(self):
return "%s%s"%(str(self.alias),
''.join(['[%d]'%s for s in self.sizes]))
class ParametrizedTypeId(TypeId):
"""Class for parametrized type-id instances."""
def __init__(self, language, template, parameters):
super(ParametrizedTypeId, self).__init__(language)
self.template = template
self.parameters = parameters
def accept(self, visitor): visitor.visit_parametrized_type_id(self)
def __cmp__(self, other):
"Comparison operator"
return ccmp(self,other) or cmp(self.template,other.template)
def __str__(self):
return "%s<%s>"%('::'.join(self.template.name),
','.join([str(a) for a in self.parameters]))
class FunctionTypeId(TypeId):
"""Class for function (pointer) types."""
def __init__(self, language, return_type, premod, parameters):
super(FunctionTypeId, self).__init__(language)
self.return_type = return_type
self.premod = premod
self.parameters = parameters
def accept(self, visitor): visitor.visit_function_type_id(self)
class Dictionary(dict):
"""Dictionary extends the builtin 'dict' by adding a lookup method to it."""
def lookup(self, name, scopes):
"""locate 'name' in one of the scopes"""
for s in scopes:
scope = list(s)
while len(scope) > 0:
                if self.has_key(tuple(scope) + name):
                    return self[tuple(scope) + name]
else: del scope[-1]
if self.has_key(name):
return self[name]
return None
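    # Illustration (hypothetical names): with a type registered under
    # ('std', 'string'), lookup(('string',), [('std', 'vector')]) first tries
    # ('std', 'vector', 'string'), then ('std', 'string'), then ('string',).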
    def merge(self, other):
        """merge in a foreign dictionary, overriding already defined types only
        if they are of type 'Unknown'."""
        for i in other.keys():
            if self.has_key(i):
                if isinstance(self[i], UnknownTypeId):
                    self[i] = other[i]
            else:
                self[i] = other[i]
class Declaration(object):
"""Declaration base class. Every declaration has a name, type,
accessibility and annotations. The default accessibility is DEFAULT except for
C++ where the Parser always sets it to one of the other three. """
#__metaclass__ = Debugger
def __init__(self, file, line, type, name):
self.file = file
"""SourceFile instance this declaration is part of."""
self.line = line
"""The line number of this declaration."""
self.name = name
"""The (fully qualified) name of the declared object."""
self.type = type
"""A string describing the (language-specific) type of the declared object."""
self.accessibility = DEFAULT
"""Accessibility descriptor for the declared object."""
self.annotations = {}
"""A dictionary holding any annotations of this object."""
def accept(self, visitor):
"""Visit the given visitor"""
visitor.visit_declaration(self)
class Builtin(Declaration):
"""A node for internal use only."""
def accept(self, visitor): visitor.visit_builtin(self)
class UsingDirective(Builtin):
"""Import one module's content into another."""
def accept(self, visitor): visitor.visit_using_directive(self)
class UsingDeclaration(Builtin):
"""Import a declaration into this module."""
def __init__(self, file, line, type, name, alias):
super(UsingDeclaration, self).__init__(file, line, type, name)
self.alias = alias
def accept(self, visitor):
visitor.visit_using_declaration(self)
class Macro(Declaration):
"""A preprocessor macro. Note that macros are not strictly part of the
ASG, and as such are always in the global scope. A macro is "temporary" if
it was #undefined in the same file it was #defined in."""
def __init__(self, file, line, type, name, parameters, text):
Declaration.__init__(self, file, line, type, name)
self.parameters = parameters
self.text = text
def accept(self, visitor): visitor.visit_macro(self)
class Forward(Declaration):
"""Forward declaration"""
def __init__(self, file, line, type, name):
Declaration.__init__(self, file, line, type, name)
self.template = None
self.primary_template = None
self.specializations = []
def accept(self, visitor):
visitor.visit_forward(self)
class Group(Declaration):
"""Base class for groups which contain declarations.
This class doesn't correspond to any language construct.
Rather, it may be used with comment-embedded grouping tags
to regroup declarations that are to appear together in the
manual."""
def __init__(self, file, line, type, name):
Declaration.__init__(self, file, line, type, name)
self.declarations = []
def accept(self, visitor):
visitor.visit_group(self)
class Scope(Declaration):
"""Base class for scopes (named groups)."""
def __init__(self, file, line, type, name):
Declaration.__init__(self, file, line, type, name)
self.declarations = []
def accept(self, visitor): visitor.visit_scope(self)
class Module(Scope):
"""Module class"""
def __init__(self, file, line, type, name):
Scope.__init__(self, file, line, type, name)
def accept(self, visitor): visitor.visit_module(self)
class MetaModule(Module):
"""Module Class that references all places where this Module occurs"""
def __init__(self, type, name):
Scope.__init__(self, None, "", type, name)
self.module_declarations = []
def accept(self, visitor): visitor.visit_meta_module(self)
class Inheritance(object):
"""Inheritance class. This class encapsulates the information about an
inheritance, such as attributes like 'virtual' and 'public' """
def __init__(self, type, parent, attributes):
self.type = type
self.parent = parent
self.attributes = attributes
def accept(self, visitor): visitor.visit_inheritance(self)
class Class(Scope):
def __init__(self, file, line, type, name):
Scope.__init__(self, file, line, type, name)
self.parents = []
self.primary_template = None
def accept(self, visitor): visitor.visit_class(self)
class ClassTemplate(Scope):
def __init__(self, file, line, type, name, template = None):
Scope.__init__(self, file, line, type, name)
self.parents = []
self.template = template
self.primary_template = None
self.specializations = []
def accept(self, visitor): visitor.visit_class_template(self)
class Typedef(Declaration):
def __init__(self, file, line, type, name, alias, constr):
Declaration.__init__(self, file, line, type, name)
self.alias = alias
self.constr = constr
def accept(self, visitor): visitor.visit_typedef(self)
class Enumerator(Declaration):
"""Enumerator of an Enum. Enumerators represent the individual names and
values in an enum."""
def __init__(self, file, line, name, value):
Declaration.__init__(self, file, line, "enumerator", name)
self.value = value
def accept(self, visitor): visitor.visit_enumerator(self)
class Enum(Declaration):
"""Enum declaration. The actual names and values are encapsulated by
Enumerator objects."""
def __init__(self, file, line, name, enumerators):
Declaration.__init__(self, file, line, "enum", name)
self.enumerators = enumerators[:]
#FIXME: the Cxx parser will append a Builtin('eos') to the
#list of enumerators which we need to extract here.
self.eos = None
if self.enumerators and isinstance(self.enumerators[-1], Builtin):
self.eos = self.enumerators.pop()
def accept(self, visitor): visitor.visit_enum(self)
class Variable(Declaration):
"""Variable definition"""
def __init__(self, file, line, type, name, vtype, constr):
Declaration.__init__(self, file, line, type, name)
self.vtype = vtype
self.constr = constr
def accept(self, visitor): visitor.visit_variable(self)
class Const(Declaration):
"""Constant declaration. A constant is a name with a type and value."""
def __init__(self, file, line, type, name, ctype, value):
Declaration.__init__(self, file, line, type, name)
self.ctype = ctype
self.value = value
def accept(self, visitor): visitor.visit_const(self)
class Parameter(object):
"""Function Parameter"""
def __init__(self, premod, type, postmod, name='', value=''):
self.premodifier = premod
self.type = type
self.postmodifier = postmod
self.name = name
self.value = value or ''
def accept(self, visitor): visitor.visit_parameter(self)
def __cmp__(self, other):
"Comparison operator"
#print "Parameter.__cmp__"
return cmp(self.type,other.type)
def __str__(self):
return "%s%s%s"%(' '.join(self.premodifier),
str(self.type),
' '.join(self.postmodifier))
class Function(Declaration):
"""Function declaration.
Note that function names are stored in mangled form to allow overriding.
Formatters should use the real_name to extract the unmangled name."""
def __init__(self, file, line, type, premod, return_type, postmod, name, real_name):
Declaration.__init__(self, file, line, type, name)
self._real_name = real_name
self.premodifier = premod
self.return_type = return_type
self.parameters = []
self.postmodifier = postmod
self.exceptions = []
real_name = property(lambda self: self.name[:-1] + (self._real_name,))
def accept(self, visitor): visitor.visit_function(self)
def __cmp__(self, other):
"Recursively compares the typespec of the function"
return ccmp(self,other) or cmp(self.parameters, other.parameters)
class FunctionTemplate(Function):
def __init__(self, file, line, type, premod, return_type, postmod, name, real_name, template = None):
Function.__init__(self, file, line, type, premod, return_type, postmod, name, real_name)
self.template = template
def accept(self, visitor): visitor.visit_function_template(self)
class Operation(Function):
"""Operation class. An operation is related to a Function and is currently
identical.
"""
def __init__(self, file, line, type, premod, return_type, postmod, name, real_name):
Function.__init__(self, file, line, type, premod, return_type, postmod, name, real_name)
def accept(self, visitor): visitor.visit_operation(self)
class OperationTemplate(Operation):
def __init__(self, file, line, type, premod, return_type, postmod, name, real_name, template = None):
Operation.__init__(self, file, line, type, premod, return_type, postmod, name, real_name)
self.template = template
def accept(self, visitor): visitor.visit_operation_template(self)
class Visitor(object):
"""Visitor for ASG nodes"""
def visit_builtin_type_id(self, type): pass
def visit_unknown_type_id(self, type): pass
def visit_declared_type_id(self, type): pass
def visit_modifier_type_id(self, type): pass
def visit_array_type_id(self, type): pass
def visit_template_id(self, type): pass
def visit_parametrized_type_id(self, type): pass
def visit_function_type_id(self, type): pass
def visit_dependent_type_id(self, type): pass
def visit_declaration(self, node): pass
def visit_builtin(self, node):
"""Visit a Builtin instance. By default do nothing. Processors who
operate on Builtin nodes have to provide an appropriate implementation."""
pass
def visit_using_directive(self, node): self.visit_builtin(node)
def visit_using_declaration(self, node): self.visit_builtin(node)
def visit_macro(self, node): self.visit_declaration(node)
def visit_forward(self, node): self.visit_declaration(node)
def visit_group(self, node):
self.visit_declaration(node)
for d in node.declarations: d.accept(self)
def visit_scope(self, node):
self.visit_declaration(node)
for d in node.declarations: d.accept(self)
def visit_module(self, node): self.visit_scope(node)
def visit_meta_module(self, node): self.visit_module(node)
def visit_class(self, node): self.visit_scope(node)
def visit_class_template(self, node): self.visit_class(node)
def visit_typedef(self, node): self.visit_declaration(node)
def visit_enumerator(self, node): self.visit_declaration(node)
def visit_enum(self, node):
self.visit_declaration(node)
for e in node.enumerators:
e.accept(self)
if node.eos:
node.eos.accept(self)
def visit_variable(self, node): self.visit_declaration(node)
def visit_const(self, node): self.visit_declaration(node)
def visit_function(self, node):
self.visit_declaration(node)
for parameter in node.parameters: parameter.accept(self)
def visit_function_template(self, node): self.visit_function(node)
def visit_operation(self, node): self.visit_function(node)
def visit_operation_template(self, node): self.visit_operation(node)
def visit_parameter(self, node): pass
def visit_inheritance(self, node): pass
class ASG(object):
def __init__(self, declarations = None, types = None):
self.declarations = declarations or []
self.types = types or Dictionary()
def copy(self):
return type(self)(self.declarations[:],
self.types.copy())
def merge(self, other):
self.declarations.extend(other.declarations)
self.types.merge(other.types)
|
stefanseefeld/synopsis
|
Synopsis/ASG.py
|
Python
|
lgpl-2.1
| 19,516
| 0.015936
|
"""calculate bootstrap bounds on a sample game"""
import argparse
import json
import sys
import numpy as np
from gameanalysis import bootstrap
from gameanalysis import gameio
from gameanalysis import regret
from gameanalysis import scriptutils
CHOICES = {
'regret': (bootstrap.mixture_regret, regret.mixture_regret),
'surplus': (bootstrap.mixture_welfare, regret.mixed_social_welfare),
}
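# Each entry pairs the bootstrap sampler used for the percentile estimates
# with the plain statistic applied when '--mean' is requested.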
def add_parser(subparsers):
parser = subparsers.add_parser(
'sgboot', help="""Bootstrap on sample games""", description="""Compute
bootstrap statistics using a sample game with data for every profile in
the support of the subgame and potentially deviations. The return value
is a list with an entry for each mixture in order. Each element is a
dictionary mapping percentile to value.""")
parser.add_argument(
'--input', '-i', metavar='<input-file>', default=sys.stdin,
type=argparse.FileType('r'), help="""Input sample game to run bootstrap
on. (default: stdin)""")
parser.add_argument(
'--output', '-o', metavar='<output-file>', default=sys.stdout,
type=argparse.FileType('w'), help="""Output file for script. (default:
stdout)""")
parser.add_argument(
'profiles', metavar='<profile>', nargs='+', help="""File or string with
profiles from input game for which regrets should be calculated. This
file can be a list or a single profile""")
parser.add_argument(
'-t', '--type', default='regret', choices=CHOICES, help="""What to
return. regret - returns the regret of each profile. surplus - returns
the bootstrap surplus of every profile. (default: %(default)s)""")
parser.add_argument(
'--processes', metavar='num-processes', type=int, help="""The number of
processes when constructing bootstrap samples. Default will use all the
cores available.""")
parser.add_argument(
'--percentiles', '-p', metavar='percentile', type=float, nargs='+',
help="""Percentiles to return in [0, 100]. By default all bootstrap
values will be returned sorted.""")
parser.add_argument(
'--num-bootstraps', '-n', metavar='num-bootstraps', default=101,
type=int, help="""The number of bootstrap samples to acquire. More
samples takes longer, but in general the percentiles requested should
be a multiple of this number minus 1, otherwise there will be some
error due to linear interpolation between points. (default:
%(default)s)""")
parser.add_argument(
'--mean', '-m', action='store_true', help="""Also compute the mean
statistic and return it as well. This will be in each dictionary with
the key 'mean'.""")
return parser
def main(args):
game, serial = gameio.read_samplegame(json.load(args.input))
profiles = np.concatenate([serial.from_prof_json(p)[None] for p
in scriptutils.load_profiles(args.profiles)])
bootf, meanf = CHOICES[args.type]
results = bootf(game, profiles, args.num_bootstraps, args.percentiles,
args.processes)
if args.percentiles is None:
args.percentiles = np.linspace(0, 100, args.num_bootstraps)
percentile_strings = [str(p).rstrip('0').rstrip('.')
for p in args.percentiles]
jresults = [{p: v.item() for p, v in zip(percentile_strings, boots)}
for boots in results]
if args.mean:
for jres, mix in zip(jresults, profiles):
jres['mean'] = meanf(game, mix)
json.dump(jresults, args.output)
args.output.write('\n')
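# Example invocation (the 'ga' entry point and file names are assumptions):
#   ga sgboot -i samplegame.json -t regret -p 5 95 mixture.json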
|
yackj/GameAnalysis
|
gameanalysis/script/sgboot.py
|
Python
|
apache-2.0
| 3,682
| 0
|
import shutil
import os
import base64
from PySide.QtCore import *
from PySide.QtGui import *
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin, MayaQWidgetDockableMixin
from maya import cmds
from luka import Luka
from luka.gui.qt import TakeSnapshotWidget, SnapshotListWidget
__all__ = ['LukaTakeSnapshotUI', 'LukaUI']
def currentScenePath():
return cmds.file(q=True, sceneName=True).replace('/', '\\')
class LukaTakeSnapshotUI(MayaQWidgetBaseMixin, TakeSnapshotWidget):
def __init__(self, luka=None, *args, **kwargs):
if luka is None:
scene = currentScenePath()
luka = Luka(scene, load=True) if len(scene) > 0 else None
super(LukaTakeSnapshotUI, self).__init__(luka=luka, *args, **kwargs)
def take_snapshot(self):
cmds.file(save=True)
super(LukaTakeSnapshotUI, self).take_snapshot()
class LukaUI(MayaQWidgetBaseMixin, SnapshotListWidget):
def __init__(self, *args, **kwargs):
scene = currentScenePath()
self.luka = Luka(scene, load=True) if len(scene) > 0 else None
super(LukaUI, self).__init__(luka=self.luka, *args, **kwargs)
def initUI(self):
super(LukaUI, self).initUI()
self.newSnapshotButton = QPushButton("New Snapshot", self)
self.newSnapshotButton.clicked.connect(self.showTakeSnapshotUI)
self.layout.addWidget(self.newSnapshotButton)
def showTakeSnapshotUI(self):
ui = LukaTakeSnapshotUI(luka=self.luka)
ui.show()
    def restore(self, s):
        v = cmds.confirmDialog(
            title='Restore snapshot',
            message='All changes including SAVED will be lost. Are you sure?',
            button=['OK','Cancel'], defaultButton='OK',
            cancelButton='Cancel', dismissString='Cancel')
        if v != 'OK':
            return
        # Apply the snapshot only after the user confirms, mirroring remove().
        super(LukaUI, self).restore(s)
        cmds.file(cmds.file(q=True, sceneName=True), open=True, force=True)
def remove(self, s):
v = cmds.confirmDialog(
title='Remove snapshot',
message='Are you sure?',
button=['OK','Cancel'], defaultButton='OK',
cancelButton='Cancel', dismissString='Cancel')
if v != 'OK':
return
super(LukaUI, self).remove(s)
|
zakuro9715/luka
|
luka_maya.py
|
Python
|
gpl-3.0
| 2,318
| 0.006471
|
import unittest
import tests
import tests.test_logic
import tests.test_graph
import tests.test_output
|
sambayless/monosat
|
src/monosat/api/python/tests/__init__.py
|
Python
|
mit
| 102
| 0
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.strreplace
~~~~~~~~~~~~~~~~~~~~~~~
Provides functions for string search-and-replace.
You provide the module with the text string to search for, and what to replace
it with. Multiple search-and-replace pairs can be added. You can specify to
replace all occurrences of the search string, just the first occurrence, or the
last occurrence.
Examples:
basic usage::
>>> from riko.modules.strreplace import pipe
>>>
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['strreplace'] == 'bye world'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from functools import reduce
from . import processor
from riko.bado import coroutine, return_value, itertools as ait
OPTS = {
'listize': True, 'ftype': 'text', 'field': 'content', 'extract': 'rule'}
DEFAULTS = {}
logger = gogo.Gogo(__name__, monolog=True).logger
OPS = {
'first': lambda word, rule: word.replace(rule.find, rule.replace, 1),
'last': lambda word, rule: rule.replace.join(word.rsplit(rule.find, 1)),
'every': lambda word, rule: word.replace(rule.find, rule.replace),
}
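# Illustration: with find='o' and replace='0' applied to 'foo bar foo',
# 'first' gives 'f0o bar foo', 'last' gives 'foo bar fo0',
# and 'every' gives 'f00 bar f00'.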
def reducer(word, rule):
return OPS.get(rule.param, OPS['every'])(word, rule)
@coroutine
def async_parser(word, rules, skip=False, **kwargs):
""" Asynchronously parses the pipe content
Args:
word (str): The string to transform
rules (List[obj]): the parsed rules (Objectify instances).
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: strreplace)
stream (dict): The original item
Returns:
Deferred: twisted.internet.defer.Deferred item
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>> from meza.fntools import Objectify
>>>
>>> def run(reactor):
... item = {'content': 'hello world'}
... conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
... rule = Objectify(conf['rule'])
... kwargs = {'stream': item, 'conf': conf}
... d = async_parser(item['content'], [rule], **kwargs)
... return d.addCallbacks(print, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
bye world
"""
if skip:
value = kwargs['stream']
else:
value = yield ait.coop_reduce(reducer, rules, word)
return_value(value)
def parser(word, rules, skip=False, **kwargs):
""" Parses the pipe content
Args:
word (str): The string to transform
rules (List[obj]): the parsed rules (Objectify instances).
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
        assign (str): Attribute to assign parsed content (default: strreplace)
stream (dict): The original item
Returns:
dict: The item
Examples:
>>> from meza.fntools import Objectify
>>>
>>> item = {'content': 'hello world'}
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> rule = Objectify(conf['rule'])
>>> kwargs = {'stream': item, 'conf': conf}
>>> parser(item['content'], [rule], **kwargs) == 'bye world'
True
"""
return kwargs['stream'] if skip else reduce(reducer, rules, word)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A processor module that asynchronously replaces the text of a field of
an item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'rule'.
rule (dict): can be either a dict or list of dicts. Must contain
the keys 'find' and 'replace'. May contain the key 'param'.
find (str): The string to find.
replace (str): The string replacement.
param (str): The type of replacement. Must be one of: 'first',
'last', or 'every' (default: 'every').
assign (str): Attribute to assign parsed content (default: strreplace)
field (str): Item attribute to operate on (default: 'content')
Returns:
Deferred: twisted.internet.defer.Deferred item with replaced content
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['strreplace'])
... conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
... d = async_pipe({'content': 'hello world'}, conf=conf)
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
bye world
"""
return async_parser(*args, **kwargs)
@processor(**OPTS)
def pipe(*args, **kwargs):
"""A processor that replaces the text of a field of an item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'rule'.
rule (dict): can be either a dict or list of dicts. Must contain
the keys 'find' and 'replace'. May contain the key 'param'.
find (str): The string to find.
replace (str): The string replacement.
param (str): The type of replacement. Must be one of: 'first',
'last', or 'every' (default: 'every').
assign (str): Attribute to assign parsed content (default: strreplace)
field (str): Item attribute to operate on (default: 'content')
Yields:
dict: an item with replaced content
Examples:
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['strreplace'] == 'bye world'
True
>>> rules = [
... {'find': 'Gr', 'replace': 'M'},
... {'find': 'e', 'replace': 'a', 'param': 'last'}]
>>> conf = {'rule': rules}
>>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
>>> item = {'title': 'Greetings'}
>>> next(pipe(item, **kwargs))['result'] == 'Meatings'
True
"""
return parser(*args, **kwargs)
|
nerevu/riko
|
riko/modules/strreplace.py
|
Python
|
mit
| 6,865
| 0.000146
|
# -*- coding: utf-8 -*-
import sys
import os
import shutil
import psutil
import subprocess
import time
import numpy as np
import itertools
# from matplotlib import pyplot
from routeGen import routeGen
from sumoConfigGen import sumoConfigGen
from stripXML import stripXML
import multiprocessing as mp
from glob import glob
#os.chdir(os.path.dirname(sys.argv[0]))
sys.path.insert(0, '../sumoAPI')
import GPSControl
import fixedTimeControl
import actuatedControl
import HybridVAControl
import HVA1
import sumoConnect
import readJunctionData
print(sys.path)
import traci
def simulation(x):
try:
assert len(x) == 4
runtime = time.time()
# Define Simulation Params
modelName, tlLogic, CAVratio, run = x
procID = int(mp.current_process().name[-1])
model = './models/{}_{}/'.format(modelName, procID)
simport = 8812 + procID
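        # Each worker process gets its own model copy and TraCI port so
        # parallel SUMO instances do not collide.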
N = 10000 # Last time to insert vehicle at (10800=3hrs)
stepSize = 0.1
CAVtau = 1.0
configFile = model + modelName + ".sumocfg"
# Configure the Map of controllers to be run
tlControlMap = {'fixedTime': fixedTimeControl.fixedTimeControl,
'VA': actuatedControl.actuatedControl,
'GPSVA': GPSControl.GPSControl,
'HVA1': HVA1.HybridVA1Control,
'HVA': HybridVAControl.HybridVAControl}
tlController = tlControlMap[tlLogic]
exportPath = '/hardmem/results/' + tlLogic + '/' + modelName + '/'
# Check if model copy for this process exists
if not os.path.isdir(model):
shutil.copytree('./models/{}/'.format(modelName), model)
# this is relative to script not cfg file
if not os.path.exists(exportPath):
os.makedirs(exportPath)
        #seed = int(sum([ord(x) for x in modelName + tlLogic]) + int(10*CAVratio) + run)
seed = int(run)
vehNr, lastVeh = routeGen(N, CAVratio, CAVtau,
routeFile=model + modelName + '.rou.xml',
seed=seed)
# Edit the the output filenames in sumoConfig
sumoConfigGen(modelName, configFile, exportPath,
CAVratio, stepSize, run, simport)
# Connect to model
connector = sumoConnect.sumoConnect(configFile, gui=False, port=simport)
connector.launchSumoAndConnect()
# Get junction data
jd = readJunctionData.readJunctionData(model + modelName + ".jcn.xml")
junctionsList = jd.getJunctionData()
# Add controller models to junctions
controllerList = []
for junction in junctionsList:
controllerList.append(tlController(junction))
# Step simulation while there are vehicles
while traci.simulation.getMinExpectedNumber():
# connector.runSimulationForSeconds(1)
traci.simulationStep()
for controller in controllerList:
controller.process()
# Disconnect from current configuration
connector.disconnect()
# Strip unused data from results file
ext = '{AVR:03d}_{Nrun:03d}.xml'.format(AVR=int(CAVratio*100), Nrun=run)
for filename in ['queuedata', 'tripinfo']:
target = exportPath+filename+ext
stripXML(target)
runtime = time.gmtime(time.time() - runtime)
print('DONE: {}, {}, Run: {:03d}, AVR: {:03d}%, Runtime: {}\n'
.format(modelName, tlLogic, run, int(CAVratio*100),
time.strftime("%H:%M:%S", runtime)))
return True
    except Exception:
# Print if an experiment fails and provide repr of params to repeat run
print('***FAILURE*** ' + repr(x))
return False
################################################################################
# MAIN SIMULATION DEFINITION
################################################################################
models = ['simpleT', 'twinT', 'corridor', 'manhattan']
#tlControllers = ['fixedTime', 'VA', 'HVA', 'GPSVA']
tlControllers = ['HVA']
CAVratios = np.linspace(0, 1, 11)
if len(sys.argv) >=3:
runArgs = sys.argv[1:3]
runArgs = [int(arg) for arg in runArgs]
runArgs.sort()
runStart, runEnd = runArgs
else:
runStart, runEnd = [1, 11]
runIDs = np.arange(runStart, runEnd)
configs = []
# Generate all simulation configs for fixed time and VA
#configs += list(itertools.product(models, ['VA'], [0.], runIDs))
# # Generate runs for CAV dependent controllers
configs += list(itertools.product(models[-1:], ['HVA'], [0.0,0.1,0.2], [10]))
print(len(configs))
nproc = 3
print('Starting simulation on {} cores'.format(nproc))
# define work pool
workpool = mp.Pool(processes=nproc)
# Run simulations in parallel
result = workpool.map(simulation, configs, chunksize=1)
# remove spawned model copies
for rmdir in glob('./models/*_*'):
if os.path.isdir(rmdir):
shutil.rmtree(rmdir)
# Inform of failed experiments
if all(result):
print('Simulations complete, no errors')
else:
print('Failed Experiment Runs:')
for i, j in zip(configs, result):
if not j:
print(i)
|
cbrafter/TRB18_GPSVA
|
codes/mainCode/ParallelSpecialMac.py
|
Python
|
mit
| 5,186
| 0.005401
|
import pygame
music_on = 0
# Set up a button class for later usage
class Button:
def __init__(self, x, y, w, h, img):
self.x = x
self.y = y
self.w = w
self.h = h
self.img = img
self.surface = w * h
def buttonHover(self):
mouse = pygame.mouse.get_pos()
if self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y:
return True
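        # Falls through to an implicit None (falsy) when the cursor is
        # outside the button's rectangle.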
def startMusic():
global music_on
if music_on == 0:
music_on = 1
pygame.mixer.music.load('music.ogg')
pygame.mixer.music.play(-1, 0.0)
def start_program():
# Imports
import Rules
import Game
import Instructions
import Music
import Highscore
# Initialize pygame and music
pygame.init()
startMusic()
# Load in the images
startButtonImg = pygame.image.load('img/StartButtonWhite.png')
startButtonGrayImg = pygame.image.load('img/StartButtonGray.png')
quitButtonImg = pygame.image.load('img/QuitButtonWhite.png')
quitButtonGrayImg = pygame.image.load('img/QuitButtonGray.png')
backGroundImg = pygame.image.load('img/BackDrop.png')
buttonInstructionImg = pygame.image.load('img/ButtonInstructionWhite.png')
buttonInstructionGrayImg = pygame.image.load('img/ButtonInstructionGray.png')
buttonGameRulesImg = pygame.image.load('img/GameRulesWhite.png')
buttonGameRulesGrayImg = pygame.image.load('img/GameRulesGray.png')
MusicSettingsButtonImg = pygame.image.load('img/MSettings.png')
MusicSettingsGrayButtonImg = pygame.image.load('img/MSettingsGray.png')
HighscoreButtonImg = pygame.image.load('img/ButtonHighscoreWhite.png')
HighscoreButtonGrayImg = pygame.image.load('img/ButtonHighscoreGray.png')
# Set up a display with title
gameDisplay = pygame.display.set_mode((800, 600))
pygame.display.set_caption('Euromast V1.0')
# Create instances of the button
quitButton = Button(430, 220, 100, 50, quitButtonImg)
quitButtonGray = Button(430, 220, 100, 50, quitButtonGrayImg)
startButton = Button(280, 220, 100, 50, startButtonImg)
startButtonGray = Button(280, 220, 100, 50, startButtonGrayImg)
gameRulesButton = Button(280, 326, 250, 50, buttonGameRulesImg)
gameRulesButtonGray = Button(280, 326, 250, 50, buttonGameRulesGrayImg)
buttonInstruction = Button(280, 273, 250, 50, buttonInstructionImg)
buttonInstructionGray = Button(280, 273, 250, 50, buttonInstructionGrayImg)
MSettingsButton = Button(10, 10, 100, 50, MusicSettingsButtonImg)
MSettingsButtonGray = Button(10, 10, 100, 50, MusicSettingsGrayButtonImg)
HighscoreButton = Button(280, 379, 250, 50, HighscoreButtonImg)
HighscoreButtonGray = Button(280, 379, 250, 50, HighscoreButtonGrayImg)
# Initialize game loop
phase = "menu"
loop = True
while loop:
# Check if user wants to quit
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
gameDisplay.fill((0, 0, 0))
if phase == "menu":
gameDisplay.blit(backGroundImg, (0, 0))
# Display the created buttons
gameDisplay.blit(quitButtonGray.img, (quitButton.x, quitButton.y))
gameDisplay.blit(startButtonGray.img, (startButton.x, startButton.y))
gameDisplay.blit(gameRulesButtonGray.img, (gameRulesButton.x, gameRulesButton.y))
gameDisplay.blit(buttonInstructionGray.img, (buttonInstruction.x, buttonInstruction.y))
            gameDisplay.blit(MSettingsButtonGray.img, (MSettingsButton.x, MSettingsButton.y))
gameDisplay.blit(HighscoreButtonGray.img, (HighscoreButton.x, HighscoreButton.y))
# Check if mouse hovers over button
if Button.buttonHover(quitButtonGray):
gameDisplay.blit(quitButton.img, (quitButton.x, quitButton.y))
# Check if the quit button has been pressed for exit functionality
if Button.buttonHover(quitButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
pygame.quit()
quit()
elif Button.buttonHover(startButtonGray):
gameDisplay.blit(startButton.img, (startButton.x, startButton.y))
if Button.buttonHover(startButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "game"
elif Button.buttonHover(gameRulesButtonGray):
gameDisplay.blit(gameRulesButton.img, (gameRulesButton.x, gameRulesButton.y))
if Button.buttonHover(gameRulesButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "rules"
elif Button.buttonHover(buttonInstructionGray):
gameDisplay.blit(buttonInstruction.img, (buttonInstruction.x, buttonInstruction.y))
if Button.buttonHover(buttonInstruction):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "instructions"
elif Button.buttonHover(MSettingsButtonGray):
gameDisplay.blit(MSettingsButton.img, (MSettingsButton.x, MSettingsButton.y))
if Button.buttonHover(MSettingsButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "muzieksettings"
# Button Highscore
elif Button.buttonHover(HighscoreButtonGray):
gameDisplay.blit(HighscoreButton.img, (HighscoreButton.x, HighscoreButton.y))
if Button.buttonHover(HighscoreButton):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
phase = "highscore"
elif phase == "game":
Game.startGame()
elif phase == "rules":
Rules.displayRules()
elif phase == "instructions":
Instructions.startInstructions()
elif phase == "muzieksettings":
Music.menu_settings()
elif phase == 'highscore':
Highscore.highscore_tab()
pygame.display.flip()
start_program()
|
QuinDiesel/CommitSudoku-Project-Game
|
Euromast/menu.py
|
Python
|
mit
| 6,569
| 0.003349
|
from fabric.api import hosts, run, sudo, local
from fabric.contrib.console import confirm
from fabric.utils import puts,warn
DEV_PROVISIONING_UUID = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
DEV_SIGN = "Mobi Ten"
DEV_APP_NAME = "Gym Mama"
DEV_APP_ID = 'com.mobiten.gym_mama'
TITANIUM_SDK_VERSION = '1.5.1'
IPHONE_SDK_VERSION = '4.2'
DEVICE_FAMILY = 'iphone'
BUILDER = "/Library/Application\ Support/Titanium/mobilesdk/osx/%s/iphone/builder.py" % (TITANIUM_SDK_VERSION)
def coffee():
local("coffee --watch -o Resources/js/ --compile App/*.coffee ", False)
def debug():
local("%s simulator %s ./ %s %s %s" % (BUILDER,IPHONE_SDK_VERSION,DEV_APP_ID,DEV_APP_NAME,DEVICE_FAMILY), False)
def device():
local("%s install %s ./ %s %s %s" % (BUILDER,IPHONE_SDK_VERSION, DEV_APP_ID, DEV_APP_NAME, DEV_PROVISIONING_UUID, DEV_SIGN))
def package():
print "nothing"
def clean():
if confirm("Clean will delete any files that is ignored by gitignore\nand also any files that not yet tracked by git.\nAre your sure you want to continue ?",default=False):
warn("Deleting Untracked and Ignore Files, you have been WARNED!")
local("git clean -d -f")
local("mkdir -p build/iphone")
puts("Project is now clean.")
else:
warn("CLEAN IS CANCELLED.")
|
leonyeh/CoffeeScript-Hello-World
|
fabfile.py
|
Python
|
apache-2.0
| 1,250
| 0.0256
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# OpenModes - An eigenmode solver for open electromagnetic resonantors
# Copyright (C) 2013 David Powell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
"""
Various useful constants for electromagnetism. Most of these are already
defined in scipy.constants, but are duplicated here for convenience.
"""
import numpy as np
pi = np.pi
c = 299792458.0               # speed of light in vacuum (m/s)
mu_0 = 4e-7*pi                # permeability of free space (H/m)
epsilon_0 = 1.0 / (mu_0*c*c)  # permittivity of free space (F/m)
eta_0 = mu_0*c                # wave impedance of free space (ohms)
|
DavidPowell/OpenModes
|
openmodes/constants.py
|
Python
|
gpl-3.0
| 1,192
| 0.001678
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2011,2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Wire Data Helper"""
import dns.exception
from ._compat import binary_type, string_types, PY2
# Figure out what constant python passes for an unspecified slice bound.
# It's supposed to be sys.maxint, yet on 64-bit windows sys.maxint is 2^31 - 1
# but Python uses 2^63 - 1 as the constant. Rather than making pointless
# extra comparisons, duplicating code, or weakening WireData, we just figure
# out what constant Python will use.
class _SliceUnspecifiedBound(binary_type):
def __getitem__(self, key):
return key.stop
if PY2:
def __getslice__(self, i, j): # pylint: disable=getslice-method
return self.__getitem__(slice(i, j))
_unspecified_bound = _SliceUnspecifiedBound()[1:]
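# Under Python 3 a slice like [1:] reaches __getitem__ with stop=None, so
# _unspecified_bound is simply None there; it is only consulted on PY2.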
class WireData(binary_type):
# WireData is a binary type with stricter slicing
def __getitem__(self, key):
try:
if isinstance(key, slice):
# make sure we are not going outside of valid ranges,
# do stricter control of boundaries than python does
# by default
start = key.start
stop = key.stop
if PY2:
if stop == _unspecified_bound:
# handle the case where the right bound is unspecified
stop = len(self)
if start < 0 or stop < 0:
raise dns.exception.FormError
# If it's not an empty slice, access left and right bounds
# to make sure they're valid
if start != stop:
super(WireData, self).__getitem__(start)
super(WireData, self).__getitem__(stop - 1)
else:
for index in (start, stop):
if index is None:
continue
elif abs(index) > len(self):
raise dns.exception.FormError
return WireData(super(WireData, self).__getitem__(
slice(start, stop)))
return bytearray(self.unwrap())[key]
except IndexError:
raise dns.exception.FormError
if PY2:
def __getslice__(self, i, j): # pylint: disable=getslice-method
return self.__getitem__(slice(i, j))
    def __iter__(self):
        i = 0
        while 1:
            try:
                yield self[i]
                i += 1
            except dns.exception.FormError:
                # Returning ends the generator cleanly; raising StopIteration
                # inside a generator is an error under PEP 479 (Python 3.7+).
                return
def unwrap(self):
return binary_type(self)
def maybe_wrap(wire):
if isinstance(wire, WireData):
return wire
elif isinstance(wire, binary_type):
return WireData(wire)
elif isinstance(wire, string_types):
return WireData(wire.encode())
raise ValueError("unhandled type %s" % type(wire))
|
waynechu/PythonProject
|
dns/wiredata.py
|
Python
|
mit
| 3,751
| 0.000267
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Diceware documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 26 22:19:13 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
devdir = os.environ.get('DEVDIR')
if not devdir:
    print("Unable to obtain $DEVDIR from the environment")
    exit(-1)
sys.path.insert(0, devdir + '/diceware')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Diceware'
copyright = '2015, Tonko Mulder'
author = 'Tonko Mulder'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dicewaredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Diceware.tex', 'Diceware Documentation',
'Tonko Mulder', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'diceware', 'Diceware Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Diceware', 'Diceware Documentation',
author, 'Diceware', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
Treggats/DicewarePassphrase
|
docs/conf.py
|
Python
|
gpl-2.0
| 11,767
| 0.006289
|
# Copyright (c) 2011, Apple Inc. All rights reserved.
# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation.
import fnmatch
import json
from webkitpy.common.editdistance import edit_distance
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# The list of contributors has been moved to contributors.json
class Contributor(object):
def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
assert(name)
assert(email_or_emails)
self.full_name = name
if isinstance(email_or_emails, str):
self.emails = [email_or_emails]
else:
self.emails = email_or_emails
self.emails = map(lambda email: email.lower(), self.emails) # Emails are case-insensitive.
if isinstance(irc_nickname_or_nicknames, str):
self.irc_nicknames = [irc_nickname_or_nicknames]
else:
self.irc_nicknames = irc_nickname_or_nicknames
self.can_commit = False
self.can_review = False
def bugzilla_email(self):
# FIXME: We're assuming the first email is a valid bugzilla email,
# which might not be right.
return self.emails[0]
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return '"%s" <%s>' % (self.full_name, self.emails[0])
def contains_string(self, search_string):
string = search_string.lower()
if string in self.full_name.lower():
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if string in nickname.lower():
return True
for email in self.emails:
if string in email:
return True
return False
def matches_glob(self, glob_string):
if fnmatch.fnmatch(self.full_name, glob_string):
return True
if self.irc_nicknames:
for nickname in self.irc_nicknames:
if fnmatch.fnmatch(nickname, glob_string):
return True
for email in self.emails:
if fnmatch.fnmatch(email, glob_string):
return True
return False
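# A minimal matching sketch (hypothetical contributor, not from contributors.json):
#
#   c = Contributor('Jane Doe', 'jdoe@example.com', 'janed')
#   c.contains_string('doe')          # True -- substring of the full name
#   c.matches_glob('*@example.com')   # True -- glob matched against the email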
class Committer(Contributor):
def __init__(self, name, email_or_emails, irc_nickname=None):
Contributor.__init__(self, name, email_or_emails, irc_nickname)
self.can_commit = True
class Reviewer(Committer):
def __init__(self, name, email_or_emails, irc_nickname=None):
Committer.__init__(self, name, email_or_emails, irc_nickname)
self.can_review = True
class CommitterList(object):
# Committers and reviewers are passed in to allow easy testing
def __init__(self,
committers=[],
reviewers=[],
contributors=[]):
# FIXME: These arguments only exist for testing. Clean it up.
if not (committers or reviewers or contributors):
loaded_data = self.load_json()
contributors = loaded_data['Contributors']
committers = loaded_data['Committers']
reviewers = loaded_data['Reviewers']
self._contributors = contributors + committers + reviewers
self._committers = committers + reviewers
self._reviewers = reviewers
self._contributors_by_name = {}
self._accounts_by_email = {}
self._accounts_by_login = {}
@staticmethod
@memoized
def load_json():
filesystem = FileSystem()
json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
contributors = json.loads(filesystem.read_text_file(json_path))
return {
'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
}
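    # Shape of the memoized result (a sketch): {'Contributors': [Contributor, ...],
    # 'Committers': [Committer, ...], 'Reviewers': [Reviewer, ...]};
    # contributors.json is read at most once per process.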
def contributors(self):
return self._contributors
def committers(self):
return self._committers
def reviewers(self):
return self._reviewers
def _name_to_contributor_map(self):
if not len(self._contributors_by_name):
for contributor in self._contributors:
assert(contributor.full_name)
assert(contributor.full_name.lower() not in self._contributors_by_name) # We should never have duplicate names.
self._contributors_by_name[contributor.full_name.lower()] = contributor
return self._contributors_by_name
def _email_to_account_map(self):
if not len(self._accounts_by_email):
for account in self._contributors:
for email in account.emails:
assert(email not in self._accounts_by_email) # We should never have duplicate emails.
self._accounts_by_email[email] = account
return self._accounts_by_email
def _login_to_account_map(self):
if not len(self._accounts_by_login):
for account in self._contributors:
if account.emails:
login = account.bugzilla_email()
assert(login not in self._accounts_by_login) # We should never have duplicate emails.
self._accounts_by_login[login] = account
return self._accounts_by_login
def _committer_only(self, record):
if record and not record.can_commit:
return None
return record
def _reviewer_only(self, record):
if record and not record.can_review:
return None
return record
def committer_by_name(self, name):
return self._committer_only(self.contributor_by_name(name))
def contributor_by_irc_nickname(self, irc_nickname):
for contributor in self.contributors():
# FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
return contributor
return None
def contributors_by_search_string(self, string):
glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())
def contributors_by_email_username(self, string):
string = string + '@'
result = []
for contributor in self.contributors():
for email in contributor.emails:
if email.startswith(string):
result.append(contributor)
break
return result
def _contributor_name_shorthands(self, contributor):
if ' ' not in contributor.full_name:
return []
split_fullname = contributor.full_name.split()
first_name = split_fullname[0]
last_name = split_fullname[-1]
return first_name, last_name, first_name + last_name[0], first_name + ' ' + last_name[0]
def _tokenize_contributor_name(self, contributor):
full_name_in_lowercase = contributor.full_name.lower()
tokens = [full_name_in_lowercase] + full_name_in_lowercase.split()
if contributor.irc_nicknames:
return tokens + [nickname.lower() for nickname in contributor.irc_nicknames if len(nickname) > 5]
return tokens
def contributors_by_fuzzy_match(self, string):
string_in_lowercase = string.lower()
# 1. Exact match for fullname, email and irc_nicknames
account = self.contributor_by_name(string_in_lowercase) or self.contributor_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
if account:
return [account], 0
# 2. Exact match for email username (before @)
accounts = self.contributors_by_email_username(string_in_lowercase)
if accounts and len(accounts) == 1:
return accounts, 0
# 3. Exact match for first name, last name, and first name + initial combinations such as "Dan B" and "Tim H"
accounts = [contributor for contributor in self.contributors() if string in self._contributor_name_shorthands(contributor)]
if accounts and len(accounts) == 1:
return accounts, 0
# 4. Finally, fuzzy-match using edit-distance
string = string_in_lowercase
contributorWithMinDistance = []
minDistance = len(string) / 2 - 1
for contributor in self.contributors():
tokens = self._tokenize_contributor_name(contributor)
editdistances = [edit_distance(token, string) for token in tokens if abs(len(token) - len(string)) <= minDistance]
if not editdistances:
continue
distance = min(editdistances)
if distance == minDistance:
contributorWithMinDistance.append(contributor)
elif distance < minDistance:
contributorWithMinDistance = [contributor]
minDistance = distance
if not len(contributorWithMinDistance):
return [], len(string)
return contributorWithMinDistance, minDistance
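    # A minimal usage sketch (hypothetical lookup; results depend on contributors.json):
    #
    #   matches, distance = CommitterList().contributors_by_fuzzy_match('Tim H')
    #   # `matches` holds the closest Contributor objects; `distance` is 0 for an
    #   # exact name/email/nick/shorthand hit and grows with the edit distance.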
def contributor_by_email(self, email):
return self._email_to_account_map().get(email.lower()) if email else None
def contributor_by_name(self, name):
return self._name_to_contributor_map().get(name.lower()) if name else None
def committer_by_email(self, email):
return self._committer_only(self.contributor_by_email(email))
def reviewer_by_email(self, email):
return self._reviewer_only(self.contributor_by_email(email))
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/Scripts/webkitpy/common/config/committers.py
|
Python
|
bsd-3-clause
| 11,526
| 0.001822
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: default
type: stdout
short_description: default Ansible screen output
version_added: historical
description:
- This is the default output callback for ansible-playbook.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
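# A minimal configuration sketch (standard ansible.cfg layout; 'default' is the
# shipped default, so this entry is usually implicit):
#
#   [defaults]
#   stdout_callback = default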
from ansible import constants as C
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self):
self._play = None
self._last_task_banner = None
super(CallbackModule, self).__init__()
def v2_runner_on_failed(self, result, ignore_errors=False):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if delegated_vars:
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result):
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: [%s]" % result._host.get_name()
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
if self._play.strategy != 'free':
self._print_task_banner(task)
def _print_task_banner(self, task):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
        # machine and we haven't run it there yet.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
self._display.banner(u"TASK [%s%s]" % (task.get_name().strip(), args))
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
self._last_task_banner = task._uuid
def v2_playbook_on_cleanup_task_start(self, task):
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
def v2_playbook_on_handler_task_start(self, task):
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = u"PLAY"
else:
msg = u"PLAY [%s]" % name
self._play = play
self._display.banner(msg)
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
for res in result._result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
self._display.display(diff)
elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
diff = self._get_diff(result._result['diff'])
if diff:
self._display.display(diff)
def v2_runner_item_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
msg = 'changed'
color = C.COLOR_CHANGED
else:
msg = 'ok'
color = C.COLOR_OK
if delegated_vars:
msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += ": [%s]" % result._host.get_name()
msg += " => (item=%s)" % (self._get_item(result._result),)
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
def v2_runner_item_on_failed(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
self._handle_exception(result._result)
msg = "failed: "
if delegated_vars:
msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += "[%s]" % (result._host.get_name())
self._handle_warnings(result._result)
self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
def v2_runner_item_on_skipped(self, result):
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result))
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'ok', t['ok'], None),
colorize(u'changed', t['changed'], None),
colorize(u'unreachable', t['unreachable'], None),
colorize(u'failed', t['failures'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats
if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom: # fallback on constants for inherited plugins missing docs
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
def v2_playbook_on_start(self, playbook):
if self._display.verbosity > 1:
from os.path import basename
self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
if self._display.verbosity > 3:
# show CLI options
if self._options is not None:
for option in dir(self._options):
if option.startswith('_') or option in ['read_file', 'ensure_value', 'read_module']:
continue
val = getattr(self._options, option)
if val:
self._display.vvvv('%s: %s' % (option, val))
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name, result._result['retries'] - result._result['attempts'])
if (self._display.verbosity > 2 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += "Result was: %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_DEBUG)
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/callback/default.py
|
Python
|
bsd-3-clause
| 13,463
| 0.002823
|
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import sys
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
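    # A worked example (cwd-dependent): translate_path('/sub/dir/?q=1') drops the
    # query string, normalizes the path, and returns
    # os.path.join(os.getcwd(), 'sub', 'dir') + '/' (trailing slash preserved).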
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
|
jt6562/XX-Net
|
python27/1.0/lib/SimpleHTTPServer.py
|
Python
|
bsd-2-clause
| 7,826
| 0.001278
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('cms', '0004_auto_20141112_1610'),
]
operations = [
migrations.CreateModel(
name='Text',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('body', tinymce.models.HTMLField(verbose_name='body')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
Korkki/djangocms-text-tinymce
|
djangocms_text_tinymce/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 703
| 0.001422
|
# -*- coding: utf-8 -*-
"""
Tests for validate Internationalization and Module i18n service.
"""
import gettext
from unittest import skip
import mock
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.utils import translation
from django.utils.translation import get_language
from cms.djangoapps.contentstore.tests.utils import AjaxEnabledTestClient
from cms.djangoapps.contentstore.views.preview import _preview_module_system
from openedx.core.lib.edx_six import get_gettext
from xmodule.modulestore.django import ModuleI18nService
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class FakeTranslations(ModuleI18nService):
"""A test GNUTranslations class that takes a map of msg -> translations."""
def __init__(self, translations): # pylint: disable=super-init-not-called
self.translations = translations
def ugettext(self, msgid):
"""
Mock override for ugettext translation operation
"""
return self.translations.get(msgid, msgid)
gettext = ugettext
@staticmethod
def translator(locales_map): # pylint: disable=method-hidden
"""Build mock translator for the given locales.
Returns a mock gettext.translation function that uses
individual TestTranslations to translate in the given locales.
:param locales_map: A map from locale name to a translations map.
{
'es': {'Hi': 'Hola', 'Bye': 'Adios'},
'zh': {'Hi': 'Ni Hao', 'Bye': 'Zaijian'}
}
"""
def _translation(domain, localedir=None, languages=None): # pylint: disable=unused-argument
"""
return gettext.translation for given language
"""
if languages:
language = languages[0]
if language in locales_map:
return FakeTranslations(locales_map[language])
return gettext.NullTranslations()
return _translation
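    # A minimal usage sketch (mirrors how the tests below patch gettext.translation):
    #
    #   _translator = FakeTranslations.translator({'es': {'Hi': 'Hola'}})
    #   catalog = _translator('text', languages=['es'])
    #   catalog.ugettext('Hi')   # -> 'Hola'
    #   catalog.ugettext('Bye')  # -> 'Bye' (unknown msgids fall back unchanged)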
class TestModuleI18nService(ModuleStoreTestCase):
""" Test ModuleI18nService """
def setUp(self):
""" Setting up tests """
super(TestModuleI18nService, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.test_language = 'dummy language'
self.request = mock.Mock()
self.course = CourseFactory.create()
self.field_data = mock.Mock()
self.descriptor = ItemFactory(category="pure", parent=self.course)
self.runtime = _preview_module_system(
self.request,
self.descriptor,
self.field_data,
)
self.addCleanup(translation.deactivate)
def get_module_i18n_service(self, descriptor):
"""
return the module i18n service.
"""
i18n_service = self.runtime.service(descriptor, 'i18n')
self.assertIsNotNone(i18n_service)
self.assertIsInstance(i18n_service, ModuleI18nService)
return i18n_service
def test_django_service_translation_works(self):
"""
Test django translation service works fine.
"""
class wrap_ugettext_with_xyz(object): # pylint: disable=invalid-name
"""
A context manager function that just adds 'XYZ ' to the front
of all strings of the module ugettext function.
"""
def __init__(self, module):
self.module = module
self.old_ugettext = get_gettext(module)
def __enter__(self):
def new_ugettext(*args, **kwargs):
""" custom function """
output = self.old_ugettext(*args, **kwargs)
return "XYZ " + output
self.module.ugettext = new_ugettext
self.module.gettext = new_ugettext
def __exit__(self, _type, _value, _traceback):
self.module.ugettext = self.old_ugettext
self.module.gettext = self.old_ugettext
i18n_service = self.get_module_i18n_service(self.descriptor)
# Activate french, so that if the fr files haven't been loaded, they will be loaded now.
with translation.override("fr"):
french_translation = translation.trans_real._active.value # pylint: disable=protected-access
# wrap the ugettext functions so that 'XYZ ' will prefix each translation
with wrap_ugettext_with_xyz(french_translation):
self.assertEqual(i18n_service.ugettext(self.test_language), 'XYZ dummy language')
# Check that the old ugettext has been put back into place
self.assertEqual(i18n_service.ugettext(self.test_language), 'dummy language')
@mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
@mock.patch('django.utils.translation.gettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
def test_django_translator_in_use_with_empty_block(self):
"""
Test: Django default translator should in use if we have an empty block
"""
i18n_service = ModuleI18nService(None)
self.assertEqual(i18n_service.ugettext(self.test_language), 'XYZ-TEST-LANGUAGE')
@mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
def test_message_catalog_translations(self):
"""
Test: Message catalog from FakeTranslation should return required translations.
"""
_translator = FakeTranslations.translator(
{
'es': {'Hello': 'es-hello-world'},
'fr': {'Hello': 'fr-hello-world'},
},
)
localedir = '/translations'
translation.activate("es")
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
languages=[get_language()])):
i18n_service = self.get_module_i18n_service(self.descriptor)
self.assertEqual(i18n_service.ugettext('Hello'), 'es-hello-world')
translation.activate("ar")
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
languages=[get_language()])):
i18n_service = self.get_module_i18n_service(self.descriptor)
self.assertEqual(get_gettext(i18n_service)('Hello'), 'Hello')
self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'fr-hello-world')
self.assertNotEqual(get_gettext(i18n_service)('Hello'), 'es-hello-world')
translation.activate("fr")
with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
languages=[get_language()])):
i18n_service = self.get_module_i18n_service(self.descriptor)
self.assertEqual(i18n_service.ugettext('Hello'), 'fr-hello-world')
def test_i18n_service_callable(self):
"""
Test: i18n service should be callable in studio.
"""
self.assertTrue(callable(self.runtime._services.get('i18n'))) # pylint: disable=protected-access
class InternationalizationTest(ModuleStoreTestCase):
"""
Tests to validate Internationalization.
"""
CREATE_USER = False
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client
can log them in.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
super(InternationalizationTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.uname = 'testuser'
self.email = 'test+courses@edx.org'
self.password = 'foo'
        # Create the user so we can log them in.
self.user = User.objects.create_user(self.uname, self.email, self.password)
# Note that we do not actually need to do anything
# for registration if we directly mark them active.
self.user.is_active = True
# Staff has access to view all courses
self.user.is_staff = True
self.user.save()
self.course_data = {
'org': 'MITx',
'number': '999',
'display_name': 'Robot Super Course',
}
def test_course_plain_english(self):
"""Test viewing the index page with no courses"""
self.client = AjaxEnabledTestClient() # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.client.login(username=self.uname, password=self.password)
resp = self.client.get_html('/home/')
self.assertContains(resp,
u'<h1 class="page-header">𝓢𝓽𝓾𝓭𝓲𝓸 Home</h1>',
status_code=200,
html=True)
def test_course_explicit_english(self):
"""Test viewing the index page with no courses"""
self.client = AjaxEnabledTestClient() # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.client.login(username=self.uname, password=self.password)
resp = self.client.get_html(
'/home/',
{},
HTTP_ACCEPT_LANGUAGE='en',
)
self.assertContains(resp,
u'<h1 class="page-header">𝓢𝓽𝓾𝓭𝓲𝓸 Home</h1>',
status_code=200,
html=True)
# ****
# NOTE:
# ****
#
# This test will break when we replace this fake 'test' language
# with actual Esperanto. This test will need to be updated with
# actual Esperanto at that time.
    # Test temporarily disabled since it depends on creation of dummy strings
@skip
def test_course_with_accents(self):
"""Test viewing the index page with no courses"""
self.client = AjaxEnabledTestClient() # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.client.login(username=self.uname, password=self.password)
resp = self.client.get_html(
'/home/',
{},
HTTP_ACCEPT_LANGUAGE='eo'
)
TEST_STRING = (
u'<h1 class="title-1">'
u'My \xc7\xf6\xfcrs\xe9s L#'
u'</h1>'
)
self.assertContains(resp,
TEST_STRING,
status_code=200,
html=True)
|
stvstnfrd/edx-platform
|
cms/djangoapps/contentstore/tests/test_i18n.py
|
Python
|
agpl-3.0
| 10,854
| 0.002588
|
#! /usr/bin/env python
# -*- coding=utf-8 -*-
from distutils.core import setup
setup(
name='pythis',
version='1.4',
description='zen of python in Simplified Chinese',
url='https://github.com/vincentping/pythis',
author='Vincent Ping',
author_email='vincentping@gmail.com',
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
py_modules=['pythis'],
keywords='zen python chinese',
)
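# A usage sketch (an assumption from the description -- if pythis mirrors the
# stdlib `this` module, importing it prints the Zen in Simplified Chinese):
#
#   >>> import pythis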
#EOF
|
vincentping/pythis
|
setup.py
|
Python
|
mit
| 1,142
| 0.016637
|
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import sys
import json
from twit.models import User, Tweet, Mention, UserMention
from javanlp.models import Sentence, Sentiment
from javanlp.util import AnnotationException, annotate_document_with_sentiment
class Command(BaseCommand):
"""Annotate tweets and load into database."""
help = __doc__
#def add_arguments(self, parser):
# import argparse
# parser.add_argument('--input', type=argparse.FileType('r'), help="Input file containing a json tweet on each line.")
def handle(self, *args, **options):
for tweet in Tweet.objects.all():
            if Sentence.objects.filter(doc_id=tweet.id).exists(): continue
try:
with transaction.atomic():
for sentence, sentiment in annotate_document_with_sentiment(tweet.id, tweet.text):
sentence.save()
sentiment.sentence = sentence
sentiment.save()
except AnnotationException:
pass # Couldn't annotate this sentence...
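# Invoked like any other Django management command (assuming the `twit` app is
# in INSTALLED_APPS; the command name comes from this file's name):
#
#   python manage.py annotate_features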
|
arunchaganty/aeschines
|
django/twit/management/commands/annotate_features.py
|
Python
|
mit
| 1,154
| 0.006066
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module provides a function that knows what you mean"""
def know_what_i_mean(wink, numwink=2):
""" Prints "Know what I mean?" with a variable number of winks.
Args:
wink (mixed): Represents a wink.
numwink (int): Wink multiplier. Defaults to 2.
Returns:
        str: Arguments are concatenated in a sentence.
Examples:
>>> know_what_i_mean('wink')
'Know what I mean? winkwink, nudge nudge'
>>> know_what_i_mean('wink', 3)
'Know what I mean? winkwinkwink, nudge nudge nudge'
"""
winks = (wink * numwink).strip()
nudges = ('nudge ' * numwink).strip()
retstr = 'Know what I mean? {}, {}'.format(winks, nudges)
return retstr
|
rrafiringa/is210-week-04-warmup
|
task_01.py
|
Python
|
mpl-2.0
| 759
| 0
|
# coding:UTF-8
"""
Copyright (c) 2009-2010 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import gtk
import gobject
import logging
import string
import time as mtime
from dbus import UInt64
from gettext import gettext as _
from tekka import com
from tekka import config
from tekka import signals
from tekka import gui
from tekka.lib import contrast
from tekka.com import sushi, parse_from
from tekka.signals import connect_signal
from tekka.lib import key_dialog
from tekka.lib import dcc_dialog
from tekka.lib import inline_dialog
from tekka.helper import code
from tekka.helper import color
from tekka.helper import markup
from tekka.typecheck import types
init = False
def setup():
sushi.g_connect("maki-connected", maki_connected_cb)
sushi.g_connect("maki-disconnected", maki_disconnected_cb)
def maki_connected_cb(sushi):
global init
if init == False:
# Message-Signals
connect_signal("message", userMessage_cb)
connect_signal("notice", userNotice_cb)
connect_signal("action", userAction_cb)
connect_signal("away_message", userAwayMessage_cb)
connect_signal("ctcp", userCTCP_cb)
connect_signal("error", userError_cb)
# action signals
connect_signal("part", userPart_cb)
connect_signal("join", userJoin_cb)
connect_signal("names", userNames_cb)
connect_signal("quit", userQuit_cb)
connect_signal("kick", userKick_cb)
connect_signal("nick", userNick_cb)
connect_signal("user_away", userAway_cb)
connect_signal("mode", userMode_cb)
connect_signal("oper", userOper_cb)
# Server-Signals
connect_signal("connect", serverConnect_cb)
connect_signal("connected", serverConnected_cb)
connect_signal("motd", serverMOTD_cb)
connect_signal("dcc_send", dcc_send_cb)
# Channel-Signals
connect_signal("topic", channelTopic_cb)
connect_signal("banlist", channelBanlist_cb)
# Maki signals
connect_signal("shutdown", makiShutdown_cb)
init = True
_add_servers()
def maki_disconnected_cb(sushi):
pass
@types (server = basestring)
def _setup_server(server):
tab = gui.tabs.create_server(server)
gui.tabs.add_tab(None, tab,
update_shortcuts = config.get_bool("tekka","server_shortcuts"))
return tab
def _add_servers():
""" Adds all servers to tekka which are reported by maki. """
# in case we're reconnecting, clear all stuff
gui.widgets.get_object("tab_store").clear()
for server in sushi.servers():
tab = _setup_server(server)
tab.connected = True
_add_channels(tab)
try:
toSwitch = gui.tabs.get_all_tabs()[1]
except IndexError:
return
else:
gui.tabs.switch_to_path(toSwitch.path)
def _add_channels(server_tab):
"""
	Adds all channels to tekka which are reported by maki.
"""
channels = sushi.channels(server_tab.name)
for channel in channels:
add = False
nicks, prefixes = sushi.channel_nicks(server_tab.name, channel)
tab = gui.tabs.search_tab(server_tab.name, channel)
if not tab:
tab = gui.tabs.create_channel(server_tab, channel)
add = True
tab.nickList.clear()
tab.nickList.add_nicks(nicks, prefixes)
for nick in nicks:
# FIXME inefficient → nicks, prefixes, aways = …?
tab.nickList.set_away(nick, sushi.user_away(server_tab.name, nick))
tab.topic = sushi.channel_topic(server_tab.name, channel)
tab.topicsetter = ""
if tab.is_active():
gui.set_topic(markup.markup_escape(tab.topic))
gui.mgmt.set_user_count(
len(tab.nickList),
tab.nickList.get_operator_count())
# TODO: handle topic setter
tab.joined = True
tab.connected = True
if add:
gui.tabs.add_tab(server_tab, tab, update_shortcuts = False)
tab.print_last_log()
topic = sushi.channel_topic(server_tab.name, channel)
_report_topic(mtime.time(), server_tab.name, channel, topic)
gui.shortcuts.assign_numeric_tab_shortcuts(gui.tabs.get_all_tabs())
def isHighlighted (server_tab, text):
def has_highlight(text, needle):
punctuation = string.punctuation + " \n\t"
needle = needle.lower()
ln = len(needle)
for line in text.split("\n"):
line = line.lower()
i = line.find(needle)
if i >= 0:
if (line[i-1:i] in punctuation
and line[ln+i:ln+i+1] in punctuation):
return True
return False
highlightwords = config.get_list("chatting", "highlight_words", [])
highlightwords.append(server_tab.nick)
for word in highlightwords:
if has_highlight(text, word):
return True
return False
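# A worked example (hypothetical nick): with server_tab.nick == "nemo",
# isHighlighted() matches "hi nemo!" because the hit is bounded by
# whitespace/punctuation on both sides, but "nemothefish" does not highlight.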
def action_nick_color(nick):
""" return the nick color if color_action_nicks is activated,
otherwise return the default text color
"""
if config.get_bool("colors", "color_action_nicks"):
return color.get_nick_color(nick)
return color.get_color_by_key("text_action")
@types (server = basestring, name = basestring)
def _createTab (server, name):
""" check if tab exists, create it if not, return the tab """
server_tab = gui.tabs.search_tab(server)
if not server_tab:
raise Exception("No server tab in _createTab(%s, %s)" % (server,name))
tab = gui.tabs.search_tab(server, name)
if not tab:
if name[0] in server_tab.support_chantypes:
tab = gui.tabs.create_channel(server_tab, name)
else:
tab = gui.tabs.create_query(server_tab, name)
tab.connected = True
gui.tabs.add_tab(server_tab, tab)
tab.print_last_log()
if tab.name != name:
# the name of the tab differs from the
# real nick, correct this.
tab.name = name
return tab
def _getPrefix(server, channel, nick):
tab = gui.tabs.search_tab(server, channel)
if tab and tab.is_channel():
return tab.nickList.get_prefix(nick)
else:
return ""
@types (tab = gui.tabs.TekkaTab, what = basestring, own = bool)
def _hide_output(tab, what, own = False):
""" Returns bool.
Check if the message type determined by "what"
shall be hidden or not.
tab should be a TekkaServer, -Channel or -Query
"""
if type(tab) == gui.tabs.TekkaChannel:
cat = "channel_%s_%s" % (
tab.server.name.lower(),
tab.name.lower())
elif type(tab) == gui.tabs.TekkaQuery:
cat = "query_%s_%s" % (
tab.server.name.lower(),
tab.name.lower())
else:
return False
hide = what in config.get_list(cat, "hide", [])
hideOwn = what in config.get_list(cat, "hide_own", [])
return ((hide and not own)
or (own and hideOwn)
or (hide and own and not hideOwn))
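# A configuration sketch (hypothetical names): for channel #foo on the server
# "freenode", the lookup category is "channel_freenode_#foo"; putting "mode"
# into its "hide" list suppresses foreign mode messages in that tab.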
@types (servertab = gui.tabs.TekkaServer, tab = gui.tabs.TekkaTab,
what = basestring, own = bool)
def _show_output_exclusive(servertab, tab, what, own = False):
""" Returns bool.
Determine if the message identified by -what- shall
be shown in tab -tab- or not.
-servertab- is not used at the moment.
"""
return not _hide_output(tab, what, own = own)
""" Server callbacks """
def serverConnect_cb(time, server):
"""
maki is connecting to a server.
"""
gui.mgmt.set_useable(True)
tab = gui.tabs.search_tab(server)
if not tab:
tab = _setup_server(server)
if tab.connected:
tab.connected = False
channels = gui.tabs.get_all_tabs(servers = [server])[1:]
if channels:
for channelTab in channels:
if channelTab.is_channel():
					channelTab.joined = False
					channelTab.connected = False
tab.write(time, "Connecting...", msgtype=gui.tabs.ACTION)
gui.status.set_visible("connecting", "Connecting to %s" % server)
def serverConnected_cb(time, server):
"""
maki connected successfuly to a server.
"""
tab = gui.tabs.search_tab(server)
if not tab:
tab = _setup_server(server)
tab.connected = True
# iterate over tabs, set the connected flag to queries
for query in [tab for tab in gui.tabs.get_all_tabs(
servers = [server])[1:] if tab.is_query()]:
query.connected = True
tab.write(time, "Connected.", msgtype=gui.tabs.ACTION)
def serverMOTD_cb(time, server, message, first_time = {}):
""" Server is sending a MOTD.
	Channels are joined 3s after the end of the
MOTD so at the end of the MOTD, make sure
that the prefixes and chantypes are read
correctly.
"""
if not first_time.has_key(server):
tab = gui.tabs.search_tab(server)
if not tab:
tab = _setup_server(server)
else:
tab.update()
gui.status.unset("connecting")
tab.connected = True
first_time[server] = tab
if not message:
# get the prefixes for the server to make
# sure they are correct
tab = first_time[server]
tab.support_prefix = sushi.support_prefix(server)
tab.support_chantypes = sushi.support_chantypes(server)
del first_time[server]
else:
first_time[server].write(time, markup.escape(message),
no_general_output = True)
""" Callbacks for channel interaction """
def _report_topic(time, server, channel, topic):
message = _(u"• Topic for %(channel)s: %(topic)s") % {
"channel": channel,
"topic": markup.escape(topic) }
tab = gui.tabs.search_tab(server, channel)
if not tab:
raise Exception, "%s:%s not found." % (server, channel)
tab.write(time, message, gui.tabs.ACTION, no_general_output=True)
def channelTopic_cb(time, server, from_str, channel, topic):
"""
The topic was set on server "server" in channel "channel" by
user "nick" to "topic".
Apply this!
"""
nick = parse_from(from_str)[0]
serverTab, channelTab = gui.tabs.search_tabs(server, channel)
if not channelTab:
raise Exception("Channel %s does not exist but "
"emits topic signal." % channel)
channelTab.topic = topic
channelTab.topicsetter = nick
if channelTab == gui.tabs.get_current_tab():
gui.mgmt.set_topic(markup.markup_escape(topic))
if not nick:
# just reporting the topic.
_report_topic(time, server, channel, topic)
else:
if nick == serverTab.nick:
message = _(u"• You changed the topic to %(topic)s.")
else:
message = _(u"• %(nick)s changed the topic to %(topic)s.")
channelTab.write(time, message % {
"nick": nick,
"topic": markup.escape(topic) },
gui.tabs.ACTION)
def channelBanlist_cb(time, server, channel, mask, who, when):
"""
ban list signal.
"""
self = code.init_function_attrs(channelBanlist_cb,
tab = gui.tabs.search_tab(server, channel))
if not mask and not who and when == -1:
self.tab.write(time, "End of banlist.", gui.tabs.ACTION)
code.reset_function_attrs(channelBanlist_cb)
else:
timestring = mtime.strftime(
"%Y-%m-%d %H:%M:%S",
mtime.localtime(when))
self.tab.write(
time,
"%s by %s on %s" % (
markup.escape(mask),
markup.escape(who),
markup.escape(timestring)),
gui.tabs.ACTION)
""" Callbacks of maki signals """
def makiShutdown_cb(time):
gui.mgmt.myPrint("Maki is shut down!")
gui.mgmt.set_useable(False)
""" Callbacks for users """
def userAwayMessage_cb(timestamp, server, nick, message):
"""
The user is away and the server gives us the message he left
for us to see why he is away and probably when he's back again.
"""
tab = gui.tabs.get_current_tab()
# XXX: you can still write /msg <nick> and get an away message
	# XXX: in the query window. This would be a more complex fix.
try:
tab.printed_away_message
except AttributeError:
print_it = True
else:
print_it = not tab.printed_away_message
if print_it:
tab.write(
timestamp,
_(u"• %(nick)s is away (%(message)s).") % {
"nick": nick,
"message": markup.escape(message)},
gui.tabs.ACTION)
if tab and tab.name == nick:
tab.printed_away_message = True
def userMessage_cb(timestamp, server, from_str, channel, message):
"""
PRIVMSGs are coming in here.
"""
nick = parse_from(from_str)[0]
(server_tab, channel_tab) = gui.tabs.search_tabs(server, channel)
if server_tab == None:
return # happens if the target server does not exist
if nick.lower() == server_tab.nick.lower():
ownMessage_cb(timestamp, server, channel, message)
return
elif channel.lower() == server_tab.nick.lower():
userQuery_cb(timestamp, server, from_str, message)
return
message = markup.escape(message)
if isHighlighted(server_tab, message):
# set mode to highlight and disable setting
# of text color for the main message (would
# override channelPrint() highlight color)
type = gui.tabs.HIGHMESSAGE
messageString = message
gui.mgmt.set_urgent(True)
else:
# no highlight, normal message type and
# text color is allowed.
type = gui.tabs.MESSAGE
messageString = "<font foreground='%s'>%s</font>" % (
color.get_text_color(nick), message)
channel_tab.write(timestamp,
"<%s<font foreground='%s' weight='bold'>%s</font>> %s" % (
_getPrefix(server, channel, nick),
color.get_nick_color(nick),
markup.escape(nick),
messageString,
), type, group_string=nick)
def ownMessage_cb(timestamp, server, channel, message):
"""
The maki user wrote something on a channel or a query
"""
tab = _createTab(server, channel)
nick = gui.tabs.search_tab(server).nick
tab.write(timestamp,
"<%s<font foreground='%s' weight='bold'>%s</font>>"
" <font foreground='%s'>%s</font>" % (
_getPrefix(server, channel, nick),
color.get_color_by_key("own_nick"),
nick,
color.get_color_by_key("own_text"),
markup.escape(message)), group_string=nick)
def userQuery_cb(timestamp, server, from_str, message):
"""
A user writes to us in a query.
"""
nick = parse_from(from_str)[0]
tab = _createTab(server, nick)
if isHighlighted(tab.server, message):
mtype = gui.tabs.HIGHMESSAGE
else:
mtype = gui.tabs.MESSAGE
tab.write(timestamp,
"<<font foreground='%s' weight='bold'>%s</font>> %s" % (
color.get_nick_color(nick),
markup.escape(nick),
markup.escape(message)
), mtype, group_string=nick)
# queries are important
gui.mgmt.set_urgent(True)
def userMode_cb(time, server, from_str, target, mode, param):
"""
Mode change on target from nick detected.
nick and param are optional arguments and
can be empty.
As nemo:
/mode #xesio +o nemo
will result in:
userMode(<time>,<server>,"nemo","#xesio","+o","nemo")
"""
def n_updatePrefix(tab, nick, mode):
""" checks if the mode is a prefix-mode (e.g. +o)
If so, the prefix of the nick `nick` in channel `channel`
will be updated (fetched).
"""
if not nick:
return
if mode[1] in tab.server.support_prefix[0]:
tab.nickList.set_prefix(nick,
sushi.user_channel_prefix(tab.server.name, tab.name, nick))
if tab.is_active():
gui.mgmt.set_user_count(len(tab.nickList),
tab.nickList.get_operator_count())
nick = parse_from(from_str)[0]
# nick: /mode target +mode param
server_tab = gui.tabs.search_tab(server)
if not nick:
# only a mode listing
server_tab.current_write(
time,
_("• Modes for %(target)s: %(mode)s") % {
"target":target,
"mode":mode},
gui.tabs.ACTION)
else:
actor = nick
own = (nick == server_tab.nick)
if own:
actor = "You"
tab = gui.tabs.search_tab(server, target)
if not tab:
# no channel/query found
if param: param = " "+param
if not _hide_output(server_tab, "mode"):
actor = "<font foreground='%s'>%s</font>" % (
action_nick_color(actor), actor)
target = "<font foreground='%s'>%s</font>" % (
action_nick_color(target), target)
server_tab.current_write(
time,
"• %(actor)s set %(mode)s%(param)s on %(target)s" % {
"actor":actor,
"mode":mode,
"param":param,
"target":target},
gui.tabs.ACTION)
else:
# suitable channel/query found, print it there
n_updatePrefix(tab, param, mode)
type = gui.tabs.ACTION
victim = target
own = (target == server_tab.nick)
			if (param == server_tab.nick) or own:
				type = gui.tabs.HIGHACTION
			if own:
				victim = "you"
if param: param = " "+param
if _show_output_exclusive(server_tab, tab, "mode", own = own):
actor = "<font foreground='%s'>%s</font>" % (
color.get_nick_color(actor), actor)
victim = "<font foreground='%s'>%s</font>" % (
color.get_nick_color(victim), victim)
tab.write(time,
"• %(actor)s set %(mode)s%(param)s on %(victim)s." % {
"actor": actor,
"mode": mode,
"param": param,
"victim": victim},
type)
def userOper_cb(time, server):
"""
yay, somebody gives the user oper rights.
"""
server_tab = gui.tabs.search_tab(server)
server_tab.current_write(time, "• You got oper access.")
def userCTCP_cb(time, server, from_str, target, message):
"""
A user sends a CTCP request to target.
I don't know a case in which target is not a channel
and not queried.
"""
nick = parse_from(from_str)[0]
(server_tab, target_tab) = gui.tabs.search_tabs(server, target)
if nick.lower() == server_tab.nick.lower():
# we wrote us
ownCTCP_cb(time, server, target, message)
elif target.lower() == server_tab.nick.lower():
# someone wrote us, put in into a query
queryCTCP_cb(time, server, from_str, message)
else:
# normal ctcp
headline = _("CTCP from %(nick)s to Channel:") % {
"nick": markup.escape(nick)}
target_tab.write(time,
"<font foreground='#00DD33'>%s</font> %s" % (
headline, markup.escape(message)))
def ownCTCP_cb(time, server, target, message):
"""
The maki user sends a CTCP request to
a channel or user (target).
"""
server_tab, tab = gui.tabs.search_tabs(server, target)
if tab:
# valid query/channel found, print it there
nickColor = color.get_color_by_key("own_nick")
textColor = color.get_color_by_key("own_text")
tab.write(time,
"<CTCP:<font foreground='%s' weight='bold'>%s</font>> "
"<font foreground='%s'>%s</font>" % (
nickColor,
server_tab.nick,
textColor,
markup.escape(message)))
else:
server_tab.write(time,
_("CTCP request from you to %(target)s: %(message)s") % {
"target": markup.escape(target),
"message": markup.escape(message)})
def queryCTCP_cb(time, server, from_str, message):
"""
A user sends us a CTCP request over a query.
If no query window is open, send it to the server tab.
"""
nick = parse_from(from_str)[0]
(server_tab, tab) = gui.tabs.search_tabs(server, nick)
if tab:
tab.write(time,
"<CTCP:<font foreground='%s' weight='bold'>%s"
"</font>> <font foreground='%s'>%s</font>" % (
color.get_nick_color(nick),
markup.escape(nick),
color.get_text_color(nick),
markup.escape(message)))
else:
server_tab.current_write(time,
"<CTCP:<font foreground='%s' weight='bold'>%s"
"</font>> <font foreground='%s'>%s</font>" % (
color.get_nick_color(nick),
markup.escape(nick),
color.get_text_color(nick),
markup.escape(message)))
def ownNotice_cb(time, server, target, message):
"""
	If a query/channel with ``target`` exists, print
	the notice there; otherwise print it on the current
	tab of the network identified by ``server``.
"""
server_tab, tab = gui.tabs.search_tabs(server, target)
ownNickColor = color.get_color_by_key("own_nick")
ownNick = server_tab.nick
if tab:
tab.write(time,
"><font foreground='%s' weight='bold'>%s</font>< "
"<font foreground='%s'>%s</font>" % (
color.get_nick_color(target),
markup.escape(target),
color.get_text_color(target),
markup.escape(message)))
else:
server_tab.current_write(time,
"><font foreground='%s' weight='bold'>%s</font>< "
"<font foreground='%s'>%s</font>" % (
color.get_nick_color(target),
markup.escape(target),
color.get_text_color(target),
markup.escape(message)))
def queryNotice_cb(time, server, from_str, message):
"""
A user sends a notice directly to the maki user.
"""
nick = parse_from(from_str)[0]
(server_tab, tab) = gui.tabs.search_tabs(server, nick)
if tab:
if tab.name != nick:
# correct notation of tab name
tab.name = nick
if tab:
tab.write(time,
"-<font foreground='%s' weight='bold'>%s</font>- "
"<font foreground='%s'>%s</font>" % (
color.get_nick_color(nick),
markup.escape(nick),
color.get_text_color(nick),
markup.escape(message)))
else:
server_tab.current_write(time,
"-<font foreground='%s' weight='bold'>%s</font>- "
"<font foreground='%s'>%s</font>" % (
color.get_nick_color(nick),
markup.escape(nick),
color.get_text_color(nick),
markup.escape(message)))
def userNotice_cb(time, server, from_str, target, message):
""" An incoming notice """
nick = parse_from(from_str)[0]
(server_tab, target_tab) = gui.tabs.search_tabs(server, target)
if nick.lower() == server_tab.nick.lower():
# we wrote that notice
ownNotice_cb(time, server, target, message)
return
elif target.lower() == server_tab.nick.lower():
# it's supposed to be a private (query) message
queryNotice_cb(time, server, from_str, message)
return
message = "-<font foreground='%s' weight='bold'>%s</font>- "\
"<font foreground='%s'>%s</font>" % (
color.get_nick_color(nick),
markup.escape(nick),
color.get_text_color(nick),
markup.escape(message))
if target_tab == None:
# global notice
server_tab.current_write(time, message)
else:
# channel/query notice
target_tab.write(time, message)
def ownAction_cb(time, server, channel, action):
tab = _createTab(server, channel)
nick = gui.tabs.search_tab(server).nick
nickColor = color.get_color_by_key("own_nick")
textColor = color.get_color_by_key("own_text")
tab.write(time,
"<font foreground='%s' weight='bold'>%s</font> "
"<font foreground='%s'>%s</font>" % (
nickColor,
nick,
textColor,
markup.escape(action)), group_string=nick)
def actionQuery_cb(time, server, from_str, action):
""" action in a query """
nick = parse_from(from_str)[0]
tab = _createTab(server, nick)
tab.write(time, "%s %s" % (nick, markup.escape(action)), group_string=nick)
gui.mgmt.set_urgent(True)
def userAction_cb(time, server, from_str, channel, action):
""" normal action """
nick = parse_from(from_str)[0]
(server_tab, channel_tab) = gui.tabs.search_tabs(server, channel)
if nick.lower() == server_tab.nick.lower():
ownAction_cb(time, server, channel, action)
return
elif channel.lower() == server_tab.nick.lower():
actionQuery_cb(time, server, from_str, action)
return
action = markup.escape(action)
if isHighlighted(server_tab, action):
type = gui.tabs.HIGHACTION
actionString = action
gui.mgmt.set_urgent(True)
else:
type = gui.tabs.ACTION
actionString = "<font foreground='%s'>%s</font>" % (
color.get_text_color(nick), action)
channel_tab.write(
time,
"<font foreground='%s' weight='bold'>%s</font> %s" % (
color.get_nick_color(nick),
nick,
actionString),
type, group_string=nick)
def userNick_cb(time, server, from_str, newNick):
"""
	A user (or the maki user) changed its nick.
	If a query window for this nick on this server
	exists, its name is changed.
"""
nick = parse_from(from_str)[0]
# find a query
server_tab, tab = gui.tabs.search_tabs(server, nick)
# rename query if found
if tab and tab.is_query():
tab.name = newNick
own = False
# we changed the nick
if not nick or nick == server_tab.nick:
message = _(u"• You are now known as %(newnick)s.")
server_tab.nick = newNick
own = True
# someone else did
else:
message = _(u"• %(nick)s is now known as %(newnick)s.")
# iterate over all channels and look if the nick is
# present there. If true so rename him in nicklist cache.
for tab in gui.tabs.get_all_tabs(servers = [server])[1:]:
if not nick or newNick == server_tab.nick:
			# notification, print every time
doPrint = True
else:
doPrint = _show_output_exclusive(server_tab, tab, "nick", own)
if tab.is_channel():
if (nick in tab.nickList.get_nicks()):
tab.nickList.modify_nick(nick, newNick)
else:
continue
if tab.is_query() and tab.name != newNick:
# ignore not associated queries
continue
nickString = "<font foreground='%s' weight='bold'>%s</font>" % (
action_nick_color(nick),
markup.escape(nick))
newNickString = "<font foreground='%s' weight='bold'>%s</font>" % (
action_nick_color(newNick),
markup.escape(newNick))
if doPrint:
tab.write(time,
message % {
"nick": nickString,
"newnick": newNickString
},
gui.tabs.ACTION)
def userAway_cb(time, server, from_str, away):
nick = parse_from(from_str)[0]
server_tab = gui.tabs.search_tab(server)
if not server_tab:
return
if nick == server_tab.nick:
if away:
server_tab.away = "-- Not implemented yet --"
else:
server_tab.away = ""
# iterate over all channels and look if the nick is
# present there.
for tab in gui.tabs.get_all_tabs(servers = [server])[1:]:
if tab.is_channel():
if nick in tab.nickList.get_nicks():
tab.nickList.set_away(nick, away)
elif tab.is_query():
if tab.name == nick:
# FIXME
continue
def userKick_cb(time, server, from_str, channel, who, reason):
"""
signal emitted if a user got kicked.
If the kicked user is ourself mark the channel as
joined=False
"""
nick = parse_from(from_str)[0]
server_tab, tab = gui.tabs.search_tabs(server, channel)
if not tab:
logging.debug("userKick: channel '%s' does not exist." % (channel))
return
channelString = markup.escape(channel)
nickString = "<font foreground='%s' weight='bold'>%s</font>" % (
action_nick_color(nick), markup.escape(nick))
reasonString = markup.escape(reason)
if who == server_tab.nick:
tab.joined = False
if _show_output_exclusive(server_tab, tab, "kick", own = True):
message = _(u"« You have been kicked from %(channel)s "
u"by %(nick)s (%(reason)s)." % {
"channel": channelString,
"nick": nickString,
"reason": reasonString })
tab.write(time, message, gui.tabs.HIGHACTION)
else:
tab.nickList.remove_nick(who)
if tab.is_active():
gui.mgmt.set_user_count(
len(tab.nickList),
tab.nickList.get_operator_count())
if _show_output_exclusive(server_tab, tab, "kick"):
whoString = "<font foreground='%s' weight='bold'>%s</font>" % (
color.get_nick_color(who), markup.escape(who))
message = _(u"« %(who)s was kicked from %(channel)s by "
u"%(nick)s (%(reason)s).") % {
"who": whoString,
"channel": channelString,
"nick": nickString,
"reason": reasonString }
tab.write(time, message, gui.tabs.ACTION)
def userQuit_cb(time, server, from_str, reason):
"""
The user identified by nick quit on the server "server" with
the reason "reason". "reason" can be empty ("").
	If we are the user, all channels are set to joined=False and
	the server's connected flag is set to False (as well as the
	connected flags of the children).
	If another user quits, a message is generated in every channel
	the user was on.
"""
server_tab = gui.tabs.search_tab(server)
nick = parse_from(from_str)[0]
if not server_tab:
# tab was closed before
return
if nick == server_tab.nick:
# set the connected flag to False for the server
server_tab.connected = False
hideServerPrint = _hide_output(server_tab, "quit", own = True)
# walk through all channels and set joined = False on them
channels = gui.tabs.get_all_tabs(servers = [server])[1:]
if reason:
message = _(u"« You have quit (%(reason)s).")
else:
message = _(u"« You have quit.")
# deactivate channels/queries
for channelTab in channels:
hideChannelPrint = _hide_output(channelTab, "quit",
own = True)
if channelTab.is_channel():
channelTab.joined = False
channelTab.connected = False
if not (hideServerPrint or hideChannelPrint):
channelTab.write(time, message % {
"reason": reason},
gui.tabs.ACTION)
else: # another user quit the network
hideServerPrint = _hide_output(server_tab, "quit")
if reason:
message = _(u"« %(nick)s has quit (%(reason)s).")
else:
message = _(u"« %(nick)s has quit.")
nickString = "<font foreground='%s' weight='bold'>"\
"%s</font>" % (
action_nick_color(nick),
markup.escape(nick))
reasonString = markup.escape(reason)
message = message % {
"nick": nickString,
"reason": reasonString}
channels = gui.tabs.get_all_tabs(servers = [server])[1:]
if not channels:
logging.debug("No channels but quit reported.. Hum wtf? o.0")
return
# print in all channels where nick joined a message
for channelTab in channels:
hideChannelPrint = _hide_output(channelTab, "quit")
if channelTab.is_query():
# on query with `nick` only print quitmessage
if (not (hideChannelPrint or hideServerPrint)
and channelTab.name.lower() == nick.lower()):
channelTab.write(time, message, gui.tabs.ACTION)
# skip nickList modification for queries
continue
# search for the nick in the channel
# and print the quit message if the
# nick was found.
nickList = channelTab.nickList
nicks = nickList.get_nicks() or []
if nick in nicks:
nickList.remove_nick(nick)
if channelTab.is_active():
# update gui display for usercount
gui.mgmt.set_user_count(len(nickList),
nickList.get_operator_count())
if not (hideServerPrint or hideChannelPrint):
channelTab.write(time, message, gui.tabs.ACTION)
def userJoin_cb(timestamp, server, from_str, channel):
"""
A user identified by "nick" joins the channel "channel" on
server "server.
If the nick is our we add the channeltab and set properties
on it, else we generate messages and stuff.
"""
nick = parse_from(from_str)[0]
stab, tab = gui.tabs.search_tabs(server, channel)
doPrint = False
if nick == stab.nick:
# we joined a channel, fetch nicks and topic, create
# channel and print the log
if not tab:
tab = gui.tabs.create_channel(stab, channel)
if not gui.tabs.add_tab(stab, tab):
raise Exception, \
"userJoin_cb: adding tab for channel '%s' failed." % (
channel)
tab.print_last_log()
tab.nickList.clear()
if tab.is_active():
gui.mgmt.set_user_count(
len(tab.nickList),
tab.nickList.get_operator_count())
tab.joined = True
tab.connected = True
if config.get_bool("tekka","switch_to_channel_after_join"):
gui.tabs.switch_to_path(tab.path)
doPrint = _show_output_exclusive(stab, tab, "join", own = True)
if doPrint:
nickString = "You"
message = _(u"» You have joined %(channel)s.")
else: # another one joined the channel
if not tab:
			raise Exception, \
				"No tab for channel '%s' in userJoin (not me)." % (channel)
doPrint = _show_output_exclusive(stab, tab, "join", own = False)
if doPrint:
message = _(u"» %(nick)s has joined %(channel)s.")
nickString = "<font foreground='%s' weight='bold'>"\
"%s</font>" % (
action_nick_color(nick),
markup.escape(nick))
tab.nickList.append_nick(nick)
if tab.is_active():
gui.mgmt.set_user_count(len(tab.nickList),
tab.nickList.get_operator_count())
if doPrint:
message = message % {
"nick": nickString,
"channel": markup.escape(channel) }
tab.write(timestamp, message, gui.tabs.ACTION)
def userNames_cb(timestamp, server, channel, nicks, prefixes):
"""
	This signal is called for each nick in the channel.
	Remove the nick first to make sure it isn't already there (workaround),
	add the nick, fetch its prefix and finally
	update the user count.
"""
server_tab, tab = gui.tabs.search_tabs(server, channel)
	if not server_tab or not tab: # /names for a nonexistent channel?
return
if not nicks:
# end of list
tab.nickList.sort_nicks()
else:
for i in xrange(len(nicks)):
# FIXME
tab.nickList.remove_nick(nicks[i])
tab.nickList.append_nick(nicks[i], sort=False)
if prefixes[i]:
tab.nickList.set_prefix(nicks[i], prefixes[i], sort=False)
# FIXME inefficient
tab.nickList.set_away(nicks[i], sushi.user_away(server_tab.name, nicks[i]))
if tab.is_active():
gui.mgmt.set_user_count(
len(tab.nickList),
tab.nickList.get_operator_count())
def userPart_cb(timestamp, server, from_str, channel, reason):
"""
A user parted the channel.
If we are the user who parted, mark the channel
as parted (joined=False)
"""
nick = parse_from(from_str)[0]
stab, tab = gui.tabs.search_tabs(server, channel)
if not tab:
# tab was closed
return
channelString = markup.escape(channel)
reasonString = markup.escape(reason)
if nick == stab.nick:
# we parted
tab.joined = False
if _show_output_exclusive(stab, tab, "part", own = True):
if reason:
message = _(u"« You have left %(channel)s (%(reason)s).")
else:
message = _(u"« You have left %(channel)s.")
tab.write(timestamp,
message % {
"channel": channelString,
"reason": reasonString },
gui.tabs.ACTION)
else: # another user parted
tab.nickList.remove_nick(nick)
if tab.is_active():
gui.mgmt.set_user_count(
len(tab.nickList),
tab.nickList.get_operator_count())
if _show_output_exclusive(stab, tab, "part", False):
nickString = "<font foreground='%s' weight='bold'>"\
"%s</font>" % (
color.get_nick_color(nick), markup.escape(nick))
if reason:
message = _(u"« %(nick)s has left %(channel)s "\
"(%(reason)s).")
else:
message = _(u"« %(nick)s has left %(channel)s.")
tab.write(timestamp,
message % {
"nick": nickString,
"channel": channelString,
"reason": reasonString},
gui.tabs.ACTION)
def userError_cb(time, server, domain, reason, arguments):
if domain == "no_such":
noSuch(time, server, arguments[0], reason)
elif domain == "cannot_join":
cannotJoin(time, server, arguments[0], reason)
elif domain == "privilege":
if reason == "channel_operator":
tab = gui.tabs.search_tab(server, arguments[0])
message = _(u"• You are not a channel operator.")
tab.write(time, message)
def noSuch(time, server, target, type):
""" Signal is emitted if maki can't find the target on the server. """
(server_tab, tab) = gui.tabs.search_tabs(server, target)
if type == "nick":
error = _(u"• %(target)s: No such nick/channel.") % {
"target": markup.escape(target) }
elif type == "server":
error = _(u"• %(target)s: No such server.") % {
"target": markup.escape(target) }
elif type == "channel":
error = _(u"• %(target)s: No such channel.") % {
"target": markup.escape(target) }
if tab:
tab.write(time, error)
else:
server_tab.write(time, error)
def cannotJoin(time, server, channel, reason):
""" The channel could not be joined.
reason : { l (full), i (invite only), b (banned), k (key) }
"""
message = _("Unknown reason")
if reason == "full":
message = _("The channel is full.")
elif reason == "invite":
message = _("The channel is invite-only.")
elif reason == "banned":
message = _("You are banned.")
elif reason == "key":
if config.get_bool("tekka", "ask_for_key_on_cannotjoin"):
def key_dialog_response_cb(dialog, id):
if id == gtk.RESPONSE_OK:
sushi.join(server, channel, dialog.entry.get_text())
dialog.destroy()
# open a input dialog which asks for the key
d = key_dialog.KeyDialog(server, channel)
d.connect("response", key_dialog_response_cb)
gui.mgmt.show_inline_dialog(d)
return
else:
message = _("You need the correct channel key.")
server_tab = gui.tabs.search_tab(server)
server_tab.current_write(time,
_("You can not join %(channel)s: %(reason)s" % {
"channel":channel,
"reason":message
}))
def dcc_send_cb(time, id, server, sender, filename,
size, progress, speed, status):
"""
status:
- 1 << 0 = incoming
- 1 << 1 = resumed
- 1 << 2 = running
- 1 << 3 = error
"" in (server, sender, filename)
and 0 in (size, progress, speed, status):
send was removed
"""
def dcc_dialog_response_cb(dialog, id, tid):
if id == gtk.RESPONSE_OK:
sushi.dcc_send_accept(tid)
elif id == gtk.RESPONSE_CANCEL:
sushi.dcc_send_remove(tid)
dialog.destroy()
# create dcc_news new-transfer-cache
self = code.init_function_attrs(dcc_send_cb,
dcc_news={},
dcc_notifies={})
if (server == "" and sender == "" and filename == ""
and size == 0 and progress == 0 and speed == 0 and status == 0):
# send was removed
logging.debug("filetransfer %d removed." % (id))
try:
del self.dcc_news[id]
del self.dcc_notifies[id]
except KeyError:
pass
return
logging.debug("status is %d." % (status))
# import dcc transfer states
from tekka.helper.dcc import s_new, s_incoming, s_resumable, s_running
# handle incoming transfer
if status & s_incoming == s_incoming:
if status & s_new == s_new:
# attempt made
d = dcc_dialog.DCCDialog(
id, parse_from(sender)[0],
filename, size,
resumable = (status & s_resumable == s_resumable))
self.dcc_news[id] = True
d.connect("response", dcc_dialog_response_cb, id)
gui.mgmt.show_inline_dialog(d)
elif status & s_running and status & s_incoming:
		if id not in self.dcc_news and id not in self.dcc_notifies:
# notify about auto accepted file transfer
gui.mgmt.show_inline_message(
_("Auto accepted file transfer"),
_("maki auto accepted the following file transfer:\n"
"Filename: %(filename)s\n"
"Sender: %(sender)s\n"
"Size: %(size)s\n"
"Server: %(server)s" % {
"filename":filename,
"sender":parse_from(sender)[0],
"size":size,
"server":server}),
dtype="info")
self.dcc_notifies[id] = True
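# A minimal sketch (not part of the original handler) of decoding the
# status bit field documented in dcc_send_cb's docstring; the variable
# names are illustrative only.
#
#   status   = (1 << 0) | (1 << 2)        # incoming and running
#   incoming = bool(status & (1 << 0))    # True
#   resumed  = bool(status & (1 << 1))    # False
#   running  = bool(status & (1 << 2))    # True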
|
sushi-irc/tekka
|
signal_handler.py
|
Python
|
bsd-2-clause
| 38,295
| 0.032208
|
"""
GravMag: Use the DipoleMagDir class to estimate the magnetization direction
of dipoles with known centers
"""
import numpy
from fatiando import mesher, gridder
from fatiando.utils import ang2vec, vec2ang, contaminate
from fatiando.gravmag import sphere
from fatiando.vis import mpl
from fatiando.gravmag.magdir import DipoleMagDir
from fatiando.constants import CM
# Make noise-corrupted synthetic data
inc, dec = -10.0, -15.0 # inclination and declination of the Geomagnetic Field
model = [mesher.Sphere(3000, 3000, 1000, 1000,
{'magnetization': ang2vec(6.0, -20.0, -10.0)}),
mesher.Sphere(7000, 7000, 1000, 1000,
{'magnetization': ang2vec(10.0, 3.0, -67.0)})]
area = (0, 10000, 0, 10000)
x, y, z = gridder.scatter(area, 1000, z=-150, seed=0)
tf = contaminate(sphere.tf(x, y, z, model, inc, dec), 5.0, seed=0)
# Give the centers of the dipoles
centers = [[3000, 3000, 1000], [7000, 7000, 1000]]
# Estimate the magnetization vectors
solver = DipoleMagDir(x, y, z, tf, inc, dec, centers).fit()
# Print the estimated and true dipole moments, inclinations and declinations
print 'Estimated magnetization (intensity, inclination, declination)'
for e in solver.estimate_:
print e
# Plot the fit and the normalized histogram of the residuals
mpl.figure(figsize=(14, 5))
mpl.subplot(1, 2, 1)
mpl.title("Total Field Anomaly (nT)", fontsize=14)
mpl.axis('scaled')
nlevels = mpl.contour(y, x, tf, (50, 50), 15, interp=True, color='r',
label='Observed', linewidth=2.0)
mpl.contour(y, x, solver.predicted(), (50, 50), nlevels, interp=True,
color='b', label='Predicted', style='dashed', linewidth=2.0)
mpl.legend(loc='upper left', shadow=True, prop={'size': 13})
mpl.xlabel('East y (m)', fontsize=14)
mpl.ylabel('North x (m)', fontsize=14)
mpl.subplot(1, 2, 2)
residuals_mean = numpy.mean(solver.residuals())
residuals_std = numpy.std(solver.residuals())
# Each residual is subtracted from the mean and the resulting
# difference is divided by the standard deviation
s = (solver.residuals() - residuals_mean) / residuals_std
mpl.hist(s, bins=21, range=None, normed=True, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None)
mpl.xlim(-4, 4)
mpl.title("mean = %.3f std = %.3f" % (residuals_mean, residuals_std),
fontsize=14)
mpl.ylabel("P(z)", fontsize=14)
mpl.xlabel("z", fontsize=14)
mpl.show()
|
eusoubrasileiro/fatiando_seismic
|
cookbook/gravmag_magdir_dipolemagdir.py
|
Python
|
bsd-3-clause
| 2,519
| 0
|
from django.conf.urls import url
from . import views
from core.views import list_evidence_view, create_evidence_view, list_submissions_view
app_name = 'core'
urlpatterns = [
# ex: /core/
url(r'^$', views.index, name='index'),
url(r'^(?P<corecomp_id>[0-9]+)/$', views.detail, name='detail'),
url(r'^submitted', views.submitted, name='submitted'),
url(r'^evidence_form/$', create_evidence_view, name='evidence'),
url(r'^CES/$', list_evidence_view, name='CES'),
url(r'^submissions/$', list_submissions_view, name='submissions')
]
|
shmish/core-assess
|
core/urls.py
|
Python
|
mpl-2.0
| 549
| 0.014572
|
from haps.scopes.instance import InstanceScope
def test_get_object(some_class):
some_instance = InstanceScope().get_object(some_class)
assert isinstance(some_instance, some_class)
def test_get_multiple_objects(some_class):
scope = InstanceScope()
objects = {scope.get_object(some_class) for _ in range(100)}
assert all(isinstance(o, some_class) for o in objects)
assert len({id(o) for o in objects}) == 100
|
ekiro/chaps
|
tests/test_instance_scope.py
|
Python
|
mit
| 436
| 0
|
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import versioneer
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args + ' test')
sys.exit(errno)
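# Illustrative usage of the custom test command above (assumed, not part
# of this file): extra arguments are forwarded to py.test via the
# declared option, e.g.
#
#   python setup.py test --pytest-args="-x -k registry"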
cmd_classes = versioneer.get_cmdclass()
cmd_classes['test'] = PyTest
setup(
name="kinderstadt-registry",
version=versioneer.get_version(),
cmdclass=cmd_classes,
packages=find_packages(),
install_requires=[
'alembic==0.7.6',
'click==4.0',
'fake-factory==0.5.2',
'Flask-Migrate==1.4.0',
'Flask-SQLAlchemy==2.0',
'Flask-WTF==0.11',
'Flask==0.10.1',
'path.py==7.3',
'pgcli==0.17.0',
'python-stdnum==1.1',
'SQLAlchemy-Searchable==0.9.3',
'SQLAlchemy-Utils==0.30.12',
],
extras_require={
'devel': [
'ansible',
'autopep8',
'flake8',
'ipython',
],
},
tests_require=[
'pytest',
'testing.postgresql'
],
entry_points={
'console_scripts': [
'registry=registry.cli:main'
]
}
)
|
arsgeografica/kinderstadt-registry
|
setup.py
|
Python
|
gpl-3.0
| 1,548
| 0.000646
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/victorgarric/Documents/INVENTAIRE/principal.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QApplication, QPushButton, QLineEdit, QLabel, QMenuBar, QStatusBar, QMessageBox, QProgressDialog, QFileDialog
import display
import cursor
import listing
import excel
import delete
import manual
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(500, 262)
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.button_find_id = QPushButton(self.centralwidget)
self.button_find_id.setGeometry(QtCore.QRect(370, 10, 110, 32))
self.button_find_id.setObjectName(_fromUtf8("button_find_id"))
self.button_find_name = QPushButton(self.centralwidget)
self.button_find_name.setGeometry(QtCore.QRect(370, 50, 110, 32))
self.button_find_name.setObjectName(_fromUtf8("button_find_name"))
self.button_find_cas = QPushButton(self.centralwidget)
self.button_find_cas.setGeometry(QtCore.QRect(370, 90, 110, 32))
self.button_find_cas.setObjectName(_fromUtf8("button_find_cas"))
self.button_find_vpid = QPushButton(self.centralwidget)
self.button_find_vpid.setGeometry(QtCore.QRect(370, 130, 110, 32))
        self.button_find_vpid.setObjectName(_fromUtf8("button_find_vpid"))
self.button_add = QPushButton(self.centralwidget)
self.button_add.setGeometry(QtCore.QRect(150, 180, 110, 32))
self.button_add.setObjectName(_fromUtf8("button_add"))
self.button_stop = QPushButton(self.centralwidget)
self.button_stop.setGeometry(QtCore.QRect(150, 210, 110, 32))
self.button_stop.setObjectName(_fromUtf8("button_stop"))
self.button_invent = QPushButton(self.centralwidget)
self.button_invent.setGeometry(QtCore.QRect(20, 180, 120, 32))
self.button_invent.setObjectName(_fromUtf8("button_invent"))
self.button_invent_2 = QPushButton(self.centralwidget)
self.button_invent_2.setGeometry(QtCore.QRect(20, 210, 120, 32))
self.button_invent_2.setObjectName(_fromUtf8("button_invent_2"))
self.button_delete = QPushButton(self.centralwidget)
self.button_delete.setGeometry(QtCore.QRect(260, 210, 120, 32))
self.button_delete.setObjectName(_fromUtf8("button_delete"))
self.button_manual = QPushButton(self.centralwidget)
self.button_manual.setGeometry(QtCore.QRect(260, 180, 120, 32))
        self.button_manual.setObjectName(_fromUtf8("button_manual"))
self.button_repop = QPushButton(self.centralwidget)
self.button_repop.setGeometry(QtCore.QRect(380, 195, 110, 32))
self.button_repop.setObjectName(_fromUtf8("button_repop"))
self.line_id = QLineEdit(self.centralwidget)
self.line_id.setGeometry(QtCore.QRect(90, 10, 251, 21))
self.line_id.setObjectName(_fromUtf8("line_id"))
self.line_name = QLineEdit(self.centralwidget)
self.line_name.setGeometry(QtCore.QRect(90, 50, 251, 21))
self.line_name.setObjectName(_fromUtf8("line_name"))
self.line_cas = QLineEdit(self.centralwidget)
self.line_cas.setGeometry(QtCore.QRect(90, 90, 251, 21))
self.line_cas.setObjectName(_fromUtf8("line_cas"))
self.line_vpid = QLineEdit(self.centralwidget)
self.line_vpid.setGeometry(QtCore.QRect(90, 130, 251, 21))
        self.line_vpid.setObjectName(_fromUtf8("line_vpid"))
self.label_id = QLabel(self.centralwidget)
self.label_id.setGeometry(QtCore.QRect(10, 10, 56, 13))
self.label_id.setObjectName(_fromUtf8("label_id"))
self.label_name = QLabel(self.centralwidget)
self.label_name.setGeometry(QtCore.QRect(10, 50, 56, 13))
self.label_name.setObjectName(_fromUtf8("label_name"))
self.label_cas = QLabel(self.centralwidget)
self.label_cas.setGeometry(QtCore.QRect(10, 90, 56, 13))
self.label_cas.setObjectName(_fromUtf8("label_cas"))
self.label_vpid = QLabel(self.centralwidget)
self.label_vpid.setGeometry(QtCore.QRect(10, 130, 56, 13))
        self.label_vpid.setObjectName(_fromUtf8("label_vpid"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
#connection
#self.trigger=QtCore.pyqtSignal()
#self.trigger.connect(self.button_add, QtCore.SIGNAL("released()"), self.new)
#self.connect(self.button_stop, QtCore.SIGNAL("released()"), self.quit)
#self.connect(self.button_find_id, QtCore.SIGNAL("released()"), self.find_id)
#self.connect(self.button_find_name, QtCore.SIGNAL("released()"), self.find_name)
#self.connect(self.button_find_vpid, QtCore.SIGNAL("released()"), self.find_vpid)
#self.connect(self.button_find_cas, QtCore.SIGNAL("released()"), self.find_cas)
#self.connect(self.button_invent, QtCore.SIGNAL("released()"), self.invent)
#self.connect(self.button_invent_2, QtCore.SIGNAL("released()"), self.invent_2)
#self.connect(self.button_delete, QtCore.SIGNAL("released()"), self.delete)
#self.connect(self.button_manual, QtCore.SIGNAL("released()"), self.manu)
#self.connect(self.button_repop, QtCore.SIGNAL("released()"), self.repop)
self.button_stop.clicked.connect(self.quit)
self.button_add.clicked.connect(self.new)
self.button_find_id.clicked.connect(self.find_id)
self.button_find_name.clicked.connect(self.find_name)
self.button_find_vpid.clicked.connect(self.find_vpid)
self.button_find_cas.clicked.connect(self.find_cas)
self.button_invent.clicked.connect(self.invent)
self.button_invent_2.clicked.connect(self.invent_2)
self.button_delete.clicked.connect(self.delete)
self.button_manual.clicked.connect(self.manu)
self.button_repop.clicked.connect(self.repop)
def invent(self) :
prog=QProgressDialog("Compiling inventory...","Cancel",0,100,self)
prog.open()
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
curs.execute("""SELECT * FROM "main"."chem" WHERE "id" > 0 """)
store=curs.fetchall()
a=excel.makeinvent(store)
a.begin()
internal=0
if prog.wasCanceled() :
return None
while internal != 100 :
try :
internal=(a.returnid()/len(store))*100
except :
internal=100
prog.setValue(internal)
if prog.wasCanceled() :
return None
b=a.returnbook()
try :
fname=QFileDialog.getSaveFileName(self, 'Save File', '/','Excel File (*.xls)')[0]
b.save(fname)
QMessageBox.information(self, "Info", "Inventory was saved sucessfully.")
if prog.wasCanceled() :
return None
except :
QMessageBox.information(self, "Info", "Inventory was no saved.")
def invent_2 (self) :
prog=QProgressDialog("Compiling inventory...","Cancel",0,100,self)
prog.open()
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
curs.execute("""SELECT "storage" FROM "main"."chem" WHERE "id" > 0 """)
store=curs.fetchall()
newstore=[]
count=-1
if prog.wasCanceled() :
return None
for i in store :
count=count+1
if i[0] not in newstore :
newstore.append(store[count][0])
a=excel.makeinvent_2(newstore)
a.begin()
internal=[0,1]
percent=0
if prog.wasCanceled() :
return None
while percent != 100 :
internal=(a.returnid())
try :
percent=((internal[0]/internal[1])*100)
except :
percent=100
prog.setValue(percent)
if prog.wasCanceled() :
return None
b=a.returnbook()
try :
fname=QFileDialog.getSaveFileName(self, 'Save File', '/','Excel File (*.xls)')[0]
b.save(fname)
QMessageBox.information(self, "Info", "Inventory was saved sucessfully.")
except :
QMessageBox.information(self, "Info", "Inventory was no saved.")
def new (self) :
self.prop=display.Ui_chem()
curs=cursor.connection()[0]
curs.execute('''SELECT MAX(id) FROM chem''')
maximum=curs.fetchone()[0]
maximum=int(maximum)
if maximum==-1 :
maximum=0
self.prop.line_id.setText(str(maximum+1))
self.prop.line_id.setReadOnly(True)
self.prop.show()
def find_id (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
idfind=str(self.line_id.text())
idfind=(idfind,)
curs.execute('''SELECT * FROM chem WHERE id=?''', idfind)
data.commit()
data.commit()
store=curs.fetchone()
if str(self.line_id.text())=="-1" :
store=None
data.close()
if store != None :
self.line_id.setText('')
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0])
self.prop.line_vendor.setText(store[1])
self.prop.line_vpid.setText(store[2])
self.prop.line_cas.setText(store[3])
self.prop.line_size.setText(store[4])
self.prop.line_storage.setText(store[5])
self.prop.line_room.setText(store[6])
self.prop.line_id.setText(str(store[7]))
self.prop.line_id.setReadOnly(True)
self.prop.show()
else :
self.line_id.setText('')
QMessageBox.information(self, "Error", "ID doesn't exist")
data.close()
def find_vpid (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
idfind=str(self.line_vpid.text())
idfind=(idfind,)
curs.execute('''SELECT * FROM chem WHERE vpid=?''', idfind)
data.commit()
data.commit()
store=curs.fetchone()
        if store is not None and (store[0]=="CHEMDB\n" or store[0]=='CHEMDB') :
            store=None
data.close()
if store != None :
self.line_id.setText('')
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0])
self.prop.line_vendor.setText(store[1])
self.prop.line_vpid.setText(store[2])
self.prop.line_cas.setText(store[3])
self.prop.line_size.setText(store[4])
self.prop.line_storage.setText(store[5])
self.prop.line_room.setText(store[6])
self.prop.line_id.setText(str(store[7]))
self.prop.line_id.setReadOnly(True)
self.prop.show()
else :
self.line_id.setText('')
QMessageBox.information(self, "Error", "Vendor ID doesn't exist")
data.close()
def delete (self) :
self.prop=delete.Ui_delete_entries()
self.prop.show()
def find_name (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
idfind=str(self.line_name.text())
idfind.lower()
idfind="%"+idfind+"%"
idfind=(idfind,)
curs.execute('''SELECT "name", "id", "storage" FROM "main"."chem" where "name" LIKE ? LIMIT 0, 100''', idfind)
data.commit()
store=curs.fetchall()
for item in store :
if item[0]=="CHEMDB\n" or item[0]=="CHEMDB" :
store.remove(item)
if store != None and len(store)==1 :
curs.execute('''SELECT * FROM "main"."chem" where "name" LIKE ? LIMIT 0, 100''', idfind)
data.commit()
store=curs.fetchall()
for item in store :
if item[0]=="CHEMDB\n" or item[0]=="CHEMDB" :
store.remove(item)
data.close()
self.line_name.setText('')
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0][0])
self.prop.line_vendor.setText(store[0][1])
self.prop.line_vpid.setText(store[0][2])
self.prop.line_cas.setText(store[0][3])
self.prop.line_size.setText(store[0][4])
self.prop.line_storage.setText(store[0][5])
self.prop.line_room.setText(store[0][6])
self.prop.line_id.setText(str(store[0][7]))
self.prop.line_id.setReadOnly(True)
self.prop.show()
elif store != None and len(store)>1 :
self.listing=listing.Ui_Form()
self.listing.list.clear()
reform=[]
for produit in range(len(store)) :
reform.append(str(store[produit][0])+" // STORE : "+ str(store[produit][2]) +" // ID : " + str(store[produit][1]))
self.listing.list.addItem(reform[produit])
data.close()
if len(store)>=99 :
QMessageBox.information(self, "Warning", "More than 100 references were found. Only displaying the first 100 records")
self.line_name.setText('')
self.listing.show()
else :
data.close()
self.line_name.setText('')
QMessageBox.information(self, "Error", "The research gave nothing back")
def find_cas (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
casfind=str(self.line_cas.text())
casfind.lower()
casfind=(casfind,)
curs.execute('''SELECT * FROM "main"."chem" WHERE "cas"=?''', casfind)
store=curs.fetchone()
if store[0]=="CHEMDB\n" or store[0]=='CHEMDB' :
store=None
if store!=None :
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0])
self.prop.line_vendor.setText(store[1])
self.prop.line_vpid.setText(store[2])
self.prop.line_cas.setText(store[3])
self.prop.line_size.setText(store[4])
self.prop.line_storage.setText(store[5])
self.prop.line_room.setText(store[6])
self.prop.line_id.setText(str(store[7]))
self.prop.line_id.setReadOnly(True)
self.line_cas.setText('')
self.prop.show()
else :
QMessageBox.information(self, "Error", "Cannot found CAS")
self.line_cas.setText('')
data.close()
def repop (self) :
        h=QMessageBox.question(self, "WARNING", "WARNING! Repopulating will replace the entire database with the contents of an Excel file generated by this database. Do not run this action lightly! Are you sure you want to continue?")
if h==QMessageBox.No :
return None
        fname=QFileDialog.getOpenFileName(self, 'Choose an Excel File', '/','Excel File (*.xls)')[0]
prog=QProgressDialog("Gathering Data...","Cancel",0,100,self)
prog.open()
if prog.wasCanceled() :
return None
rep=excel.repopulate(fname)
try :
rep.begin()
if prog.wasCanceled() :
return None
except :
return None
state=int(rep.returnstate())
prog.setLabelText("Repopulating...")
while state==0 :
prog.setValue(rep.returnpercent())
state=rep.returnstate()
prog.setCancelButton(None)
if state==1 :
prog.close()
QMessageBox.information(self, "Sucess", "Repopulation Sucess")
if state==-1 :
QMessageBox.information(self, "Error", "Repopulation Failled")
def abort(self) :
return None
def manu (self) :
self.load=manual.Ui_manual()
self.load.show()
def quit (self) :
QApplication.quit()
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Chem Database", None))
self.button_find_id.setText(_translate("MainWindow", "Find ID", None))
self.button_find_vpid.setText(_translate("MainWindow", "Find Vendor ID", None))
self.button_repop.setText(_translate("MainWindow", "Repopulate", None))
self.button_find_name.setText(_translate("MainWindow", "Find Name", None))
self.button_find_cas.setText(_translate("MainWindow", "Find CAS", None))
self.button_add.setText(_translate("MainWindow", "Add Entry", None))
self.button_stop.setText(_translate("MainWindow","Close Program",None))
self.button_invent.setText(_translate("MainWindow","Inventory:Full",None))
self.button_invent_2.setText(_translate("MainWindow","Inventory:Group",None))
self.button_delete.setText(_translate('MainWindow','Delete Entries',None))
self.button_manual.setText(_translate('MainWindow','Manual CMD',None))
self.label_id.setText(_translate("MainWindow", "ID", None))
self.label_name.setText(_translate("MainWindow", "Name", None))
self.label_cas.setText(_translate("MainWindow", "CAS", None))
self.label_vpid.setText(_translate("MainWindow", "Vendor ID", None))
|
dedichan/ChemDB
|
principal.py
|
Python
|
gpl-3.0
| 18,412
| 0.01423
|
"""
Ops for masked arrays.
"""
from typing import (
Optional,
Union,
)
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
def kleene_or(
left: Union[bool, np.ndarray],
right: Union[bool, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``or`` using Kleene logic.
Values are NA where we have ``NA | NA`` or ``NA | False``.
``NA | True`` is considered True.
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
The result of the logical or, and the new mask.
"""
    # To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe, since
    # A | B == B | A
if left_mask is None:
return kleene_or(right, left, right_mask, left_mask)
assert isinstance(left, np.ndarray)
raise_for_nan(right, method="or")
if right is libmissing.NA:
result = left.copy()
else:
result = left | right
if right_mask is not None:
        # output is unknown where (False | NA), (NA | False), (NA | NA)
left_false = ~(left | left_mask)
right_false = ~(right | right_mask)
mask = (
(left_false & right_mask)
| (right_false & left_mask)
| (left_mask & right_mask)
)
else:
if right is True:
mask = np.zeros_like(left_mask)
elif right is libmissing.NA:
mask = (~left & ~left_mask) | left_mask
else:
# False
mask = left_mask.copy()
return result, mask
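# Illustrative example (not part of the original module): with
# left = [True, False, False] and left_mask = [False, False, True]
# (i.e. the masked values [True, False, NA]) and the scalar right = NA,
# kleene_or returns result = [True, False, False] with
# mask = [False, True, True], i.e. the values [True, NA, NA]:
# True | NA is True, while False | NA and NA | NA are NA.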
def kleene_xor(
left: Union[bool, np.ndarray],
right: Union[bool, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``xor`` using Kleene logic.
This is the same as ``or``, with the following adjustments
* True, True -> False
* True, NA -> NA
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
The result of the logical xor, and the new mask.
"""
if left_mask is None:
return kleene_xor(right, left, right_mask, left_mask)
raise_for_nan(right, method="xor")
if right is libmissing.NA:
result = np.zeros_like(left)
else:
# error: Incompatible types in assignment (expression has type
# "Union[bool, Any]", variable has type "ndarray")
result = left ^ right # type: ignore[assignment]
if right_mask is None:
if right is libmissing.NA:
mask = np.ones_like(left_mask)
else:
mask = left_mask.copy()
else:
mask = left_mask | right_mask
return result, mask
def kleene_and(
left: Union[bool, libmissing.NAType, np.ndarray],
right: Union[bool, libmissing.NAType, np.ndarray],
left_mask: Optional[np.ndarray],
right_mask: Optional[np.ndarray],
):
"""
Boolean ``and`` using Kleene logic.
Values are ``NA`` for ``NA & NA`` or ``True & NA``.
Parameters
----------
left, right : ndarray, NA, or bool
The values of the array.
left_mask, right_mask : ndarray, optional
The masks. Only one of these may be None, which implies that
the associated `left` or `right` value is a scalar.
Returns
-------
result, mask: ndarray[bool]
        The result of the logical and, and the new mask.
"""
    # To reduce the number of cases, we ensure that `left` & `left_mask`
    # always come from an array, not a scalar. This is safe, since
    # A & B == B & A
if left_mask is None:
return kleene_and(right, left, right_mask, left_mask)
assert isinstance(left, np.ndarray)
raise_for_nan(right, method="and")
if right is libmissing.NA:
result = np.zeros_like(left)
else:
result = left & right
if right_mask is None:
# Scalar `right`
if right is libmissing.NA:
mask = (left & ~left_mask) | left_mask
else:
mask = left_mask.copy()
if right is False:
# unmask everything
mask[:] = False
else:
# unmask where either left or right is False
left_false = ~(left | left_mask)
right_false = ~(right | right_mask)
mask = (left_mask & ~right_false) | (right_mask & ~left_false)
return result, mask
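# Illustrative example (not part of the original module): with
# left = [True, False] and left_mask = [False, True] (i.e. [True, NA])
# and the scalar right = NA, kleene_and returns result = [False, False]
# with mask = [True, True], i.e. [NA, NA]: True & NA and NA & NA are NA.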
def raise_for_nan(value, method: str):
if lib.is_float(value) and np.isnan(value):
raise ValueError(f"Cannot perform logical '{method}' with floating NaN")
|
datapythonista/pandas
|
pandas/core/ops/mask_ops.py
|
Python
|
bsd-3-clause
| 5,124
| 0.000195
|
"""
Classes to represent the default SQL aggregate functions
"""
import copy
from django.db.models.fields import IntegerField, FloatField
# Fake fields used to identify aggregate types in data-conversion operations.
ordinal_aggregate_field = IntegerField()
computed_aggregate_field = FloatField()
class Aggregate(object):
"""
Default SQL Aggregate.
"""
is_ordinal = False
is_computed = False
sql_template = '%(function)s(%(field)s)'
def __init__(self, col, source=None, is_summary=False, **extra):
"""Instantiate an SQL aggregate
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* sql_function, the name of the SQL function that implements the
aggregate.
* sql_template, a template string that is used to render the
aggregate into SQL.
* is_ordinal, a boolean indicating if the output of this aggregate
is an integer (e.g., a count)
        * is_computed, a boolean indicating if the output of this aggregate
is a computed float (e.g., an average), regardless of the input
type.
"""
self.col = col
self.source = source
self.is_summary = is_summary
self.extra = extra
# Follow the chain of aggregate sources back until you find an
# actual field, or an aggregate that forces a particular output
# type. This type of this field will be used to coerce values
# retrieved from the database.
tmp = self
while tmp and isinstance(tmp, Aggregate):
if getattr(tmp, 'is_ordinal', False):
tmp = ordinal_aggregate_field
elif getattr(tmp, 'is_computed', False):
tmp = computed_aggregate_field
else:
tmp = tmp.source
self.field = tmp
def relabeled_clone(self, change_map):
clone = copy.copy(self)
if isinstance(self.col, (list, tuple)):
clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
return clone
def as_sql(self, qn, connection):
"Return the aggregate, rendered as SQL with parameters."
params = []
if hasattr(self.col, 'as_sql'):
field_name, params = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
field_name = self.col
substitutions = {
'function': self.sql_function,
'field': field_name
}
substitutions.update(self.extra)
return self.sql_template % substitutions, params
class Avg(Aggregate):
is_computed = True
sql_function = 'AVG'
class Count(Aggregate):
is_ordinal = True
sql_function = 'COUNT'
sql_template = '%(function)s(%(distinct)s%(field)s)'
def __init__(self, col, distinct=False, **extra):
super(Count, self).__init__(col, distinct=distinct and 'DISTINCT ' or '', **extra)
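# Illustrative example (not from the original file): with the column
# reference ('author', 'id'), Count(('author', 'id'), distinct=True)
# renders via as_sql() as
#
#   COUNT(DISTINCT "author"."id")
#
# (assuming a quote_name function qn that double-quotes identifiers).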
class Max(Aggregate):
sql_function = 'MAX'
class Min(Aggregate):
sql_function = 'MIN'
class StdDev(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(StdDev, self).__init__(col, **extra)
self.sql_function = sample and 'STDDEV_SAMP' or 'STDDEV_POP'
class Sum(Aggregate):
sql_function = 'SUM'
class Variance(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(Variance, self).__init__(col, **extra)
self.sql_function = sample and 'VAR_SAMP' or 'VAR_POP'
|
andrewsmedina/django
|
django/db/models/sql/aggregates.py
|
Python
|
bsd-3-clause
| 4,116
| 0.001944
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common functionalities shared between different DRAC modules.
"""
from oslo.utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers.modules.drac import client as drac_client
pywsman = importutils.try_import('pywsman')
REQUIRED_PROPERTIES = {
'drac_host': _('IP address or hostname of the DRAC card. Required.'),
'drac_username': _('username used for authentication. Required.'),
'drac_password': _('password used for authentication. Required.')
}
OPTIONAL_PROPERTIES = {
'drac_port': _('port used for WS-Man endpoint; default is 443. Optional.'),
'drac_path': _('path used for WS-Man endpoint; default is "/wsman". '
'Optional.'),
'drac_protocol': _('protocol used for WS-Man endpoint; one of http, https;'
' default is "https". Optional.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
# ReturnValue constants
RET_SUCCESS = '0'
RET_ERROR = '2'
RET_CREATED = '4096'
def parse_driver_info(node):
"""Parse a node's driver_info values.
Parses the driver_info of the node, reads default values
and returns a dict containing the combination of both.
:param node: an ironic node object.
:returns: a dict containing information from driver_info
and default values.
    :raises: InvalidParameterValue if some mandatory information
        is missing from the node or if the inputs are invalid.
"""
driver_info = node.driver_info
parsed_driver_info = {}
error_msgs = []
for param in REQUIRED_PROPERTIES:
try:
parsed_driver_info[param] = str(driver_info[param])
except KeyError:
error_msgs.append(_("'%s' not supplied to DracDriver.") % param)
except UnicodeEncodeError:
error_msgs.append(_("'%s' contains non-ASCII symbol.") % param)
parsed_driver_info['drac_port'] = driver_info.get('drac_port', 443)
try:
parsed_driver_info['drac_path'] = str(driver_info.get('drac_path',
'/wsman'))
except UnicodeEncodeError:
error_msgs.append(_("'drac_path' contains non-ASCII symbol."))
try:
parsed_driver_info['drac_protocol'] = str(
driver_info.get('drac_protocol', 'https'))
except UnicodeEncodeError:
error_msgs.append(_("'drac_protocol' contains non-ASCII symbol."))
try:
parsed_driver_info['drac_port'] = int(parsed_driver_info['drac_port'])
except ValueError:
error_msgs.append(_("'drac_port' is not an integer value."))
if error_msgs:
msg = (_('The following errors were encountered while parsing '
'driver_info:\n%s') % '\n'.join(error_msgs))
raise exception.InvalidParameterValue(msg)
return parsed_driver_info
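# Illustrative example (assumed node data, not from this module): for a
# node with
#
#   node.driver_info = {'drac_host': '10.0.0.5',
#                       'drac_username': 'root',
#                       'drac_password': 'calvin'}
#
# parse_driver_info returns those three values plus the defaults
# drac_port=443 (converted to int), drac_path='/wsman' and
# drac_protocol='https'.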
def get_wsman_client(node):
"""Return a DRAC client object.
Given an ironic node object, this method gives back a
Client object which is a wrapper for pywsman.Client.
:param node: an ironic node object.
:returns: a Client object.
    :raises: InvalidParameterValue if some mandatory information
        is missing from the node or if the inputs are invalid.
"""
driver_info = parse_driver_info(node)
client = drac_client.Client(**driver_info)
return client
def find_xml(doc, item, namespace, find_all=False):
"""Find the first or all elements in a ElementTree object.
:param doc: the element tree object.
:param item: the element name.
:param namespace: the namespace of the element.
:param find_all: Boolean value, if True find all elements, if False
find only the first one. Defaults to False.
:returns: if find_all is False the element object will be returned
if found, None if not found. If find_all is True a list of
element objects will be returned or an empty list if no
elements were found.
"""
query = ('.//{%(namespace)s}%(item)s' % {'namespace': namespace,
'item': item})
if find_all:
return doc.findall(query)
return doc.find(query)
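# Illustrative usage (assumed, not from this module): given a WS-Man
# response parsed into an ElementTree element `doc`, fetch the first
# ReturnValue element in a (hypothetical) namespace:
#
#   ns = 'http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService'
#   ret = find_xml(doc, 'ReturnValue', ns)
#   value = ret.text if ret is not None else None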
|
froyobin/ironic
|
ironic/drivers/modules/drac/common.py
|
Python
|
apache-2.0
| 4,782
| 0
|
# Author: Jason Lu
import urllib.request
from bs4 import BeautifulSoup
import time
req_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding':'en-us',
'Connection':'keep-alive',
'Referer':'http://www.baidu.com/'
}
req_timeout = 5
testUrl = "http://www.baidu.com/"
testStr = "wahaha"
file1 = open('proxy.txt' , 'w')
# url = ""
# req = urllib2.Request(url,None,req_header)
# jsondatas = urllib2.urlopen(req,None,req_timeout).read()
# cookies = urllib2.HTTPCookieProcessor()
# Keep the login state alive across requests by handling cookies
import http.cookiejar
# Create a CookieJar object with http.cookiejar.CookieJar()
cjar = http.cookiejar.CookieJar()
cookies = urllib.request.HTTPCookieProcessor(cjar)
checked_num = 0
grasp_num = 0
for page in range(1, 3):
# req = urllib2.Request('http://www.xici.net.co/nn/' + str(page), None, req_header)
# html_doc = urllib2.urlopen(req, None, req_timeout).read()
req = urllib.request.Request('http://www.xici.net.co/nn/' + str(page))
req.add_header('User-Agent',
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1")
html_doc = urllib.request.urlopen(req).read().decode('utf-8')
# html_doc = urllib2.urlopen('http://www.xici.net.co/nn/' + str(page)).read()
    soup = BeautifulSoup(html_doc, 'html.parser')  # explicit parser avoids a bs4 warning
trs = soup.find('table', id='ip_list').find_all('tr')
print(trs)
for tr in trs[1:]:
tds = tr.find_all('td')
ip = tds[1].text.strip()
port = tds[2].text.strip()
protocol = tds[5].text.strip()
if protocol == 'HTTP' or protocol == 'HTTPS':
#of.write('%s=%s:%s\n' % (protocol, ip, port))
print('%s=%s:%s' % (protocol, ip, port))
grasp_num +=1
proxyHandler = urllib.request.ProxyHandler({"http": r'http://%s:%s' % (ip, port)})
opener = urllib.request.build_opener(cookies, proxyHandler)
opener.addheaders = [('User-Agent',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]
t1 = time.time()
try:
req = opener.open(testUrl, timeout=req_timeout)
result = req.read()
timeused = time.time() - t1
pos = result.find(testStr)
if pos > 1:
file1.write(protocol+"\t"+ip+"\t"+port+"\n")
checked_num+=1
print(checked_num, grasp_num)
else:
continue
except Exception as e:
print(str(e))
continue
file1.close()
print(checked_num,grasp_num)
|
jinzekid/codehub
|
python/lyutil/ly_proxy_test.py
|
Python
|
gpl-3.0
| 3,046
| 0.009321
|
from contextlib import contextmanager
import pkg_resources
import os
def local_stream(name):
return pkg_resources.resource_stream(__name__, name)
def local_file(name):
return pkg_resources.resource_filename(__name__, name)
@contextmanager
def Environ(**kwargs):
orig = os.environ.copy()
replace = set(kwargs.keys()) & set(orig.keys())
removes = set(kwargs.keys()) - set(orig.keys())
try:
os.environ.update(kwargs)
yield
finally:
for r in removes:
os.environ.pop(r)
for r in replace:
os.environ[r] = orig[r]
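# Illustrative usage (assumed): temporarily override environment
# variables inside a test; previously set keys are restored and newly
# added keys are removed when the block exits.
#
#   with Environ(PATH='/tmp/bin', EXTRA_VAR='1'):
#       ...  # os.environ reflects the overrides here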
class O(dict):
def __getattr__(self, key):
return self[key]
|
bcsaller/layercake
|
tests/utils.py
|
Python
|
apache-2.0
| 671
| 0.00149
|
from django.db import models
# Django doesn't support big auto fields out of the box, see
# https://code.djangoproject.com/ticket/14286.
# This is a stripped down version of the BoundedBigAutoField from Sentry.
class BigAutoField(models.AutoField):
description = "Big Integer"
def db_type(self, connection):
engine = connection.settings_dict['ENGINE']
if 'mysql' in engine:
return "bigint AUTO_INCREMENT"
elif 'postgres' in engine:
return "bigserial"
else:
            raise NotImplementedError
def get_related_db_type(self, connection):
return models.BigIntegerField().db_type(connection)
def get_internal_type(self):
return "BigIntegerField"
class FlexibleForeignKey(models.ForeignKey):
def db_type(self, connection):
# This is required to support BigAutoField
rel_field = self.related_field
if hasattr(rel_field, 'get_related_db_type'):
return rel_field.get_related_db_type(connection)
return super(FlexibleForeignKey, self).db_type(connection)
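# Illustrative usage (assumed, not from this file): a model whose primary
# key is a 64-bit auto-increment column and whose foreign key columns
# match that type via FlexibleForeignKey; the model names are hypothetical.
#
#   class Job(models.Model):
#       id = BigAutoField(primary_key=True)
#       push = FlexibleForeignKey('Push')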
|
adusca/treeherder
|
treeherder/model/fields.py
|
Python
|
mpl-2.0
| 1,086
| 0
|
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
from django.core import validators
@deconstructible
class SkypeValidator:
message = _('Enter a valid URL.')
code = 'invalid'
def __call__(self, value):
if not value.startswith('skype:'):
raise ValidationError(self.message, code=self.code)
@deconstructible
class UrlValidator:
message = _('Enter a valid URL.')
code = 'invalid'
validators = [validators.URLValidator(), validators.EmailValidator(), SkypeValidator()]
    def __call__(self, value):
        def apply_validator(validator):
            # Returns True if this validator accepts the value.
            try:
                validator(value)
            except ValidationError:
                return False
            return True

        # The value is valid if at least one of the validators accepts it.
        if not any(apply_validator(v) for v in self.validators):
            raise ValidationError(self.message, code=self.code)
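# Illustrative behaviour (assumed intent): the combined validator accepts
# a value if any of its sub-validators does, and raises otherwise.
#
#   validate = UrlValidator()
#   validate('https://example.com')   # passes (URLValidator)
#   validate('user@example.com')      # passes (EmailValidator)
#   validate('skype:someuser')        # passes (SkypeValidator)
#   validate('not a url')             # raises ValidationError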
|
bronycub/sugarcub
|
users/validators.py
|
Python
|
gpl-3.0
| 1,069
| 0.008419
|
#-*-coding:utf-8-*-
from . import about_blueprint
from flask import render_template
@about_blueprint.route("/")
def about_index():
return render_template("about.html")
|
PythonScientists/Shape
|
main/module/about/views.py
|
Python
|
apache-2.0
| 173
| 0.017341
|
"""
Caching framework.
This package defines set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should not access a cache backend directly; instead it should
either use the "cache" variable made available here, or it should use the
get_cache() function made available here. get_cache() takes a backend URI
(e.g. "memcached://127.0.0.1:11211/") and returns an instance of a backend
cache class.
See docs/topics/cache.txt for information on the public API.
"""
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
__all__ = [
'get_cache', 'cache', 'DEFAULT_CACHE_ALIAS'
]
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
BACKENDS = {
'memcached': 'memcached',
'locmem': 'locmem',
'file': 'filebased',
'db': 'db',
'dummy': 'dummy',
}
DEFAULT_CACHE_ALIAS = 'default'
def parse_backend_uri(backend_uri):
"""
Converts the "backend_uri" into a cache scheme ('db', 'memcached', etc), a
host and any extra params that are required for the backend. Returns a
(scheme, host, params) tuple.
"""
if backend_uri.find(':') == -1:
raise InvalidCacheBackendError("Backend URI must start with scheme://")
scheme, rest = backend_uri.split(':', 1)
if not rest.startswith('//'):
raise InvalidCacheBackendError("Backend URI must start with scheme://")
host = rest[2:]
qpos = rest.find('?')
if qpos != -1:
params = dict(parse_qsl(rest[qpos+1:]))
host = rest[2:qpos]
else:
params = {}
if host.endswith('/'):
host = host[:-1]
return scheme, host, params
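# Worked example (sketch):
#   parse_backend_uri("memcached://127.0.0.1:11211/?timeout=30")
#   -> ('memcached', '127.0.0.1:11211', {'timeout': '30'})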
if not settings.CACHES:
import warnings
warnings.warn(
"settings.CACHE_* is deprecated; use settings.CACHES instead.",
PendingDeprecationWarning
)
# Mapping for new-style cache backend api
backend_classes = {
'memcached': 'memcached.CacheClass',
'locmem': 'locmem.LocMemCache',
'file': 'filebased.FileBasedCache',
'db': 'db.DatabaseCache',
'dummy': 'dummy.DummyCache',
}
engine, host, params = parse_backend_uri(settings.CACHE_BACKEND)
if engine in backend_classes:
engine = 'django.core.cache.backends.%s' % backend_classes[engine]
defaults = {
'BACKEND': engine,
'LOCATION': host,
}
defaults.update(params)
settings.CACHES[DEFAULT_CACHE_ALIAS] = defaults
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
raise ImproperlyConfigured("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def parse_backend_conf(backend, **kwargs):
"""
Helper function to parse the backend configuration
that doesn't use the URI notation.
"""
# Try to get the CACHES entry for the given backend name first
conf = settings.CACHES.get(backend, None)
if conf is not None:
args = conf.copy()
backend = args.pop('BACKEND')
location = args.pop('LOCATION', '')
return backend, location, args
else:
# Trying to import the given backend, in case it's a dotted path
mod_path, cls_name = backend.rsplit('.', 1)
try:
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError):
raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
location = kwargs.pop('LOCATION', '')
return backend, location, kwargs
raise InvalidCacheBackendError(
"Couldn't find a cache backend named '%s'" % backend)
def get_cache(backend, **kwargs):
"""
Function to load a cache backend dynamically. This is flexible by design
to allow different use cases:
To load a backend with the old URI-based notation::
cache = get_cache('locmem://')
To load a backend that is pre-defined in the settings::
cache = get_cache('default')
To load a backend with its dotted import path,
including arbitrary options::
cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
})
"""
try:
if '://' in backend:
# for backwards compatibility
backend, location, params = parse_backend_uri(backend)
if backend in BACKENDS:
backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
params.update(kwargs)
mod = importlib.import_module(backend)
backend_cls = mod.CacheClass
else:
backend, location, params = parse_backend_conf(backend, **kwargs)
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError), e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
return backend_cls(location, params)
cache = get_cache(DEFAULT_CACHE_ALIAS)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
signals.request_finished.connect(cache.close)
|
heracek/django-nonrel
|
django/core/cache/__init__.py
|
Python
|
bsd-3-clause
| 6,144
| 0.001302
|
class DinkyFish:
def monthsUntilCrowded(self, tankVolume, maleNum, femaleNum):
months = 0
while maleNum + femaleNum <= (tankVolume * 2):
minFishes = min(maleNum, femaleNum)
maleNum += minFishes
femaleNum += minFishes
months += 1
return months
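# Worked example (sketch): tankVolume=10 allows 2*10=20 fish; starting from
# 2 males and 4 females the population goes 6 -> 10 -> 18 -> 34, so
# DinkyFish().monthsUntilCrowded(10, 2, 4) == 3.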
|
Oscarbralo/TopBlogCoder
|
SRMs/SRM180PY/250/250.py
|
Python
|
mit
| 326
| 0.003067
|
import datetime
from io import StringIO
from pytest import fixture
from gnss_tec import ObsFileV2, ObsFileV3
RNX_V2 = """\
2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE
teqc 2016Nov7 NOAA/NOS/NGS/CORS 20170707 04:06:33UTCPGM / RUN BY / DATE
ASPA MARKER NAME
50503S006 MARKER NUMBER
Giovanni Sella NGS OBSERVER / AGENCY
4733K06635 TRIMBLE NETR5 4.85 REC # / TYPE / VERS
30517456 TRM55971.00 NONE ANT # / TYPE
-6100258.8690 -996506.1670 -1567978.8630 APPROX POSITION XYZ
0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N
1 1 WAVELENGTH FACT L1/2
11 L1 L2 L5 C1 P1 C2 P2 C5 S1# / TYPES OF OBSERV
S2 S5 # / TYPES OF OBSERV
30.0000 INTERVAL
18 LEAP SECONDS
2017 7 6 0 0 0.0000000 GPS TIME OF FIRST OBS
END OF HEADER
4 5
ASPA (COGO code) COMMENT
0.000 (antenna height) COMMENT
-14.32609534 (latitude) COMMENT
-170.72243361 (longitude) COMMENT
0053.667 (elevation) COMMENT
17 7 6 0 0 0.0000000 0 18G18R15G31G03R06G16G01R09G25G22R05G29-0.000392832
R16G26R04G10G32G14
129609926.497 6 100994793.77642 24663965.641
24663974.148 38.600 17.800
120505665.941 6 93726662.377 6 22550992.016 22550991.051
22550998.707 41.700 39.400
113401304.102 8 88364763.776 7 21579566.188
21579571.359 21579571.531 50.300 46.200
132701874.619 5 103404140.724 5 25252336.969
25252347.414 33.700 34.400
119263436.899 6 92760508.769 5 22349925.250 22349924.051
22349927.602 38.100 35.100
116184238.344 7 90533145.56945 22109098.484
22109105.234 45.600 33.200
129470789.804 6 100886299.783 6 24637455.992
24637464.797 24637466.082 37.100 37.300
114931261.449 7 89391042.915 7 21522933.477 21522934.391
21522939.465 45.900 43.900
131228058.513 6 102255791.926 6 24971881.508
24971889.785 24971890.309 38.400 36.300
119420387.410 7 93054887.93344 22724945.750
22724949.512 43.200 29.400
104095002.622 7 80962839.312 7 19473125.563 19473125.184
19473131.082 43.900 42.200
131232157.556 6 102258880.431 5 24972645.516
24972654.613 24972654.199 38.300 34.800
106080541.169 7 82507163.624 7 19858497.734 19858498.063
19858503.371 44.000 42.800
108649979.923 8 84662364.399 8 20675386.594
20675395.574 20675395.805 48.400 51.100
112909742.180 8 87818759.471 7 21085104.797 21085103.715
21085108.438 48.100 44.700
115661530.779 8 90125872.381 7 22009648.641
22009657.211 22009657.441 48.500 47.600
115505192.609 7 90004072.298 7 21979890.539
21979899.461 21979899.281 47.500 47.600
113491920.675 7 88435293.67545 21596788.523
21596794.160 46.100 32.700
17 7 6 0 1 0.0000000 0 18 18R15G31G03R06G16G01R09G25G22R05G29
R16G26R04G10G32G14
129714491.092 6 101076272.53043 24683863.789
24683872.414 39.200 18.600
120613774.752 7 93810746.963 6 22571222.727 22571222.703
22571230.711 42.500 39.700
113438416.847 8 88393682.795 7 21586628.695
21586633.398 21586633.336 50.300 46.600
132599072.037 5 103324034.869 6 25232775.227
25232785.262 25232781.449 34.600 36.400
119149217.493 6 92671671.486 5 22328518.555 22328518.293
22328522.430 38.300 35.000
116099973.097 7 90467484.36845 22093063.586
22093069.574 45.900 33.100
129470125.015 6 100885781.713 6 24637328.750
24637339.078 24637340.129 36.200 37.000
114869248.525 7 89342810.692 7 21511321.695 21511321.555
21511325.922 46.600 44.500
131324730.690 6 102331120.877 6 24990277.883
24990285.867 24990286.273 38.900 37.100
119340545.428 7 92992673.42545 22709753.359
22709755.480 46.100 31.200
104062372.020 7 80937459.929 7 19467020.781 19467021.227
19467027.590 44.100 42.700
131219712.462 6 102249182.977 6 24970277.469
24970285.688 24970286.094 39.900 36.900
106112572.378 7 82532076.791 7 19864493.438 19864493.176
19864498.133 43.700 42.700
108609118.768 8 84630524.539 8 20667611.063
20667619.516 20667619.746 48.500 51.000
112981641.858 7 87874681.372 7 21098530.055 21098530.383
21098535.574 47.800 45.400
115746528.568 8 90192104.390 7 22025823.547
22025831.473 22025832.172 49.600 47.100
115506300.717 8 90004935.735 7 21980103.695
21980111.211 21980110.855 48.800 47.200
113479270.250 7 88425436.16745 21594381.758
21594386.398 45.500 32.700
"""
RNX_HEADER_V2 = """\
2.11 OBSERVATION DATA M (MIXED) RINEX VERSION / TYPE
teqc 2016Apr1 BKG Frankfurt 20170707 00:23:29UTCPGM / RUN BY / DATE
ADIS MARKER NAME
31502M001 MARKER NUMBER
NTRIPS05-769322-52 ADDIS ABABA UNIVERSITY OBSERVER / AGENCY
MT300102915 JPS LEGACY 2.6.1 JAN,10,2008 REC # / TYPE / VERS
0220173805 TRM29659.00 NONE ANT # / TYPE
4913652.8072 3945922.6351 995383.2858 APPROX POSITION XYZ
0.0010 0.0000 0.0000 ANTENNA: DELTA H/E/N
1 1 WAVELENGTH FACT L1/2
21 L1 P1 C1 L2 P2 D1 D2 S1 S2# / TYPES OF OBSERV
L5 C5 D5 S5 L7 C7 D7 S7 L8# / TYPES OF OBSERV
C8 D8 S8 # / TYPES OF OBSERV
30.0000 INTERVAL
17 LEAP SECONDS
0 RCV CLOCK OFFS APPL
2017 7 6 0 0 0.0000000 GPS TIME OF FIRST OBS
Linux 2.4.21-27.ELsmp|Opteron|gcc -static|Linux x86_64|=+ COMMENT
MAKERINEX 2.0.20973 AAU/NTRIPS05 2017-07-06 01:04 COMMENT
END OF HEADER
"""
RNX_V3 = '''\
3.02 OBSERVATION DATA M RINEX VERSION / TYPE
Converto v3.4.8 IGN-RGP 20170627 013115 UTC PGM / RUN BY / DATE
AJAC MARKER NAME
10077M005 MARKER NUMBER
Automatic Institut Geographique National OBSERVER / AGENCY
1830139 LEICA GR25 4.02 REC # / TYPE / VERS
4611118324 TRM57971.00 NONE ANT # / TYPE
4696989.7040 723994.2090 4239678.3140 APPROX POSITION XYZ
0.0000 0.0000 0.0000 ANTENNA: DELTA H/E/N
G 12 C1C L1C D1C S1C C2W L2W D2W S2W C5Q L5Q D5Q S5Q SYS / # / OBS TYPES
R 8 C1C L1C D1C S1C C2P L2P D2P S2P SYS / # / OBS TYPES
E 16 C1C L1C D1C S1C C5Q L5Q D5Q S5Q C7Q L7Q D7Q S7Q C8Q SYS / # / OBS TYPES
L8Q D8Q S8Q SYS / # / OBS TYPES
C 8 C1I L1I D1I S1I C7I L7I D7I S7I SYS / # / OBS TYPES
S 4 C1C L1C D1C S1C SYS / # / OBS TYPES
DBHZ SIGNAL STRENGTH UNIT
30.000 INTERVAL
2017 06 26 00 00 0.0000000 GPS TIME OF FIRST OBS
2017 06 26 23 59 30.0000000 GPS TIME OF LAST OBS
0 RCV CLOCK OFFS APPL
G L2S -0.25000 SYS / PHASE SHIFT
G L2X -0.25000 SYS / PHASE SHIFT
R L2P 0.25000 SYS / PHASE SHIFT
E L8Q -0.25000 SYS / PHASE SHIFT
24 R01 1 R02 -4 R03 5 R04 6 R05 1 R06 -4 R07 5 R08 6 GLONASS SLOT / FRQ #
R09 -2 R10 -7 R11 0 R12 -1 R13 -2 R14 -7 R15 0 R16 -1 GLONASS SLOT / FRQ #
R17 4 R18 -3 R19 3 R20 2 R21 4 R22 -3 R23 3 R24 2 GLONASS SLOT / FRQ #
C1C -71.940 C1P -71.940 C2C -71.940 C2P -71.940 GLONASS COD/PHS/BIS
18 18 1929 7 LEAP SECONDS
END OF HEADER
> 2017 06 26 00 00 0.0000000 0 4
G06 20835332.939 109490435.32508 -587.633 50.500 20835328.717 85317207.80808 -457.896 48.250 20835330.401 81762343.64108 -438.821 52.350
R04 24135247.881 129243249.65706 -2964.509 39.250 24135244.262 100522446.54306 -2305.728 39.000
E02 25206580.771 132461485.07148 1704.855 50.900 25206579.417 98916045.45308 1273.096 50.150 25206576.244 101496450.89908 1306.281 51.950 25206577.942 100206247.02308 1289.659 48.650
C10 38625935.135 201135401.51606 436.003 40.600 38625926.793 155530626.32107 337.087 45.300
> 4 1
COMMENT
> 2017 06 26 00 00 30.0000000 0 5
G02 23269584.628 122282497.09607 2373.850 45.900 23269574.831 95285049.78406 1849.752 40.000
R06 20254437.775 108081579.18807 1594.895 44.050 20254434.977 84063449.71306 1240.474 41.000
E03 26199562.760 137679722.61448 -1953.987 49.350 26199562.201 102812831.22108 -1459.200 48.450 26199559.507 105494888.18008 -1497.248 50.800 26199561.223 104153862.04807 -1478.202 46.700
C05 39875325.769 207641286.35906 2.988 37.750 39875315.615 160561383.77107 2.041 43.350
S20 38144728.445 200451840.25607 -84.098 44.000
'''
RNX_HEADER_V3 = '''\
3.02 OBSERVATION DATA M: MIXED RINEX VERSION / TYPE
G 16 C1C L1C D1C S1C C2S L2S D2S S2S C2W L2W D2W S2W C5Q SYS / # / OBS TYPES
L5Q D5Q S5Q SYS / # / OBS TYPES
R 12 C1C L1C D1C S1C C2P L2P D2P S2P C2C L2C D2C S2C SYS / # / OBS TYPES
E 16 C1C L1C D1C S1C C5Q L5Q D5Q S5Q C7Q L7Q D7Q S7Q C8Q SYS / # / OBS TYPES
L8Q D8Q S8Q SYS / # / OBS TYPES
C 8 C1I L1I D1I S1I C7I L7I D7I S7I SYS / # / OBS TYPES
J 12 C1C L1C D1C S1C C2S L2S D2S S2S C5Q L5Q D5Q S5Q SYS / # / OBS TYPES
S 4 C1C L1C D1C S1C SYS / # / OBS TYPES
30.000 INTERVAL
2015 12 19 00 00 0.0000000 GPS TIME OF FIRST OBS
2015 12 19 23 59 30.0000000 GPS TIME OF LAST OBS
END OF HEADER
'''
@fixture
def dumb_obs_v2():
obs_file = StringIO(RNX_HEADER_V2)
return ObsFileV2(
obs_file,
version=2.11,
)
@fixture
def glo_freq_nums_v2():
"""
    Deliberately tailored to obs_v2; does not match what is in nav_v2_stream.
    FIXME: bring both to a single format.
"""
return {
4: {datetime.datetime(2017, 7, 6, 0, 15): 6.0},
5: {datetime.datetime(2017, 7, 6, 0, 15): 1.0},
6: {datetime.datetime(2017, 7, 6, 0, 15): -4.0},
9: {datetime.datetime(2017, 7, 6, 0, 15): -2.0},
15: {datetime.datetime(2017, 7, 6, 0, 15): 0.0},
16: {datetime.datetime(2017, 7, 6, 0, 15): -1.0},
}
@fixture
def obs_v2(glo_freq_nums_v2):
obs_file = StringIO(RNX_V2)
return ObsFileV2(
obs_file,
version=2.11,
glo_freq_nums=glo_freq_nums_v2,
)
@fixture
def obs_absent_slot_v2(glo_freq_nums_v2):
del glo_freq_nums_v2[16]
obs_file = StringIO(RNX_V2)
return ObsFileV2(
obs_file,
version=2.11,
glo_freq_nums=glo_freq_nums_v2,
)
@fixture
def dumb_obs_v3():
obs_file = StringIO(RNX_HEADER_V3)
return ObsFileV3(
obs_file,
version=3.02,
)
@fixture
def glo_freq_nums_v3():
"""
    Deliberately tailored to obs_v3; does not match what is in nav_v3_stream.
    FIXME: bring both to a single format.
"""
return {
4: {datetime.datetime(2017, 6, 26, 0): 6},
6: {datetime.datetime(2017, 6, 26, 0): -4},
}
@fixture
def obs_v3(glo_freq_nums_v3):
obs_file = StringIO(RNX_V3)
return ObsFileV3(
obs_file,
version=3.02,
glo_freq_nums=glo_freq_nums_v3,
)
@fixture
def obs_absent_slot_v3(glo_freq_nums_v3):
del glo_freq_nums_v3[4]
obs_file = StringIO(RNX_V3)
return ObsFileV3(
obs_file,
version=3.02,
glo_freq_nums=glo_freq_nums_v3,
)
@fixture(params=['obs_absent_slot_v2', 'obs_absent_slot_v3'])
def obs_absent_slot(request):
return request.getfixturevalue(request.param)
@fixture
def nav_v2_stream():
content = '''\
2.01 GLONASS NAV DATA RINEX VERSION / TYPE
CCRINEXG V1.4 UX CDDIS 09-MAR-16 12:44 PGM / RUN BY / DATE
17 LEAP SECONDS
END OF HEADER
1 16 1 1 0 15 0.0-0.147201120853D-03 0.000000000000D+00 0.300000000000D+02
0.634915380859D+04-0.254754066467D+00 0.000000000000D+00 0.000000000000D+00
0.170520444336D+05 0.243410968780D+01 0.186264514923D-08 0.100000000000E+01
-0.178746152344D+05 0.223380565643D+01 0.186264514923D-08 0.000000000000D+00
2 16 1 1 0 15 0.0 0.169292092323D-03 0.909494701773D-12 0.000000000000D+00
0.112042094727D+05-0.474354743957D+00-0.931322574616D-09 0.000000000000D+00
-0.190359375000D+04 0.309479141235D+01 0.000000000000D+00-0.400000000000e+01
-0.228054150391D+05-0.494022369385D+00 0.279396772385D-08 0.000000000000D+00
3 16 1 1 0 15 0.0 0.588288530707D-04 0.000000000000D+00 0.000000000000D+00
0.948807763672D+04-0.397904396057D+00-0.931322574616D-09 0.000000000000D+00
-0.192305532227D+05 0.185273551941D+01-0.931322574616D-09 0.500000000000d+01
-0.137629448242D+05-0.285944557190D+01 0.186264514923D-08 0.000000000000D+00
3 16 1 1 0 15 2.0 0.588288530707D-04 0.000000000000D+00 0.200000000000D+01
0.948807763672D+04-0.397904396057D+00-0.931322574615D-09 0.000000000000D+00
-0.192305532227D+05 0.185273551941D+01-0.931322574615D-09 0.500000000000D+01
-0.137629448242D+05-0.285944557190D+01 0.186264514923D-08 0.000000000000D+00
3 16 1 1 2 15 0.0 0.588288530707D-04 0.000000000000D+00 0.200000000000D+01
0.948807763672D+04-0.397904396057D+00-0.931322574615D-09 0.000000000000D+00
-0.192305532227D+05 0.185273551941D+01-0.931322574615D-09 0.600000000000D+01
-0.137629448242D+05-0.285944557190D+01 0.186264514923D-08 0.000000000000D+00
'''
return StringIO(content)
@fixture
def nav_v2_freq_nums():
return {
1: {datetime.datetime(2016, 1, 1, 0, 15): 1},
2: {datetime.datetime(2016, 1, 1, 0, 15): -4},
3: {
datetime.datetime(2016, 1, 1, 0, 15): 5,
datetime.datetime(2016, 1, 1, 2, 15): 6,
},
}
@fixture
def nav_v3_stream():
content = '''\
3.03 NAVIGATION DATA M (Mixed) RINEX VERSION / TYPE
BCEmerge congo 20180101 012902 GMT PGM / RUN BY / DATE
18 LEAP SECONDS
END OF HEADER
G01 2017 12 31 02 00 00-2.053100615740e-05-2.728484105319e-12 0.000000000000e+00
4.800000000000e+01-5.400000000000e+01 3.942664227791e-09-1.390323644218e+00
-2.790242433548e-06 7.220194092952e-03 1.273490488529e-05 5.153681987762e+03
7.200000000000e+03 1.862645149231e-07-1.153121228013e-01-5.401670932770e-08
9.700952267372e-01 1.391875000000e+02 6.230257609866e-01-7.583887327920e-09
4.771627328860e-10 1.000000000000e+00 1.982000000000e+03 0.000000000000e+00
2.000000000000e+00 0.000000000000e+00 5.587935447693e-09 4.800000000000e+01
1.800000000000e+01
S20 2017 12 31 00 00 48 0.000000000000e+00 0.000000000000e+00 5.800000000000e+01
4.063672000000e+04 0.000000000000e+00 0.000000000000e+00 1.000000000000e+00
-1.124591600000e+04 0.000000000000e+00 0.000000000000e+00 3.276700000000e+04
0.000000000000e+00 0.000000000000e+00 0.000000000000e+00 3.000000000000e+00
R01 2017 12 31 00 15 00 2.002064138651e-05 0.000000000000e+00 0.000000000000e+00
-6.600591796875e+03-7.817554473877e-02 1.862645149231e-09 0.000000000000e+00
-2.460916992188e+04 2.204618453979e-01 1.862645149231e-09 1.000000000000E+00
-1.388024902344e+03-3.550045967102e+00 0.000000000000e+00 0.000000000000e+00
E05 2017 12 31 17 00 00 3.507136716507e-04-7.418066161335e-12 0.000000000000e+00
1.020000000000e+02-3.050000000000e+01 4.012667143693e-09-4.573856882012e-01
-1.354143023491e-06 1.429287949577e-04 3.904104232788e-06 5.440600776672e+03
6.120000000000e+04 5.401670932770e-08 1.523815534839e+00 2.048909664154e-08
9.518374341811e-01 2.524687500000e+02 1.880333757323e+00-5.981677732316e-09
-2.325096849617e-10 5.160000000000e+02 1.982000000000e+03
3.120000000000e+00 0.000000000000e+00-2.095475792885e-09-2.328306436539e-09
6.195400000000e+04
R06 2017 12 31 13 45 00 1.242635771632e-04 0.000000000000e+00 4.860000000000e+04
-1.175926367188e+04 6.738233566284e-01 1.862645149231e-09 0.000000000000e+00
-2.086078564453e+04 1.010621070862e+00 1.862645149231e-09-4.000000000000D+00
8.810767089844e+03 3.282602310181e+00-9.313225746155e-10 0.000000000000e+00
'''
return StringIO(content)
@fixture
def nav_v3_freq_nums():
return {
1: {datetime.datetime(2017, 12, 31, 0, 15): 1},
6: {datetime.datetime(2017, 12, 31, 13, 45): -4},
}
@fixture(scope='function')
def nav_stream(request):
return request.getfixturevalue(request.param)
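# Usage sketch (hypothetical test; the reader API is assumed here): the
# fixtures above feed tests that iterate observation records, e.g.
#   def test_first_epoch(obs_v2):
#       first_record = next(iter(obs_v2))
#       assert first_record.timestamp == datetime.datetime(2017, 7, 6, 0, 0)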
|
gnss-lab/gnss-tec
|
tests/conftest.py
|
Python
|
mit
| 21,163
| 0.002755
|
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{txweb2.http_headers}.
"""
from twisted.trial import unittest
import random
import time
from txweb2 import http_headers
from txweb2.http_headers import Cookie, HeaderHandler, quoteString, generateKeyValues
from twisted.python import util
class parsedvalue:
"""Marker class"""
def __init__(self, raw):
self.raw = raw
def __eq__(self, other):
return isinstance(other, parsedvalue) and other.raw == self.raw
class HeadersAPITest(unittest.TestCase):
"""Make sure the public API exists and works."""
def testRaw(self):
rawvalue = ("value1", "value2")
h = http_headers.Headers(handler=HeaderHandler(parsers={}, generators={}))
h.setRawHeaders("test", rawvalue)
self.assertEquals(h.hasHeader("test"), True)
self.assertEquals(h.getRawHeaders("test"), rawvalue)
self.assertEquals(list(h.getAllRawHeaders()), [('Test', rawvalue)])
self.assertEquals(h.getRawHeaders("foobar"), None)
h.removeHeader("test")
self.assertEquals(h.getRawHeaders("test"), None)
def testParsed(self):
parsed = parsedvalue(("value1", "value2"))
h = http_headers.Headers(handler=HeaderHandler(parsers={}, generators={}))
h.setHeader("test", parsed)
self.assertEquals(h.hasHeader("test"), True)
self.assertEquals(h.getHeader("test"), parsed)
self.assertEquals(h.getHeader("foobar"), None)
h.removeHeader("test")
self.assertEquals(h.getHeader("test"), None)
def testParsedAndRaw(self):
def parse(raw):
return parsedvalue(raw)
def generate(parsed):
return parsed.raw
rawvalue = ("value1", "value2")
rawvalue2 = ("value3", "value4")
handler = HeaderHandler(parsers={'test': (parse,)},
generators={'test': (generate,)})
h = http_headers.Headers(handler=handler)
h.setRawHeaders("test", rawvalue)
self.assertEquals(h.getHeader("test"), parsedvalue(rawvalue))
h.setHeader("test", parsedvalue(rawvalue2))
self.assertEquals(h.getRawHeaders("test"), rawvalue2)
# Check the initializers
h = http_headers.Headers(rawHeaders={"test": rawvalue},
handler=handler)
self.assertEquals(h.getHeader("test"), parsedvalue(rawvalue))
h = http_headers.Headers({"test": parsedvalue(rawvalue2)},
handler=handler)
self.assertEquals(h.getRawHeaders("test"), rawvalue2)
def testImmutable(self):
h = http_headers.Headers(handler=HeaderHandler(parsers={}, generators={}))
h.makeImmutable()
self.assertRaises(AttributeError, h.setRawHeaders, "test", [1])
self.assertRaises(AttributeError, h.setHeader, "test", 1)
self.assertRaises(AttributeError, h.removeHeader, "test")
class TokenizerTest(unittest.TestCase):
"""Test header list parsing functions."""
def testParse(self):
parser = lambda val: list(http_headers.tokenize([val, ]))
Token = http_headers.Token
tests = (('foo,bar', ['foo', Token(','), 'bar']),
('FOO,BAR', ['foo', Token(','), 'bar']),
(' \t foo \t bar \t , \t baz ', ['foo', Token(' '), 'bar', Token(','), 'baz']),
('()<>@,;:\\/[]?={}', [Token('('), Token(')'), Token('<'), Token('>'), Token('@'), Token(','), Token(';'), Token(':'), Token('\\'), Token('/'), Token('['), Token(']'), Token('?'), Token('='), Token('{'), Token('}')]),
(' "foo" ', ['foo']),
('"FOO(),\\"BAR,"', ['FOO(),"BAR,']))
raiseTests = ('"open quote', '"ending \\', "control character: \x127", "\x00", "\x1f")
for test, result in tests:
self.assertEquals(parser(test), result)
for test in raiseTests:
self.assertRaises(ValueError, parser, test)
def testGenerate(self):
pass
def testRoundtrip(self):
pass
def atSpecifiedTime(when, func):
def inner(*a, **kw):
orig = time.time
time.time = lambda: when
try:
return func(*a, **kw)
finally:
time.time = orig
return util.mergeFunctionMetadata(func, inner)
def parseHeader(name, val):
head = http_headers.Headers(handler=http_headers.DefaultHTTPHandler)
head.setRawHeaders(name, val)
return head.getHeader(name)
parseHeader = atSpecifiedTime(999999990, parseHeader) # Sun, 09 Sep 2001 01:46:30 GMT
def generateHeader(name, val):
head = http_headers.Headers(handler=http_headers.DefaultHTTPHandler)
head.setHeader(name, val)
return head.getRawHeaders(name)
generateHeader = atSpecifiedTime(999999990, generateHeader) # Sun, 09 Sep 2001 01:46:30 GMT
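# Worked example (sketch), with time.time() frozen at 999999990 by
# atSpecifiedTime:
#   parseHeader("Content-Length", ["15"]) == 15
#   generateHeader("Content-Length", 15) == ["15"]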
class HeaderParsingTestBase(unittest.TestCase):
def runRoundtripTest(self, headername, table):
"""
Perform some assertions about the behavior of parsing and
generating HTTP headers. Specifically: parse an HTTP header
value, assert that the parsed form contains all the available
information with the correct structure; generate the HTTP
header value from the parsed form, assert that it contains
certain literal strings; finally, re-parse the generated HTTP
header value and assert that the resulting structured data is
the same as the first-pass parsed form.
@type headername: C{str}
@param headername: The name of the HTTP header L{table} contains values for.
@type table: A sequence of tuples describing inputs to and
outputs from header parsing and generation. The tuples may be
either 2 or 3 elements long. In either case: the first
element is a string representing an HTTP-format header value;
the second element is a dictionary mapping names of parameters
to values of those parameters (the parsed form of the header).
If there is a third element, it is a list of strings which
must occur exactly in the HTTP header value
string which is re-generated from the parsed form.
"""
for row in table:
if len(row) == 2:
rawHeaderInput, parsedHeaderData = row
requiredGeneratedElements = []
elif len(row) == 3:
rawHeaderInput, parsedHeaderData, requiredGeneratedElements = row
assert isinstance(requiredGeneratedElements, list)
# parser
parsed = parseHeader(headername, [rawHeaderInput, ])
self.assertEquals(parsed, parsedHeaderData)
regeneratedHeaderValue = generateHeader(headername, parsed)
if requiredGeneratedElements:
# generator
for regeneratedElement in regeneratedHeaderValue:
reqEle = requiredGeneratedElements[regeneratedHeaderValue.index(regeneratedElement)]
elementIndex = regeneratedElement.find(reqEle)
self.assertNotEqual(
elementIndex, -1,
"%r did not appear in generated HTTP header %r: %r" % (reqEle,
headername,
regeneratedElement))
# parser/generator
reparsed = parseHeader(headername, regeneratedHeaderValue)
self.assertEquals(parsed, reparsed)
def invalidParseTest(self, headername, values):
for val in values:
parsed = parseHeader(headername, val)
self.assertEquals(parsed, None)
class GeneralHeaderParsingTests(HeaderParsingTestBase):
def testCacheControl(self):
table = (
("no-cache",
{'no-cache': None}),
("no-cache, no-store, max-age=5, max-stale=3, min-fresh=5, no-transform, only-if-cached, blahblah-extension-thingy",
{'no-cache': None,
'no-store': None,
'max-age': 5,
'max-stale': 3,
'min-fresh': 5,
'no-transform': None,
'only-if-cached': None,
'blahblah-extension-thingy': None}),
("max-stale",
{'max-stale': None}),
("public, private, no-cache, no-store, no-transform, must-revalidate, proxy-revalidate, max-age=5, s-maxage=10, blahblah-extension-thingy",
{'public': None,
'private': None,
'no-cache': None,
'no-store': None,
'no-transform': None,
'must-revalidate': None,
'proxy-revalidate': None,
'max-age': 5,
's-maxage': 10,
'blahblah-extension-thingy': None}),
('private="Set-Cookie, Set-Cookie2", no-cache="PROXY-AUTHENTICATE"',
{'private': ['set-cookie', 'set-cookie2'],
'no-cache': ['proxy-authenticate']},
['private="Set-Cookie, Set-Cookie2"', 'no-cache="Proxy-Authenticate"']),
)
self.runRoundtripTest("Cache-Control", table)
def testConnection(self):
table = (
("close", ['close', ]),
("close, foo-bar", ['close', 'foo-bar'])
)
self.runRoundtripTest("Connection", table)
def testDate(self):
# Don't need major tests since the datetime parser has its own tests
self.runRoundtripTest("Date", (("Sun, 09 Sep 2001 01:46:40 GMT", 1000000000),))
# def testPragma(self):
# fail
# def testTrailer(self):
# fail
def testTransferEncoding(self):
table = (
('chunked', ['chunked']),
('gzip, chunked', ['gzip', 'chunked'])
)
self.runRoundtripTest("Transfer-Encoding", table)
# def testUpgrade(self):
# fail
# def testVia(self):
# fail
# def testWarning(self):
# fail
class RequestHeaderParsingTests(HeaderParsingTestBase):
# FIXME test ordering too.
def testAccept(self):
table = (
("audio/*;q=0.2, audio/basic",
{http_headers.MimeType('audio', '*'): 0.2,
http_headers.MimeType('audio', 'basic'): 1.0}),
("text/plain;q=0.5, text/html, text/x-dvi;q=0.8, text/x-c",
{http_headers.MimeType('text', 'plain'): 0.5,
http_headers.MimeType('text', 'html'): 1.0,
http_headers.MimeType('text', 'x-dvi'): 0.8,
http_headers.MimeType('text', 'x-c'): 1.0}),
("text/*, text/html, text/html;level=1, */*",
{http_headers.MimeType('text', '*'): 1.0,
http_headers.MimeType('text', 'html'): 1.0,
http_headers.MimeType('text', 'html', (('level', '1'),)): 1.0,
http_headers.MimeType('*', '*'): 1.0}),
("text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5",
{http_headers.MimeType('text', '*'): 0.3,
http_headers.MimeType('text', 'html'): 0.7,
http_headers.MimeType('text', 'html', (('level', '1'),)): 1.0,
http_headers.MimeType('text', 'html', (('level', '2'),)): 0.4,
http_headers.MimeType('*', '*'): 0.5}),
)
self.runRoundtripTest("Accept", table)
def testAcceptCharset(self):
table = (
("iso-8859-5, unicode-1-1;q=0.8",
{'iso-8859-5': 1.0, 'iso-8859-1': 1.0, 'unicode-1-1': 0.8},
["iso-8859-5", "unicode-1-1;q=0.8", "iso-8859-1"]),
("iso-8859-1;q=0.7",
{'iso-8859-1': 0.7}),
("*;q=.7",
{'*': 0.7},
["*;q=0.7"]),
("",
{'iso-8859-1': 1.0},
["iso-8859-1"]), # Yes this is an actual change -- we'll say that's okay. :)
)
self.runRoundtripTest("Accept-Charset", table)
def testAcceptEncoding(self):
table = (
("compress, gzip",
{'compress': 1.0, 'gzip': 1.0, 'identity': 0.0001}),
("",
{'identity': 0.0001}),
("*",
{'*': 1}),
("compress;q=0.5, gzip;q=1.0",
{'compress': 0.5, 'gzip': 1.0, 'identity': 0.0001},
["compress;q=0.5", "gzip"]),
("gzip;q=1.0, identity;q=0.5, *;q=0",
{'gzip': 1.0, 'identity': 0.5, '*': 0},
["gzip", "identity;q=0.5", "*;q=0"]),
)
self.runRoundtripTest("Accept-Encoding", table)
def testAcceptLanguage(self):
table = (
("da, en-gb;q=0.8, en;q=0.7",
{'da': 1.0, 'en-gb': 0.8, 'en': 0.7}),
("*",
{'*': 1}),
)
self.runRoundtripTest("Accept-Language", table)
def testAuthorization(self):
table = (
("Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
("basic", "dXNlcm5hbWU6cGFzc3dvcmQ="),
["basic dXNlcm5hbWU6cGFzc3dvcmQ="]),
('Digest nonce="bar", realm="foo", username="baz", response="bax"',
('digest', 'nonce="bar", realm="foo", username="baz", response="bax"'),
['digest', 'nonce="bar"', 'realm="foo"', 'username="baz"', 'response="bax"'])
)
self.runRoundtripTest("Authorization", table)
def testCookie(self):
table = (
('name=value', [Cookie('name', 'value')]),
('"name"="value"', [Cookie('"name"', '"value"')]),
('name,"blah=value,"', [Cookie('name,"blah', 'value,"')]),
('name,"blah = value," ', [Cookie('name,"blah', 'value,"')], ['name,"blah=value,"']),
("`~!@#$%^&*()-_+[{]}\\|:'\",<.>/?=`~!@#$%^&*()-_+[{]}\\|:'\",<.>/?", [Cookie("`~!@#$%^&*()-_+[{]}\\|:'\",<.>/?", "`~!@#$%^&*()-_+[{]}\\|:'\",<.>/?")]),
(
'name,"blah = value," ; name2=val2',
[Cookie('name,"blah', 'value,"'), Cookie('name2', 'val2')],
['name,"blah=value,"', 'name2=val2']
),
)
self.runRoundtripTest("Cookie", table)
# newstyle RFC2965 Cookie
table2 = (
('$Version="1";'
'name="value";$Path="/foo";$Domain="www.local";$Port="80,8000";'
'name2="value"',
[Cookie('name', 'value', path='/foo', domain='www.local', ports=(80, 8000), version=1), Cookie('name2', 'value', version=1)]),
('$Version="1";'
'name="value";$Port',
[Cookie('name', 'value', ports=(), version=1)]),
('$Version = 1, NAME = "qq\\"qq",Frob=boo',
[Cookie('name', 'qq"qq', version=1), Cookie('frob', 'boo', version=1)],
['$Version="1";name="qq\\"qq";frob="boo"']),
)
self.runRoundtripTest("Cookie", table2)
# Generate only!
# make headers by combining oldstyle and newstyle cookies
table3 = (
([Cookie('name', 'value'), Cookie('name2', 'value2', version=1)],
'$Version="1";name=value;name2="value2"'),
([Cookie('name', 'value', path="/foo"), Cookie('name2', 'value2', domain="bar.baz", version=1)],
'$Version="1";name=value;$Path="/foo";name2="value2";$Domain="bar.baz"'),
([Cookie('invalid,"name', 'value'), Cookie('name2', 'value2', version=1)],
'$Version="1";name2="value2"'),
([Cookie('name', 'qq"qq'), Cookie('name2', 'value2', version=1)],
'$Version="1";name="qq\\"qq";name2="value2"'),
)
for row in table3:
self.assertEquals(generateHeader("Cookie", row[0]), [row[1], ])
def testSetCookie(self):
table = (
('name,"blah=value,; expires=Sun, 09 Sep 2001 01:46:40 GMT; path=/foo; domain=bar.baz; secure',
[Cookie('name,"blah', 'value,', expires=1000000000, path="/foo", domain="bar.baz", secure=True)]),
('name,"blah = value, ; expires="Sun, 09 Sep 2001 01:46:40 GMT"',
[Cookie('name,"blah', 'value,', expires=1000000000)],
['name,"blah=value,', 'expires=Sun, 09 Sep 2001 01:46:40 GMT']),
)
self.runRoundtripTest("Set-Cookie", table)
def testSetCookie2(self):
table = (
('name="value"; Comment="YadaYada"; CommentURL="http://frobnotz/"; Discard; Domain="blah.blah"; Max-Age=10; Path="/foo"; Port="80,8080"; Secure; Version="1"',
[Cookie("name", "value", comment="YadaYada", commenturl="http://frobnotz/", discard=True, domain="blah.blah", expires=1000000000, path="/foo", ports=(80, 8080), secure=True, version=1)]),
)
self.runRoundtripTest("Set-Cookie2", table)
def testExpect(self):
table = (
("100-continue",
{"100-continue": (None,)}),
('foobar=twiddle',
{'foobar': ('twiddle',)}),
("foo=bar;a=b;c",
{'foo': ('bar', ('a', 'b'), ('c', None))})
)
self.runRoundtripTest("Expect", table)
def testPrefer(self):
table = (
("wait",
[("wait", None, [])]),
("return = representation",
[("return", "representation", [])]),
("return =minimal;arg1;arg2=val2",
[("return", "minimal", [("arg1", None), ("arg2", "val2")])]),
)
self.runRoundtripTest("Prefer", table)
def testFrom(self):
self.runRoundtripTest("From", (("webmaster@w3.org", "webmaster@w3.org"),))
def testHost(self):
self.runRoundtripTest("Host", (("www.w3.org", "www.w3.org"),))
def testIfMatch(self):
table = (
('"xyzzy"', [http_headers.ETag('xyzzy')]),
('"xyzzy", "r2d2xxxx", "c3piozzzz"', [http_headers.ETag('xyzzy'),
http_headers.ETag('r2d2xxxx'),
http_headers.ETag('c3piozzzz')]),
('*', ['*']),
)
self.runRoundtripTest("If-Match", table)
def testIfModifiedSince(self):
# Don't need major tests since the datetime parser has its own test
# Just test stupid ; length= brokenness.
table = (
("Sun, 09 Sep 2001 01:46:40 GMT", 1000000000),
("Sun, 09 Sep 2001 01:46:40 GMT; length=500", 1000000000, ["Sun, 09 Sep 2001 01:46:40 GMT"]),
)
self.runRoundtripTest("If-Modified-Since", table)
def testIfNoneMatch(self):
table = (
('"xyzzy"', [http_headers.ETag('xyzzy')]),
('W/"xyzzy", "r2d2xxxx", "c3piozzzz"', [http_headers.ETag('xyzzy', weak=True),
http_headers.ETag('r2d2xxxx'),
http_headers.ETag('c3piozzzz')]),
('W/"xyzzy", W/"r2d2xxxx", W/"c3piozzzz"', [http_headers.ETag('xyzzy', weak=True),
http_headers.ETag('r2d2xxxx', weak=True),
http_headers.ETag('c3piozzzz', weak=True)]),
('*', ['*']),
)
self.runRoundtripTest("If-None-Match", table)
def testIfRange(self):
table = (
('"xyzzy"', http_headers.ETag('xyzzy')),
('W/"xyzzy"', http_headers.ETag('xyzzy', weak=True)),
('W/"xyzzy"', http_headers.ETag('xyzzy', weak=True)),
("Sun, 09 Sep 2001 01:46:40 GMT", 1000000000),
)
self.runRoundtripTest("If-Range", table)
def testIfUnmodifiedSince(self):
self.runRoundtripTest("If-Unmodified-Since", (("Sun, 09 Sep 2001 01:46:40 GMT", 1000000000),))
def testMaxForwards(self):
self.runRoundtripTest("Max-Forwards", (("15", 15),))
# def testProxyAuthorize(self):
# fail
def testRange(self):
table = (
("bytes=0-499", ('bytes', [(0, 499), ])),
("bytes=500-999", ('bytes', [(500, 999), ])),
("bytes=-500", ('bytes', [(None, 500), ])),
("bytes=9500-", ('bytes', [(9500, None), ])),
("bytes=0-0,-1", ('bytes', [(0, 0), (None, 1)])),
)
self.runRoundtripTest("Range", table)
def testReferer(self):
self.runRoundtripTest("Referer", (("http://www.w3.org/hypertext/DataSources/Overview.html",
"http://www.w3.org/hypertext/DataSources/Overview.html"),))
def testTE(self):
table = (
("deflate", {'deflate': 1}),
("", {}),
("trailers, deflate;q=0.5", {'trailers': 1, 'deflate': 0.5}),
)
self.runRoundtripTest("TE", table)
def testUserAgent(self):
self.runRoundtripTest("User-Agent", (("CERN-LineMode/2.15 libwww/2.17b3", "CERN-LineMode/2.15 libwww/2.17b3"),))
class ResponseHeaderParsingTests(HeaderParsingTestBase):
def testAcceptRanges(self):
self.runRoundtripTest("Accept-Ranges", (("bytes", ["bytes"]), ("none", ["none"])))
def testAge(self):
self.runRoundtripTest("Age", (("15", 15),))
def testETag(self):
table = (
('"xyzzy"', http_headers.ETag('xyzzy')),
('W/"xyzzy"', http_headers.ETag('xyzzy', weak=True)),
('""', http_headers.ETag('')),
)
self.runRoundtripTest("ETag", table)
def testLocation(self):
self.runRoundtripTest("Location", (("http://www.w3.org/pub/WWW/People.htm",
"http://www.w3.org/pub/WWW/People.htm"),))
# def testProxyAuthenticate(self):
# fail
def testRetryAfter(self):
# time() is always 999999990 when being tested.
table = (
("Sun, 09 Sep 2001 01:46:40 GMT", 1000000000, ["10"]),
("120", 999999990 + 120),
)
self.runRoundtripTest("Retry-After", table)
def testServer(self):
self.runRoundtripTest("Server", (("CERN/3.0 libwww/2.17", "CERN/3.0 libwww/2.17"),))
def testVary(self):
table = (
("*", ["*"]),
("Accept, Accept-Encoding", ["accept", "accept-encoding"], ["accept", "accept-encoding"])
)
self.runRoundtripTest("Vary", table)
def testWWWAuthenticate(self):
digest = ('Digest realm="digest realm", nonce="bAr", qop="auth"',
[('Digest', {'realm': 'digest realm', 'nonce': 'bAr',
'qop': 'auth'})],
['Digest', 'realm="digest realm"',
'nonce="bAr"', 'qop="auth"'])
basic = ('Basic realm="foo"',
[('Basic', {'realm': 'foo'})], ['Basic', 'realm="foo"'])
ntlm = ('NTLM',
[('NTLM', {})], ['NTLM', ''])
negotiate = ('Negotiate SomeGssAPIData',
[('Negotiate', 'SomeGssAPIData')],
['Negotiate', 'SomeGssAPIData'])
table = (digest,
basic,
(digest[0] + ', ' + basic[0],
digest[1] + basic[1],
[digest[2], basic[2]]),
ntlm,
negotiate,
(ntlm[0] + ', ' + basic[0],
ntlm[1] + basic[1],
[ntlm[2], basic[2]]),
(digest[0] + ', ' + negotiate[0],
digest[1] + negotiate[1],
[digest[2], negotiate[2]]),
(negotiate[0] + ', ' + negotiate[0],
negotiate[1] + negotiate[1],
[negotiate[2] + negotiate[2]]),
(ntlm[0] + ', ' + ntlm[0],
ntlm[1] + ntlm[1],
[ntlm[2], ntlm[2]]),
(basic[0] + ', ' + ntlm[0],
basic[1] + ntlm[1],
[basic[2], ntlm[2]]),
)
# runRoundtripTest doesn't work because we don't generate a single
# header
headername = 'WWW-Authenticate'
for row in table:
rawHeaderInput, parsedHeaderData, requiredGeneratedElements = row
parsed = parseHeader(headername, [rawHeaderInput, ])
self.assertEquals(parsed, parsedHeaderData)
regeneratedHeaderValue = generateHeader(headername, parsed)
for regeneratedElement in regeneratedHeaderValue:
requiredElements = requiredGeneratedElements[
regeneratedHeaderValue.index(
regeneratedElement)]
for reqEle in requiredElements:
elementIndex = regeneratedElement.find(reqEle)
self.assertNotEqual(
elementIndex, -1,
"%r did not appear in generated HTTP header %r: %r" % (reqEle,
headername,
regeneratedElement))
# parser/generator
reparsed = parseHeader(headername, regeneratedHeaderValue)
self.assertEquals(parsed, reparsed)
class EntityHeaderParsingTests(HeaderParsingTestBase):
def testAllow(self):
# Allow is a silly case-sensitive header unlike all the rest
table = (
("GET", ['GET', ]),
("GET, HEAD, PUT", ['GET', 'HEAD', 'PUT']),
)
self.runRoundtripTest("Allow", table)
def testContentEncoding(self):
table = (
("gzip", ['gzip', ]),
)
self.runRoundtripTest("Content-Encoding", table)
def testContentLanguage(self):
table = (
("da", ['da', ]),
("mi, en", ['mi', 'en']),
)
self.runRoundtripTest("Content-Language", table)
def testContentLength(self):
self.runRoundtripTest("Content-Length", (("15", 15),))
self.invalidParseTest("Content-Length", ("asdf",))
def testContentLocation(self):
self.runRoundtripTest("Content-Location",
(("http://www.w3.org/pub/WWW/People.htm",
"http://www.w3.org/pub/WWW/People.htm"),))
def testContentMD5(self):
self.runRoundtripTest("Content-MD5", (("Q2hlY2sgSW50ZWdyaXR5IQ==", "Check Integrity!"),))
self.invalidParseTest("Content-MD5", ("sdlaksjdfhlkaj",))
def testContentRange(self):
table = (
("bytes 0-499/1234", ("bytes", 0, 499, 1234)),
("bytes 500-999/1234", ("bytes", 500, 999, 1234)),
("bytes 500-1233/1234", ("bytes", 500, 1233, 1234)),
("bytes 734-1233/1234", ("bytes", 734, 1233, 1234)),
("bytes 734-1233/*", ("bytes", 734, 1233, None)),
("bytes */1234", ("bytes", None, None, 1234)),
("bytes */*", ("bytes", None, None, None))
)
self.runRoundtripTest("Content-Range", table)
def testContentType(self):
table = (
("text/html;charset=iso-8859-4", http_headers.MimeType('text', 'html', (('charset', 'iso-8859-4'),))),
("text/html", http_headers.MimeType('text', 'html')),
)
self.runRoundtripTest("Content-Type", table)
def testContentDisposition(self):
table = (
("attachment;filename=foo.txt", http_headers.MimeDisposition('attachment', (('filename', 'foo.txt'),))),
("inline", http_headers.MimeDisposition('inline')),
)
self.runRoundtripTest("Content-Disposition", table)
def testExpires(self):
self.runRoundtripTest("Expires", (("Sun, 09 Sep 2001 01:46:40 GMT", 1000000000),))
# Invalid expires MUST return date in the past.
self.assertEquals(parseHeader("Expires", ["0"]), 0)
self.assertEquals(parseHeader("Expires", ["wejthnaljn"]), 0)
def testLastModified(self):
# Don't need major tests since the datetime parser has its own test
self.runRoundtripTest("Last-Modified", (("Sun, 09 Sep 2001 01:46:40 GMT", 1000000000),))
class DateTimeTest(unittest.TestCase):
"""Test date parsing functions."""
def testParse(self):
timeNum = 784111777
timeStrs = ('Sun, 06 Nov 1994 08:49:37 GMT',
'Sunday, 06-Nov-94 08:49:37 GMT',
'Sun Nov 6 08:49:37 1994',
# Also some non-RFC formats, for good measure.
'Somefakeday 6 Nov 1994 8:49:37',
'6 Nov 1994 8:49:37',
'Sun, 6 Nov 1994 8:49:37',
'6 Nov 1994 8:49:37 GMT',
'06-Nov-94 08:49:37',
'Sunday, 06-Nov-94 08:49:37',
'06-Nov-94 08:49:37 GMT',
'Nov 6 08:49:37 1994',
)
for timeStr in timeStrs:
self.assertEquals(http_headers.parseDateTime(timeStr), timeNum)
# Test 2 Digit date wraparound yuckiness.
self.assertEquals(http_headers.parseDateTime(
'Monday, 11-Oct-04 14:56:50 GMT'), 1097506610)
self.assertEquals(http_headers.parseDateTime(
'Monday, 11-Oct-2004 14:56:50 GMT'), 1097506610)
def testGenerate(self):
self.assertEquals(http_headers.generateDateTime(784111777), 'Sun, 06 Nov 1994 08:49:37 GMT')
def testRoundtrip(self):
for _ignore in range(2000):
randomTime = random.randint(0, 2000000000)
timestr = http_headers.generateDateTime(randomTime)
time2 = http_headers.parseDateTime(timestr)
self.assertEquals(randomTime, time2)
class TestMimeType(unittest.TestCase):
def testEquality(self):
"""Test that various uses of the constructer are equal
"""
kwargMime = http_headers.MimeType('text', 'plain',
key='value',
param=None)
dictMime = http_headers.MimeType('text', 'plain',
{'param': None,
'key': 'value'})
tupleMime = http_headers.MimeType('text', 'plain',
(('param', None),
('key', 'value')))
stringMime = http_headers.MimeType.fromString('text/plain;key=value;param')
self.assertEquals(kwargMime, dictMime)
self.assertEquals(dictMime, tupleMime)
self.assertEquals(kwargMime, tupleMime)
self.assertEquals(kwargMime, stringMime)
class TestMimeDisposition(unittest.TestCase):
def testEquality(self):
"""Test that various uses of the constructer are equal
"""
kwargMime = http_headers.MimeDisposition('attachment', key='value')
dictMime = http_headers.MimeDisposition('attachment', {'key': 'value'})
tupleMime = http_headers.MimeDisposition('attachment', (('key', 'value'),))
stringMime = http_headers.MimeDisposition.fromString('attachment;key=value')
self.assertEquals(kwargMime, dictMime)
self.assertEquals(dictMime, tupleMime)
self.assertEquals(kwargMime, tupleMime)
self.assertEquals(kwargMime, stringMime)
class FormattingUtilityTests(unittest.TestCase):
"""
Tests for various string formatting functionality required to generate
headers.
"""
def test_quoteString(self):
"""
L{quoteString} returns a string which when interpreted according to the
rules for I{quoted-string} (RFC 2616 section 2.2) matches the input
string.
"""
self.assertEqual(
quoteString('a\\b"c'),
'"a\\\\b\\"c"')
def test_generateKeyValues(self):
"""
L{generateKeyValues} accepts an iterable of parameters and returns a
string formatted according to RFC 2045 section 5.1.
"""
self.assertEqual(
generateKeyValues(iter([("foo", "bar"), ("baz", "quux")])),
"foo=bar;baz=quux")
def test_generateKeyValuesNone(self):
"""
L{generateKeyValues} accepts C{None} as the 2nd element of a tuple and
includes just the 1st element in the output without an C{"="}.
"""
self.assertEqual(
generateKeyValues([("foo", None), ("bar", "baz")]),
"foo;bar=baz")
def test_generateKeyValuesQuoting(self):
"""
L{generateKeyValues} quotes the value of the 2nd element of a tuple if
it includes a character which cannot be in an HTTP token as defined in
RFC 2616 section 2.2.
"""
for needsQuote in [' ', '\t', '(', ')', '<', '>', '@', ',', ';', ':',
'\\', '"', '/', '[', ']', '?', '=', '{', '}']:
self.assertEqual(
generateKeyValues([("foo", needsQuote)]),
'foo=%s' % (quoteString(needsQuote),))
|
red-hood/calendarserver
|
txweb2/test/test_http_headers.py
|
Python
|
apache-2.0
| 32,998
| 0.004273
|
import os
from jflow.utils.importlib import import_module
from jflow.conf import global_settings
#If django is installed used the django setting object
try:
from django.conf import settings as django_settings
except:
django_settings = None
ENVIRONMENT_VARIABLE = "JFLOW_SETTINGS_MODULE"
class Settings(object):
pass
def fill(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
if not hasattr(self,setting):
setattr(self, setting, getattr(global_settings, setting))
return self
def get_settings():
settings_module = os.environ.get(ENVIRONMENT_VARIABLE,None)
if settings_module:
try:
mod = import_module(settings_module)
except ImportError, e:
raise ImportError("Could not import settings '%s': %s" % (settings_module, e))
else:
mod = None
sett = django_settings
if not sett:
sett = Settings()
return fill(sett,mod)
settings = get_settings()
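# Usage sketch (assumed settings module name): point jflow at a custom
# settings module before importing this package.
#   import os
#   os.environ['JFLOW_SETTINGS_MODULE'] = 'myproject.jflow_settings'
#   from jflow.conf import settings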
|
lsbardel/flow
|
flow/conf/__init__.py
|
Python
|
bsd-3-clause
| 1,191
| 0.012594
|
# Written by Greg Ver Steeg (http://www.isi.edu/~gregv/npeet.html)
import scipy.spatial as ss
from scipy.special import digamma
from math import log
import numpy.random as nr
import numpy as np
import random
# continuous estimators
def entropy(x, k=3, base=2):
"""
The classic K-L k-nearest neighbor continuous entropy estimator x should be a list of vectors,
e.g. x = [[1.3],[3.7],[5.1],[2.4]] if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x)-1, "Set k smaller than num. samples - 1"
d = len(x[0])
N = len(x)
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
tree = ss.cKDTree(x)
nn = [tree.query(point, k+1, p=float('inf'))[0][k] for point in x]
const = digamma(N)-digamma(k) + d*log(2)
return (const + d*np.mean(map(log, nn)))/log(base)
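# Example (sketch), reusing the docstring's sample data: entropy of four
# scalar samples wrapped as one-dimensional vectors.
#   entropy([[1.3], [3.7], [5.1], [2.4]], k=3, base=2)  # -> float, in bits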
def mi(x, y, k=3, base=2):
"""
Mutual information of x and y; x, y should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x) == len(y), "Lists should have same length"
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
y = [list(p + intens * nr.rand(len(y[0]))) for p in y]
points = zip2(x, y)
# Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a, b, c, d = avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(len(x))
return (-a-b+c+d)/log(base)
def cmi(x, y, z, k=3, base=2):
"""
Mutual information of x and y, conditioned on z; x, y, z should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x) == len(y), "Lists should have same length"
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
intens = 1e-10 # small noise to break degeneracy, see doc.
x = [list(p + intens * nr.rand(len(x[0]))) for p in x]
y = [list(p + intens * nr.rand(len(y[0]))) for p in y]
z = [list(p + intens * nr.rand(len(z[0]))) for p in z]
points = zip2(x, y, z)
# Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a, b, c, d = avgdigamma(zip2(x, z), dvec), avgdigamma(zip2(y, z), dvec), avgdigamma(z, dvec), digamma(k)
return (-a-b+c+d)/log(base)
def kldiv(x, xp, k=3, base=2):
"""
KL Divergence between p and q for x~p(x), xp~q(x); x, xp should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
assert k <= len(xp) - 1, "Set k smaller than num. samples - 1"
assert len(x[0]) == len(xp[0]), "Two distributions must have same dim."
d = len(x[0])
n = len(x)
m = len(xp)
const = log(m) - log(n-1)
tree = ss.cKDTree(x)
treep = ss.cKDTree(xp)
nn = [tree.query(point, k+1, p=float('inf'))[0][k] for point in x]
nnp = [treep.query(point, k, p=float('inf'))[0][k-1] for point in x]
return (const + d*np.mean(map(log, nnp))-d*np.mean(map(log, nn)))/log(base)
# Discrete estimators
def entropyd(sx, base=2):
"""
Discrete entropy estimator given a list of samples which can be any hashable object
"""
return entropyfromprobs(hist(sx), base=base)
def midd(x, y):
"""
Discrete mutual information estimator given a list of samples which can be any hashable object
"""
return -entropyd(list(zip(x, y)))+entropyd(x)+entropyd(y)
def cmidd(x, y, z):
"""
Discrete mutual information estimator given a list of samples which can be any hashable object
"""
return entropyd(list(zip(y, z)))+entropyd(list(zip(x, z)))-entropyd(list(zip(x, y, z)))-entropyd(z)
def hist(sx):
# Histogram from list of samples
d = dict()
for s in sx:
d[s] = d.get(s, 0) + 1
return map(lambda z: float(z)/len(sx), d.values())
def entropyfromprobs(probs, base=2):
# Turn a normalized list of probabilities of discrete outcomes into entropy (base 2)
return -sum(map(elog, probs))/log(base)
def elog(x):
# for entropy, 0 log 0 = 0. but we get an error for putting log 0
if x <= 0. or x >= 1.:
return 0
else:
return x*log(x)
# Mixed estimators
def micd(x, y, k=3, base=2, warning=True):
""" If x is continuous and y is discrete, compute mutual information
"""
overallentropy = entropy(x, k, base)
n = len(y)
word_dict = dict()
for sample in y:
word_dict[sample] = word_dict.get(sample, 0) + 1./n
yvals = list(set(word_dict.keys()))
mi = overallentropy
for yval in yvals:
xgiveny = [x[i] for i in range(n) if y[i] == yval]
if k <= len(xgiveny) - 1:
mi -= word_dict[yval]*entropy(xgiveny, k, base)
else:
if warning:
print("Warning, after conditioning, on y={0} insufficient data. Assuming maximal entropy in this case.".format(yval))
mi -= word_dict[yval]*overallentropy
return mi # units already applied
# Utility functions
def vectorize(scalarlist):
"""
Turn a list of scalars into a list of one-d vectors
"""
return [(x,) for x in scalarlist]
def shuffle_test(measure, x, y, z=False, ns=200, ci=0.95, **kwargs):
"""
Shuffle test
Repeatedly shuffle the x-values and then estimate measure(x,y,[z]).
    Returns the mean and confidence interval ('ci=0.95' by default) over 'ns' runs; 'measure' can be
    e.g. mi or cmi. Keyword arguments are passed through. Mutual information and CMI should have a mean near zero.
"""
xp = x[:] # A copy that we can shuffle
outputs = []
for i in range(ns):
random.shuffle(xp)
if z:
outputs.append(measure(xp, y, z, **kwargs))
else:
outputs.append(measure(xp, y, **kwargs))
outputs.sort()
return np.mean(outputs), (outputs[int((1.-ci)/2*ns)], outputs[int((1.+ci)/2*ns)])
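# Example (sketch): null distribution of mutual information under shuffling.
#   mean, (lo, hi) = shuffle_test(mi, x, y, ns=200, ci=0.95, k=3)
#   # mean should be near zero when x and y are independent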
# Internal functions
def avgdigamma(points, dvec):
# This part finds number of neighbors in some radius in the marginal space
# returns expectation value of <psi(nx)>
N = len(points)
tree = ss.cKDTree(points)
avg = 0.
for i in range(N):
dist = dvec[i]
        # Subtlety: we don't include the boundary point, but we are implicitly
        # adding 1 to the Kraskov definition because the center point is included.
num_points = len(tree.query_ball_point(points[i], dist-1e-15, p=float('inf')))
avg += digamma(num_points)/N
return avg
def zip2(*args):
# zip2(x,y) takes the lists of vectors and makes it a list of vectors in a joint space
# E.g. zip2([[1],[2],[3]],[[4],[5],[6]]) = [[1,4],[2,5],[3,6]]
return [sum(sublist, []) for sublist in zip(*args)]
|
UltronAI/Deep-Learning
|
Pattern-Recognition/hw2-Feature-Selection/skfeature/utility/entropy_estimators.py
|
Python
|
mit
| 7,335
| 0.00259
|
# Generated by Django 3.1.7 on 2021-09-27 16:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0016_product_company'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='companies',
),
]
|
KlubJagiellonski/pola-backend
|
pola/product/migrations/0017_remove_product_companies.py
|
Python
|
bsd-3-clause
| 329
| 0
|
#!/usr/bin/env python
# coding=utf-8
"""
Distinct powers
Problem 29
Consider all integer combinations of a^b for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
If they are then placed in numerical order, with any repeats removed, we get
the following sequence of 15 distinct terms:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
How many distinct terms are in the sequence generated by a^b for 2 ≤ a ≤ 100 and
2 ≤ b ≤ 100?
"""
from __future__ import print_function
def power_combinations(a, b):
for i in range(2, a):
for j in range(2, b):
yield i ** j
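# Note: duplicates such as 2**4 == 4**2 == 16 are removed by set() below.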
if __name__ == '__main__':
print(len(set(power_combinations(101, 101)))) # 9183
|
openqt/algorithms
|
projecteuler/ac/old/pe029_distinct_powers.py
|
Python
|
gpl-3.0
| 823
| 0
|
#!/usr/bin/python
#
# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
# Frameworks
# Copyright (C) 2016 PubNub Inc.
# http://www.pubnub.com/
# http://www.pubnub.com/terms
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pubnub_blocks
version_added: '2.2'
short_description: PubNub blocks management module.
description:
- "This module allows Ansible to interface with the PubNub BLOCKS
infrastructure by providing the following operations: create / remove,
start / stop and rename for blocks and create / modify / remove for event
handlers"
author:
- PubNub <support@pubnub.com> (@pubnub)
- Sergey Mamontov <sergey@pubnub.com> (@parfeon)
requirements:
- "python >= 2.7"
- "pubnub_blocks_client >= 1.0"
options:
email:
description:
- Email from account for which new session should be started.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
password:
description:
- Password which match to account to which specified C(email) belong.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
cache:
description: >
      If a single play uses the blocks management module several times, it is
      preferable to enable 'caching' by letting the previous module call share
      the gathered artifacts and passing them to this parameter.
required: false
default: {}
account:
description:
- "Name of PubNub account for from which C(application) will be used to
manage blocks."
- "User\'s account will be used if value not set or empty."
required: false
version_added: '2.4'
application:
description:
- "Name of target PubNub application for which blocks configuration on
specific C(keyset) will be done."
required: true
keyset:
description:
- Name of application's keys set which is bound to managed blocks.
required: true
state:
description:
- "Intended block state after event handlers creation / update process
will be completed."
required: false
default: 'started'
choices: ['started', 'stopped', 'present', 'absent']
name:
description:
- Name of managed block which will be later visible on admin.pubnub.com.
required: true
description:
description:
- "Short block description which will be later visible on
        admin.pubnub.com. Used only if the block doesn\'t exist; it won\'t
        change the description of an existing block."
required: false
default: 'New block'
event_handlers:
description:
- "List of event handlers which should be updated for specified block
C(name)."
- "Each entry for new event handler should contain: C(name), C(src),
C(channels), C(event). C(name) used as event handler name which can be
used later to make changes to it."
- C(src) is full path to file with event handler code.
- "C(channels) is name of channel from which event handler is waiting
for events."
- "C(event) is type of event which is able to trigger event handler:
I(js-before-publish), I(js-after-publish), I(js-after-presence)."
- "Each entry for existing handlers should contain C(name) (so target
handler can be identified). Rest parameters (C(src), C(channels) and
C(event)) can be added if changes required for them."
- "It is possible to rename event handler by adding C(changes) key to
event handler payload and pass dictionary, which will contain single key
C(name), where new name should be passed."
- "To remove particular event handler it is possible to set C(state) for
it to C(absent) and it will be removed."
required: false
default: []
changes:
description:
- "List of fields which should be changed by block itself (doesn't
affect any event handlers)."
- "Possible options for change is: C(name)."
required: false
default: {}
validate_certs:
description:
- "This key allow to try skip certificates check when performing REST API
calls. Sometimes host may have issues with certificates on it and this
will cause problems to call PubNub REST API."
- If check should be ignored C(False) should be passed to this parameter.
required: false
default: true
'''
EXAMPLES = '''
# Event handler create example.
- name: Create single event handler
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
src: '{{ path_to_handler_source }}'
name: '{{ handler_name }}'
event: 'js-before-publish'
channels: '{{ handler_channel }}'
# Change event handler trigger event type.
- name: Change event handler 'event'
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
name: '{{ handler_name }}'
event: 'js-after-publish'
# Stop block and event handlers.
- name: Stopping block
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
    state: stopped
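# Rename one event handler and remove another (values are placeholders;
# 'changes' and 'state' behave as described in the documentation above).
- name: Rename and remove event handlers
  pubnub_blocks:
    email: '{{ email }}'
    password: '{{ password }}'
    application: '{{ app_name }}'
    keyset: '{{ keyset_name }}'
    name: '{{ block_name }}'
    event_handlers:
      -
        name: '{{ handler_name }}'
        changes:
          name: '{{ new_handler_name }}'
      -
        name: '{{ obsolete_handler_name }}'
        state: 'absent'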
# Multiple module calls with cached result passing
- name: Create '{{ block_name }}' block
register: module_cache
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_1_source }}'
name: '{{ event_handler_1_name }}'
channels: '{{ event_handler_1_channel }}'
event: 'js-before-publish'
- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_2_source }}'
name: '{{ event_handler_2_name }}'
channels: '{{ event_handler_2_channel }}'
event: 'js-before-publish'
- name: Start '{{ block_name }}' block
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: started
'''
RETURN = '''
module_cache:
description: "Cached account information. In case if with single play module
used few times it is better to pass cached data to next module calls to speed
up process."
type: dict
returned: always
'''
import copy
import os
try:
# Import PubNub BLOCKS client.
from pubnub_blocks_client import User, Account, Owner, Application, Keyset
from pubnub_blocks_client import Block, EventHandler
from pubnub_blocks_client import exceptions
HAS_PUBNUB_BLOCKS_CLIENT = True
except ImportError:
HAS_PUBNUB_BLOCKS_CLIENT = False
User = None
Account = None
Owner = None
Application = None
Keyset = None
Block = None
EventHandler = None
exceptions = None
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
def pubnub_user(module):
"""Create and configure user model if it possible.
:type module: AnsibleModule
:param module: Reference on module which contain module launch
information and status report methods.
:rtype: User
    :return: Reference on initialized and ready to use user or 'None' in
             case not all required information has been passed to the module.
"""
user = None
params = module.params
if params.get('cache') and params['cache'].get('module_cache'):
cache = params['cache']['module_cache']
user = User()
user.restore(cache=copy.deepcopy(cache['pnm_user']))
elif params.get('email') and params.get('password'):
user = User(email=params.get('email'), password=params.get('password'))
else:
        err_msg = 'It looks like no account credentials have been passed ' \
                  'or the \'cache\' field doesn\'t contain the result of ' \
                  'a previous module call.'
module.fail_json(msg='Missing account credentials.',
description=err_msg, changed=False)
return user
def pubnub_account(module, user):
"""Create and configure account if it is possible.
:type module: AnsibleModule
:param module: Reference on module which contain module launch
information and status report methods.
:type user: User
:param user: Reference on authorized user for which one of accounts
should be used during manipulations with block.
:rtype: Account
    :return: Reference on initialized and ready to use account or 'None' in
             case not all required information has been passed to the module.
"""
params = module.params
if params.get('account'):
account_name = params.get('account')
account = user.account(name=params.get('account'))
if account is None:
            err_frmt = 'It looks like there is no \'{0}\' account for the ' \
                       'authorized user. Please make sure the correct ' \
                       'name has been passed during module configuration.'
module.fail_json(msg='Missing account.',
description=err_frmt.format(account_name),
changed=False)
else:
account = user.accounts()[0]
return account
def pubnub_application(module, account):
"""Retrieve reference on target application from account model.
    NOTE: If account authorization fails or there is no application with
    the specified name, the module will exit with an error.
:type module: AnsibleModule
:param module: Reference on module which contain module launch
information and status report methods.
:type account: Account
:param account: Reference on PubNub account model from which reference
on application should be fetched.
:rtype: Application
:return: Reference on initialized and ready to use application model.
"""
application = None
params = module.params
try:
application = account.application(params['application'])
except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
exc_msg = _failure_title_from_exception(exc)
exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
module.fail_json(msg=exc_msg, description=exc_descr,
changed=account.changed,
module_cache=dict(account))
if application is None:
        err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
                  'correct application name has been passed. If the ' \
                  'application doesn\'t exist you can create it on ' \
                  'admin.pubnub.com.'
email = account.owner.email
module.fail_json(msg=err_fmt.format(params['application'], email),
changed=account.changed, module_cache=dict(account))
return application
def pubnub_keyset(module, account, application):
"""Retrieve reference on target keyset from application model.
    NOTE: If there is no keyset with the specified name, the module will
    exit with an error.
:type module: AnsibleModule
:param module: Reference on module which contain module launch
information and status report methods.
:type account: Account
:param account: Reference on PubNub account model which will be
used in case of error to export cached data.
:type application: Application
:param application: Reference on PubNub application model from which
reference on keyset should be fetched.
:rtype: Keyset
:return: Reference on initialized and ready to use keyset model.
"""
params = module.params
keyset = application.keyset(params['keyset'])
if keyset is None:
        err_fmt = 'There is no \'{0}\' keyset for the \'{1}\' application. ' \
                  'Make sure the correct keyset name has been passed. If ' \
                  'the keyset doesn\'t exist you can create it on ' \
                  'admin.pubnub.com.'
module.fail_json(msg=err_fmt.format(params['keyset'],
application.name),
changed=account.changed, module_cache=dict(account))
return keyset
def pubnub_block(module, account, keyset):
"""Retrieve reference on target keyset from application model.
NOTE: In case if there is no block with specified name and module
configured to start/stop it, module will exit with error.
:type module: AnsibleModule
:param module: Reference on module which contain module launch
information and status report methods.
:type account: Account
:param account: Reference on PubNub account model which will be used in
case of error to export cached data.
:type keyset: Keyset
:param keyset: Reference on keyset model from which reference on block
should be fetched.
:rtype: Block
    :return: Reference on initialized and ready to use block model.
"""
block = None
params = module.params
try:
block = keyset.block(params['name'])
except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
exc_msg = _failure_title_from_exception(exc)
exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
module.fail_json(msg=exc_msg, description=exc_descr,
changed=account.changed, module_cache=dict(account))
    # Report an error because the block doesn't exist but was requested to
    # start/stop at the same time.
if block is None and params['state'] in ['started', 'stopped']:
block_name = params.get('name')
module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
changed=account.changed, module_cache=dict(account))
if block is None and params['state'] == 'present':
block = Block(name=params.get('name'),
description=params.get('description'))
keyset.add_block(block)
if block:
# Update block information if required.
if params.get('changes') and params['changes'].get('name'):
block.name = params['changes']['name']
if params.get('description'):
block.description = params.get('description')
return block
def pubnub_event_handler(block, data):
"""Retrieve reference on target event handler from application model.
:type block: Block
:param block: Reference on block model from which reference on event
handlers should be fetched.
:type data: dict
:param data: Reference on dictionary which contain information about
event handler and whether it should be created or not.
:rtype: EventHandler
:return: Reference on initialized and ready to use event handler model.
             'None' will be returned in case there is no handler with the
             specified name and no request to create it.
"""
event_handler = block.event_handler(data['name'])
# Prepare payload for event handler update.
changed_name = (data.pop('changes').get('name')
if 'changes' in data else None)
name = data.get('name') or changed_name
channels = data.get('channels')
event = data.get('event')
code = _content_of_file_at_path(data.get('src'))
state = data.get('state') or 'present'
# Create event handler if required.
if event_handler is None and state == 'present':
event_handler = EventHandler(name=name, channels=channels, event=event,
code=code)
block.add_event_handler(event_handler)
# Update event handler if required.
if event_handler is not None and state == 'present':
if name is not None:
event_handler.name = name
if channels is not None:
event_handler.channels = channels
if event is not None:
event_handler.event = event
if code is not None:
event_handler.code = code
return event_handler
def _failure_title_from_exception(exception):
"""Compose human-readable title for module error title.
Title will be based on status codes if they has been provided.
:type exception: exceptions.GeneralPubNubError
:param exception: Reference on exception for which title should be
composed.
:rtype: str
    :return: Reference on error title which should be shown on module
             failure.
"""
title = 'General REST API access error.'
if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
title = 'Authorization error: missing credentials.'
elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
title = 'Authorization error: wrong credentials.'
elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
title = 'API access error: insufficient access rights.'
elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
title = 'API access error: time token expired.'
elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
        title = 'Block create did fail: block with same name already exists.'
elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
        title = 'Unable to fetch list of blocks for keyset.'
elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
title = 'Block creation did fail.'
elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
title = 'Block update did fail.'
elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
title = 'Block removal did fail.'
elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
title = 'Block start/stop did fail.'
elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
title = 'Event handler creation did fail: missing fields.'
elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
        title = 'Event handler creation did fail: handler already exists.'
elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
title = 'Event handler creation did fail.'
elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
title = 'Event handler update did fail.'
elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
title = 'Event handler removal did fail.'
return title
def _content_of_file_at_path(path):
"""Read file content.
Try read content of file at specified path.
:type path: str
:param path: Full path to location of file which should be read'ed.
:rtype: content
:return: File content or 'None'
"""
content = None
if path and os.path.exists(path):
with open(path, mode="rt") as opened_file:
b_content = opened_file.read()
try:
content = to_text(b_content, errors='surrogate_or_strict')
except UnicodeError:
pass
return content
def main():
fields = dict(
email=dict(default='', required=False, type='str'),
password=dict(default='', required=False, type='str', no_log=True),
account=dict(default='', required=False, type='str'),
application=dict(required=True, type='str'),
keyset=dict(required=True, type='str'),
state=dict(default='present', type='str',
choices=['started', 'stopped', 'present', 'absent']),
        name=dict(required=True, type='str'),
        description=dict(type='str'),
event_handlers=dict(default=list(), type='list'),
changes=dict(default=dict(), type='dict'),
cache=dict(default=dict(), type='dict'),
validate_certs=dict(default=True, type='bool'))
module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
if not HAS_PUBNUB_BLOCKS_CLIENT:
module.fail_json(msg='pubnub_blocks_client required for this module.')
params = module.params
# Authorize user.
user = pubnub_user(module)
# Initialize PubNub account instance.
account = pubnub_account(module, user=user)
# Try fetch application with which module should work.
application = pubnub_application(module, account=account)
# Try fetch keyset with which module should work.
keyset = pubnub_keyset(module, account=account, application=application)
# Try fetch block with which module should work.
block = pubnub_block(module, account=account, keyset=keyset)
is_new_block = block is not None and block.uid == -1
# Check whether block should be removed or not.
if block is not None and params['state'] == 'absent':
keyset.remove_block(block)
block = None
if block is not None:
# Update block information if required.
if params.get('changes') and params['changes'].get('name'):
block.name = params['changes']['name']
# Process event changes to event handlers.
for event_handler_data in params.get('event_handlers') or list():
state = event_handler_data.get('state') or 'present'
event_handler = pubnub_event_handler(data=event_handler_data,
block=block)
if state == 'absent' and event_handler:
block.delete_event_handler(event_handler)
# Update block operation state if required.
if block and not is_new_block:
if params['state'] == 'started':
block.start()
elif params['state'] == 'stopped':
block.stop()
# Save current account state.
if not module.check_mode:
try:
account.save()
except (exceptions.APIAccessError, exceptions.KeysetError,
exceptions.BlockError, exceptions.EventHandlerError,
exceptions.GeneralPubNubError) as exc:
module_cache = dict(account)
module_cache.update(dict(pnm_user=dict(user)))
exc_msg = _failure_title_from_exception(exc)
exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
module.fail_json(msg=exc_msg, description=exc_descr,
changed=account.changed,
module_cache=module_cache)
# Report module execution results.
module_cache = dict(account)
module_cache.update(dict(pnm_user=dict(user)))
changed_will_change = account.changed or account.will_change
module.exit_json(changed=changed_will_change, module_cache=module_cache)
if __name__ == '__main__':
main()
|
sgerhart/ansible
|
lib/ansible/modules/cloud/pubnub/pubnub_blocks.py
|
Python
|
mit
| 23,797
| 0.00021
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import connection
from django.db import models, migrations
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.management import update_all_contenttypes
def create_notifications(apps, schema_editor):
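    # Copy every user story watcher into the generic notifications_watched
    # table, using the 'userstory' content type for the generic relation.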
update_all_contenttypes(verbosity=0)
sql="""
INSERT INTO notifications_watched (object_id, created_date, content_type_id, user_id, project_id)
SELECT userstory_id AS object_id, now() AS created_date, {content_type_id} AS content_type_id, user_id, project_id
FROM userstories_userstory_watchers INNER JOIN userstories_userstory ON userstories_userstory_watchers.userstory_id = userstories_userstory.id""".format(content_type_id=ContentType.objects.get(model='userstory').id)
cursor = connection.cursor()
cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('notifications', '0004_watched'),
('userstories', '0009_remove_userstory_is_archived'),
]
operations = [
migrations.RunPython(create_notifications),
migrations.RemoveField(
model_name='userstory',
name='watchers',
),
]
|
CMLL/taiga-back
|
taiga/projects/userstories/migrations/0010_remove_userstory_watchers.py
|
Python
|
agpl-3.0
| 1,220
| 0.004098
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.common.sts_connect
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey.datastore import Account
import botocore.session
import boto3
import boto
def connect(account_name, connection_type, **args):
"""
Examples of use:
ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
ec2 = sts_connect.connect(environment, 'ec2')
where environment is ( test, prod, dev )
s3 = sts_connect.connect(environment, 's3')
ses = sts_connect.connect(environment, 'ses')
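    boto3 clients/resources use the 'boto3.<tech>.<api>' form, e.g.:
    iam = sts_connect.connect(environment, 'boto3.iam.client', region='us-east-1')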
    :param account_name: Account to connect with (i.e. test, prod, dev)
:raises Exception: RDS Region not valid
AWS Tech not supported.
:returns: STS Connection Object for given tech
:note: To use this method a SecurityMonkey role must be created
in the target account with full read only privileges.
"""
account = Account.query.filter(Account.name == account_name).first()
sts = boto.connect_sts()
role_name = 'SecurityMonkey'
if account.role_name and account.role_name != '':
role_name = account.role_name
role = sts.assume_role('arn:aws:iam::' + account.number + ':role/' + role_name, 'secmonkey')
if connection_type == 'botocore':
botocore_session = botocore.session.get_session()
botocore_session.set_credentials(
role.credentials.access_key,
role.credentials.secret_key,
token=role.credentials.session_token
)
return botocore_session
region = 'us-east-1'
if 'region' in args:
region = args.pop('region')
if hasattr(region, 'name'):
region = region.name
if 'boto3' in connection_type:
# Should be called in this format: boto3.iam.client
_, tech, api = connection_type.split('.')
session = boto3.Session(
aws_access_key_id=role.credentials.access_key,
aws_secret_access_key=role.credentials.secret_key,
aws_session_token=role.credentials.session_token,
region_name=region
)
if api == 'resource':
return session.resource(tech)
return session.client(tech)
module = __import__("boto.{}".format(connection_type))
for subm in connection_type.split('.'):
module = getattr(module, subm)
return module.connect_to_region(
region,
aws_access_key_id=role.credentials.access_key,
aws_secret_access_key=role.credentials.secret_key,
security_token=role.credentials.session_token
)
|
bunjiboys/security_monkey
|
security_monkey/common/sts_connect.py
|
Python
|
apache-2.0
| 3,363
| 0.000892
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def hbase_decommission(env):
import params
env.set_params(params)
kinit_cmd_decommission = params.kinit_cmd_decommission
File(params.region_drainer,
content=StaticFile("draining_servers.rb"),
mode=0755
)
if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
hosts = params.hbase_excluded_hosts.split(",")
  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
    hosts = params.hbase_included_hosts.split(",")
  else:
    # Avoid a NameError below when neither host list is provided.
    hosts = []
if params.hbase_drain_only:
for host in hosts:
if host:
regiondrainer_cmd = format(
"{kinit_cmd_decommission} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
Execute(regiondrainer_cmd,
user=params.hbase_user,
logoutput=True
)
pass
pass
else:
for host in hosts:
if host:
regiondrainer_cmd = format(
"{kinit_cmd_decommission} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
regionmover_cmd = format(
"{kinit_cmd_decommission} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
Execute(regiondrainer_cmd,
user=params.hbase_user,
logoutput=True
)
Execute(regionmover_cmd,
user=params.hbase_user,
logoutput=True
)
pass
pass
pass
pass
|
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HBASE/package/scripts/hbase_decommission.py
|
Python
|
apache-2.0
| 2,306
| 0.009974
|
from flask import Flask, render_template, redirect, url_for, request, session, flash, jsonify, g
import sqlite3 as sql
import os
import random
import json
from flask_sqlalchemy import SQLAlchemy
import ast  # To change a stringified list into a python list
import collections  # To count duplicates in the inventory list using Counter()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///RTSDB.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = os.urandom(24)
db = SQLAlchemy(app)
class Data(db.Model):
__tablename__ = "data"
id = db.Column('id', db.Integer, primary_key=True)
user = db.Column("user", db.String(20))
password = db.Column("password", db.String(20))
saltWater = db.Column("saltwater", db.String(20))
freshWater = db.Column("freshWater", db.String(20))
elixir = db.Column("elixir", db.String(20))
pearl = db.Column("pearl", db.String(20))
iron = db.Column("iron", db.String(20))
coal = db.Column("coal", db.String(20))
titanium = db.Column("titanium", db.String(20))
diamond = db.Column("diamond", db.String(20))
def __init__(self, id, user, password, saltWater, freshWater, elixir, pearl, iron, coal, titanium, diamond):
self.id = id
self.user = user
self.password = password
self.saltWater = saltWater
self.freshWater = freshWater
self.elixir = elixir
self.pearl = pearl
self.iron = iron
self.coal = coal
self.titanium = titanium
self.diamond = diamond
#def __repr__(self):
# return '{}'.format(self.id)
class Recipe(db.Model):
num_of_rec = 0
__tablename__ = "recipes"
id = db.Column('id', db.Integer, primary_key=True)
name = db.Column("name", db.String(20))
type = db.Column("type", db.String(20))
result = db.Column("result", db.String(20))
prereq = db.Column("prereq", db.String(20))
ing_1 = db.Column("ing_1", db.String(20))
ing_qty_1 = db.Column("ing_qty_1", db.Integer)
ing_2 = db.Column("ing_2", db.String(20))
ing_qty_2 = db.Column("ing_qty_2", db.Integer)
desc = db.Column("desc", db.String(20))
    def __init__(self, id, name, type, result, prereq, ing_1, ing_qty_1, ing_2, ing_qty_2, desc):
self.id = id
self.name = name
self.type = type
self.result = result
self.prereq = prereq
self.ing_1 = ing_1
self.ing_qty_1 = ing_qty_1
self.ing_2 = ing_2
self.ing_qty_2 = ing_qty_2
self.desc = desc
Recipe.num_of_rec += 1
def __repr__(self):
return '{}'.format(self.result)
'''
findtest = "saltWater"
userStuff = Data.query.filter_by(user="admin").first()
value = getattr(userStuff, findtest)
print (value)
temp = int(value)
temp += 1
value = str(temp)
print (value)
userStuff.saltWater = value
db.session.commit()
#newinfo = ExempleDB(7, 'sixth user', '123456', '25')
#db.session.add(newinfo)
#db.session.commit()
#update_this = ExempleDB.query.filter_by(id=6).first
#update_this.user = "New_user"
#db.session.commit()
'''
@app.route("/test")
def test():
resultList = Data.query.all()
return render_template('test.html', resultList=resultList)
@app.before_request
def before_request():
g.user = None
if 'user' in session:
g.user = session['user']
@app.route('/getsession')
def getsession():
if 'user' in session:
return session['user']
return 'Not logged in!'
@app.route("/logout")
def logout():
session.pop('user', None)
flash('You were just logged out!')
return redirect(url_for('index'))
@app.route("/", methods=['GET', 'POST'])
def index():
session.pop('user', None)
error = None
if request.method == 'POST':
session['user'] = request.form['username']
enteredPassword = request.form['password']
        con = sql.connect("RTSDB.db")
        cur = con.execute("SELECT user, password from data")
        # Validate the username/password pair, not just the username
        # (enteredPassword was previously read but never checked).
        credentials = [(row[0], row[1]) for row in cur.fetchall()]
        if (session['user'], enteredPassword) in credentials:
            return redirect(url_for('map'))
        else:
            print "Failed"
            flash('Invalid Credentials!')
return render_template("index.html", title="RTS FLASK PROJECT", **globals())
#Map_and_resources___________________________________________________________
@app.route("/map")
def map():
if g.user:
resources = Data.query.filter_by(user = g.user).all()
return render_template("map.html", resources=resources)
return redirect(url_for('index'))
@app.route('/gather/<clickedRegion>', methods=['POST'])
def gather(clickedRegion):
def gatherChances():
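        # Single uniform roll mapped to rarity tiers: <=0.5% legendary,
        # 0.5-5% epic, 5-20% rare, remaining ~80% common.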
finding = ""
randomResult = random.random()
if randomResult <= 0.005:
if clickedRegion == "water":
finding = "pearl" #Legendary
if clickedRegion == "mine":
finding = "diamond" #Legendary
elif randomResult > 0.005 and randomResult <= 0.05:
if clickedRegion == "water":
finding = "elixir" #Epic
if clickedRegion == "mine":
finding = "titanium" #Epic
elif randomResult > 0.05 and randomResult <= 0.20:
if clickedRegion == "water":
finding = "freshWater" #Rare
if clickedRegion == "mine":
finding = "coal" #Rare
else:
if clickedRegion == "water":
finding = "saltWater" #Common
if clickedRegion == "mine":
finding = "iron" #Common
#print randomResult
#print finding
return finding
finding = gatherChances()
#Get value from database to show them in HTML
con = sql.connect("RTSDB.db")
cur = con.execute("SELECT " + finding + " from data WHERE user='"+g.user+"'")
for row in cur:
print row
#Update the value by one (for now)
newval = int(row[0]) + 1
newval = str(newval)
#Insert new values
#Data.update().where(user=g.user).with_entities(finding).values(newval)
#db.session.commit()
cur.execute("UPDATE data SET " + finding + " = " + newval + " WHERE user='" + g.user +"'")
con.commit()
con.close()
return json.dumps({"finding":finding, "newval":newval})
#Inventory____________________________________________________
@app.route("/inventory")
def inventory():
if g.user:
con = sql.connect("RTSDB.db")
        #Showing resources
resources = Data.query.filter_by(user = g.user).all()
#Getting current inventory
cur = con.execute("SELECT items from inventory WHERE user='" + g.user +"'").fetchone()
#First row of the list (all items)
x = cur[0]
        #Stringified list parsed back into a real list
currInv = ast.literal_eval(x)
counter = collections.Counter(currInv)
print counter
return render_template("inventory.html", resources=resources, currInv=currInv, counter=counter)
return redirect(url_for('index'))
#Crafting______________________________________________________
@app.route("/craft")
def craft():
if g.user:
#Get all recipes from Table recipes
resources = Data.query.filter_by(user = g.user).all()
recipes = Recipe.query.all()
return render_template("craft.html", recipes=recipes, resources=resources)
return redirect(url_for('index'))
'''
@app.route("/showComponent/<clickedComponent>")
def showComponent(clickedComponent):
recipe = Data.query.filter_by(name = clickedComponent).all()
return recipe
'''
@app.route("/craftProcess/<item>", methods=['POST'])
def craftProcess(item):
con = sql.connect("RTSDB.db")
#Getting FIRST required mats
cur = con.execute("SELECT ing_1 from recipes WHERE result='"+item+"'").fetchone()
ing_1 = cur[0]
cur = con.execute("SELECT ing_qty_1 from recipes WHERE result='"+item+"'").fetchone()
ing_qty_1 = cur[0]
#Getting SECOND required mats
cur = con.execute("SELECT ing_2 from recipes WHERE result='"+item+"'").fetchone()
ing_2 = cur[0]
cur = con.execute("SELECT ing_qty_2 from recipes WHERE result='"+item+"'").fetchone()
ing_qty_2 = cur[0]
    #Getting FIRST concerned resource and removing
cur = con.execute("SELECT " + ing_1 + " from data WHERE user='" + g.user +"'").fetchone()
oldVal = cur[0]
newVal1 = int(oldVal) - ing_qty_1
    #Getting SECOND concerned resource and removing
cur = con.execute("SELECT " + ing_2 + " from data WHERE user='" + g.user +"'").fetchone()
oldVal = cur[0]
    newVal2 = int(oldVal) - ing_qty_2
#Updating resources
con.execute("UPDATE data SET " +\
ing_1 + " = " + str(newVal1) +","+\
ing_2 + " = " + str(newVal2) +\
" WHERE user='" + g.user +"'")
#Getting current inventory
cur = con.execute("SELECT items from inventory WHERE user='" + g.user +"'").fetchone()
# Tuple into list
x = list(cur)
#First row of the list (all items)
x = x[0]
    #Stringified list parsed back into a real list
x = ast.literal_eval(x)
# Add the item
x.append(item)
# Restring the list
x = json.dumps(x)
print x
#Update the items
con.execute('UPDATE inventory SET items = ? WHERE user = ? ', (x, g.user,))
con.commit()
con.close()
#From SQLAlchemy
#clickedItem = Recipes.query.filter_by(result = item).all()
#resource = Data.query.filter_by(user = g.user).all()
stringMessage = item + ' was added to inventory!'
return json.dumps({'stringMessage':stringMessage, 'newVal1':newVal1, 'newVal2':newVal2})
@app.route("/3d_test_1")
def test_1():
if g.user:
return render_template("3d_test_1.html")
return redirect(url_for('index'))
@app.route("/3d_test_2")
def test_2():
if g.user:
return render_template("3d_test_2.html")
return redirect(url_for('index'))
@app.route("/3d_test_3")
def test_3():
if g.user:
return render_template("3d_test_3.html")
return redirect(url_for('index'))
def getUserInfo(userx):
con = sql.connect("RTSDB.db")
cur = con.execute("SELECT * from data WHERE user='"+userx+"'")
print "Userx is: %s", userx
userInfoList = [row[0] for row in cur.fetchall()]
print userInfoList
global saltWater, freshWater, elixir, pearl, \
iron, coal, titanium, diamond
saltWater = row[3]
freshWater = row[4]
elixir = row[5]
pearl = row[6]
iron = row[7]
coal = row[8]
titanium = row[9]
diamond = row[10]
con.commit()
con.close()
if __name__ == "__main__":
app.run(debug=True, threaded=True)
|
sylverspace/craft_game
|
app.py
|
Python
|
mit
| 10,026
| 0.033513
|
''' Work of Cameron Palk '''
import sys
import pandas as pd
def main( argv ):
try:
csv_filepath = argv[ 0 ]
output_filepath = argv[ 1 ]
except IndexError:
print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
return
training_data = pd.read_csv( csv_filepath )
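	# Coordinates outside Seattle's rough bounding box (lat <= 47 or
	# lon >= -122) become NaN in the clean_* columns and are dropped below.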
training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
	training_data = training_data.dropna()  # dropna() is not in-place
print( training_data[ 'clean_Latitude' ] )
for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
print( "{:16} min: {:16} max: {:16}".format(
axis,
min( training_data[ axis ] ),
max( training_data[ axis ] )
) )
#
if __name__=='__main__':
main( sys.argv[ 1: ] )
|
CKPalk/SeattleCrime_DM
|
DataMining/Stats/coord_bounds.py
|
Python
|
mit
| 801
| 0.087391
|
from glob import glob
import os
import time
import tornado.ioloop
from tornado import gen
from .core import Stream, convert_interval, RefCounter
def PeriodicCallback(callback, callback_time, asynchronous=False, **kwargs):
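    """Emit the return value of ``callback`` on the returned stream every
    ``callback_time`` milliseconds (a thin wrapper around tornado's
    ``PeriodicCallback``). A minimal sketch, assuming a running event loop::
        source = PeriodicCallback(lambda: 'tick', 500)
        source.sink(print)
    """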
source = Stream(asynchronous=asynchronous)
def _():
result = callback()
source._emit(result)
pc = tornado.ioloop.PeriodicCallback(_, callback_time, **kwargs)
pc.start()
return source
def sink_to_file(filename, upstream, mode='w', prefix='', suffix='\n', flush=False):
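    """Write each element of ``upstream`` to ``filename``, wrapped in
    ``prefix``/``suffix``, and return the open file object; the caller
    is responsible for closing it.
    """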
file = open(filename, mode=mode)
def write(text):
file.write(prefix + text + suffix)
if flush:
file.flush()
upstream.sink(write)
return file
class Source(Stream):
_graphviz_shape = 'doubleoctagon'
def __init__(self, **kwargs):
self.stopped = True
super(Source, self).__init__(**kwargs)
def stop(self): # pragma: no cover
# fallback stop method - for poll functions with while not self.stopped
if not self.stopped:
self.stopped = True
@Stream.register_api(staticmethod)
class from_textfile(Source):
""" Stream data from a text file
Parameters
----------
f: file or string
Source of the data. If string, will be opened.
poll_interval: Number
Interval to poll file for new data in seconds
delimiter: str
Character(s) to use to split the data into parts
start: bool
Whether to start running immediately; otherwise call stream.start()
explicitly.
from_end: bool
Whether to begin streaming from the end of the file (i.e., only emit
lines appended after the stream starts).
Examples
--------
>>> source = Stream.from_textfile('myfile.json') # doctest: +SKIP
>>> source.map(json.loads).pluck('value').sum().sink(print) # doctest: +SKIP
>>> source.start() # doctest: +SKIP
Returns
-------
Stream
"""
def __init__(self, f, poll_interval=0.100, delimiter='\n', start=False,
from_end=False, **kwargs):
if isinstance(f, str):
f = open(f)
self.file = f
self.from_end = from_end
self.delimiter = delimiter
self.poll_interval = poll_interval
super(from_textfile, self).__init__(ensure_io_loop=True, **kwargs)
self.stopped = True
self.started = False
if start:
self.start()
def start(self):
self.stopped = False
self.started = False
self.loop.add_callback(self.do_poll)
@gen.coroutine
def do_poll(self):
buffer = ''
if self.from_end:
# this only happens when we are ready to read
self.file.seek(0, 2)
while not self.stopped:
self.started = True
line = self.file.read()
if line:
buffer = buffer + line
if self.delimiter in buffer:
parts = buffer.split(self.delimiter)
buffer = parts.pop(-1)
for part in parts:
yield self._emit(part + self.delimiter)
else:
yield gen.sleep(self.poll_interval)
@Stream.register_api(staticmethod)
class filenames(Source):
""" Stream over filenames in a directory
Parameters
----------
path: string
Directory path or globstring over which to search for files
poll_interval: Number
Seconds between checking path
start: bool (False)
Whether to start running immediately; otherwise call stream.start()
explicitly.
Examples
--------
>>> source = Stream.filenames('path/to/dir') # doctest: +SKIP
>>> source = Stream.filenames('path/to/*.csv', poll_interval=0.500) # doctest: +SKIP
"""
def __init__(self, path, poll_interval=0.100, start=False, **kwargs):
if '*' not in path:
if os.path.isdir(path):
if not path.endswith(os.path.sep):
path = path + '/'
path = path + '*'
self.path = path
self.seen = set()
self.poll_interval = poll_interval
self.stopped = True
super(filenames, self).__init__(ensure_io_loop=True)
if start:
self.start()
def start(self):
self.stopped = False
self.loop.add_callback(self.do_poll)
@gen.coroutine
def do_poll(self):
while True:
filenames = set(glob(self.path))
new = filenames - self.seen
for fn in sorted(new):
self.seen.add(fn)
yield self._emit(fn)
yield gen.sleep(self.poll_interval) # TODO: remove poll if delayed
if self.stopped:
break
@Stream.register_api(staticmethod)
class from_tcp(Source):
"""
Creates events by reading from a socket using tornado TCPServer
The stream of incoming bytes is split on a given delimiter, and the parts
become the emitted events.
Parameters
----------
port : int
The port to open and listen on. It only gets opened when the source
is started, and closed upon ``stop()``
delimiter : bytes
The incoming data will be split on this value. The resulting events
will still have the delimiter at the end.
start : bool
Whether to immediately initiate the source. You probably want to
set up downstream nodes first.
server_kwargs : dict or None
If given, additional arguments to pass to TCPServer
Examples
--------
>>> source = Source.from_tcp(4567) # doctest: +SKIP
"""
def __init__(self, port, delimiter=b'\n', start=False,
server_kwargs=None):
super(from_tcp, self).__init__(ensure_io_loop=True)
self.stopped = True
self.server_kwargs = server_kwargs or {}
self.port = port
self.server = None
self.delimiter = delimiter
if start: # pragma: no cover
self.start()
@gen.coroutine
def _start_server(self):
from tornado.tcpserver import TCPServer
from tornado.iostream import StreamClosedError
class EmitServer(TCPServer):
source = self
@gen.coroutine
def handle_stream(self, stream, address):
while True:
try:
data = yield stream.read_until(self.source.delimiter)
yield self.source._emit(data)
except StreamClosedError:
break
self.server = EmitServer(**self.server_kwargs)
self.server.listen(self.port)
def start(self):
if self.stopped:
self.loop.add_callback(self._start_server)
self.stopped = False
def stop(self):
if not self.stopped:
self.server.stop()
self.server = None
self.stopped = True
@Stream.register_api(staticmethod)
class from_http_server(Source):
"""Listen for HTTP POSTs on given port
Each connection will emit one event, containing the body data of
the request
Parameters
----------
port : int
The port to listen on
path : str
Specific path to listen on. Can be regex, but content is not used.
start : bool
Whether to immediately startup the server. Usually you want to connect
downstream nodes first, and then call ``.start()``.
server_kwargs : dict or None
If given, set of further parameters to pass on to HTTPServer
Examples
--------
>>> source = Source.from_http_server(4567) # doctest: +SKIP
"""
def __init__(self, port, path='/.*', start=False, server_kwargs=None):
self.port = port
self.path = path
self.server_kwargs = server_kwargs or {}
super(from_http_server, self).__init__(ensure_io_loop=True)
self.stopped = True
self.server = None
if start: # pragma: no cover
self.start()
def _start_server(self):
from tornado.web import Application, RequestHandler
from tornado.httpserver import HTTPServer
class Handler(RequestHandler):
source = self
@gen.coroutine
def post(self):
yield self.source._emit(self.request.body)
self.write('OK')
application = Application([
(self.path, Handler),
])
self.server = HTTPServer(application, **self.server_kwargs)
self.server.listen(self.port)
def start(self):
"""Start HTTP server and listen"""
if self.stopped:
self.loop.add_callback(self._start_server)
self.stopped = False
def stop(self):
"""Shutdown HTTP server"""
if not self.stopped:
self.server.stop()
self.server = None
self.stopped = True
@Stream.register_api(staticmethod)
class from_process(Source):
"""Messages from a running external process
This doesn't work on Windows
Parameters
----------
cmd : list of str or str
Command to run: program name, followed by arguments
open_kwargs : dict
To pass on the the process open function, see ``subprocess.Popen``.
with_stderr : bool
Whether to include the process STDERR in the stream
start : bool
Whether to immediately startup the process. Usually you want to connect
downstream nodes first, and then call ``.start()``.
Example
-------
>>> source = Source.from_process(['ping', 'localhost']) # doctest: +SKIP
"""
def __init__(self, cmd, open_kwargs=None, with_stderr=False, start=False):
self.cmd = cmd
self.open_kwargs = open_kwargs or {}
self.with_stderr = with_stderr
super(from_process, self).__init__(ensure_io_loop=True)
self.stopped = True
self.process = None
if start: # pragma: no cover
self.start()
@gen.coroutine
def _start_process(self):
# should be done in asyncio (py3 only)? Apparently can handle Windows
# with appropriate config.
from tornado.process import Subprocess
from tornado.iostream import StreamClosedError
import subprocess
stderr = subprocess.STDOUT if self.with_stderr else subprocess.PIPE
process = Subprocess(self.cmd, stdout=Subprocess.STREAM,
stderr=stderr, **self.open_kwargs)
while not self.stopped:
try:
out = yield process.stdout.read_until(b'\n')
except StreamClosedError:
# process exited
break
yield self._emit(out)
yield process.stdout.close()
process.proc.terminate()
def start(self):
"""Start external process"""
if self.stopped:
self.loop.add_callback(self._start_process)
self.stopped = False
def stop(self):
"""Shutdown external process"""
if not self.stopped:
self.stopped = True
@Stream.register_api(staticmethod)
class from_kafka(Source):
""" Accepts messages from Kafka
Uses the confluent-kafka library,
https://docs.confluent.io/current/clients/confluent-kafka-python/
Parameters
----------
topics: list of str
Labels of Kafka topics to consume from
consumer_params: dict
Settings to set up the stream, see
https://docs.confluent.io/current/clients/confluent-kafka-python/#configuration
https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
Examples:
bootstrap.servers, Connection string(s) (host:port) by which to reach
Kafka;
group.id, Identity of the consumer. If multiple sources share the same
group, each message will be passed to only one of them.
poll_interval: number
Seconds that elapse between polling Kafka for new messages
start: bool (False)
Whether to start polling upon instantiation
Examples
--------
>>> source = Stream.from_kafka(['mytopic'],
... {'bootstrap.servers': 'localhost:9092',
... 'group.id': 'streamz'}) # doctest: +SKIP
"""
def __init__(self, topics, consumer_params, poll_interval=0.1, start=False, **kwargs):
self.cpars = consumer_params
self.consumer = None
self.topics = topics
self.poll_interval = poll_interval
super(from_kafka, self).__init__(ensure_io_loop=True, **kwargs)
self.stopped = True
if start:
self.start()
def do_poll(self):
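        # Non-blocking poll: only return the payload of a valid, non-empty,
        # error-free message.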
if self.consumer is not None:
msg = self.consumer.poll(0)
if msg and msg.value() and msg.error() is None:
return msg.value()
@gen.coroutine
def poll_kafka(self):
while True:
val = self.do_poll()
if val:
yield self._emit(val)
else:
yield gen.sleep(self.poll_interval)
if self.stopped:
break
self._close_consumer()
def start(self):
import confluent_kafka as ck
if self.stopped:
self.stopped = False
self.consumer = ck.Consumer(self.cpars)
self.consumer.subscribe(self.topics)
tp = ck.TopicPartition(self.topics[0], 0, 0)
# blocks for consumer thread to come up
self.consumer.get_watermark_offsets(tp)
self.loop.add_callback(self.poll_kafka)
def _close_consumer(self):
if self.consumer is not None:
consumer = self.consumer
self.consumer = None
consumer.unsubscribe()
consumer.close()
self.stopped = True
class FromKafkaBatched(Stream):
"""Base class for both local and cluster-based batched kafka processing"""
def __init__(self, topic, consumer_params, poll_interval='1s',
npartitions=None, refresh_partitions=False,
max_batch_size=10000, keys=False,
engine=None, **kwargs):
self.consumer_params = consumer_params
# Override the auto-commit config to enforce custom streamz checkpointing
self.consumer_params['enable.auto.commit'] = 'false'
if 'auto.offset.reset' not in self.consumer_params.keys():
consumer_params['auto.offset.reset'] = 'latest'
self.topic = topic
self.npartitions = npartitions
self.refresh_partitions = refresh_partitions
if self.npartitions is not None and self.npartitions <= 0:
raise ValueError("Number of Kafka topic partitions must be > 0.")
self.poll_interval = convert_interval(poll_interval)
self.max_batch_size = max_batch_size
self.keys = keys
self.engine = engine
self.stopped = True
self.started = False
super(FromKafkaBatched, self).__init__(ensure_io_loop=True, **kwargs)
@gen.coroutine
def poll_kafka(self):
import confluent_kafka as ck
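        # Offsets are committed back to Kafka only once a batch's reference
        # count drops to zero, i.e. all downstream processing has finished.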
def commit(_part):
topic, part_no, _, _, offset = _part[1:]
_tp = ck.TopicPartition(topic, part_no, offset + 1)
self.consumer.commit(offsets=[_tp], asynchronous=True)
@gen.coroutine
def checkpoint_emit(_part):
ref = RefCounter(cb=lambda: commit(_part))
yield self._emit(_part, metadata=[{'ref': ref}])
if self.npartitions is None:
kafka_cluster_metadata = self.consumer.list_topics(self.topic)
if self.engine == "cudf": # pragma: no cover
self.npartitions = len(kafka_cluster_metadata[self.topic.encode('utf-8')])
else:
self.npartitions = len(kafka_cluster_metadata.topics[self.topic].partitions)
self.positions = [0] * self.npartitions
tps = []
for partition in range(self.npartitions):
tps.append(ck.TopicPartition(self.topic, partition))
while True:
try:
committed = self.consumer.committed(tps, timeout=1)
except ck.KafkaException:
pass
else:
for tp in committed:
self.positions[tp.partition] = tp.offset
break
try:
while not self.stopped:
out = []
if self.refresh_partitions:
kafka_cluster_metadata = self.consumer.list_topics(self.topic)
if self.engine == "cudf": # pragma: no cover
new_partitions = len(kafka_cluster_metadata[self.topic.encode('utf-8')])
else:
new_partitions = len(kafka_cluster_metadata.topics[self.topic].partitions)
if new_partitions > self.npartitions:
self.positions.extend([-1001] * (new_partitions - self.npartitions))
self.npartitions = new_partitions
for partition in range(self.npartitions):
tp = ck.TopicPartition(self.topic, partition, 0)
try:
low, high = self.consumer.get_watermark_offsets(
tp, timeout=0.1)
except (RuntimeError, ck.KafkaException):
continue
self.started = True
if 'auto.offset.reset' in self.consumer_params.keys():
if self.consumer_params['auto.offset.reset'] == 'latest' and \
self.positions[partition] == -1001:
self.positions[partition] = high
current_position = self.positions[partition]
lowest = max(current_position, low)
if high > lowest + self.max_batch_size:
high = lowest + self.max_batch_size
if high > lowest:
out.append((self.consumer_params, self.topic, partition,
self.keys, lowest, high - 1))
self.positions[partition] = high
self.consumer_params['auto.offset.reset'] = 'earliest'
for part in out:
yield self.loop.add_callback(checkpoint_emit, part)
else:
yield gen.sleep(self.poll_interval)
finally:
self.consumer.unsubscribe()
self.consumer.close()
def start(self):
import confluent_kafka as ck
if self.engine == "cudf": # pragma: no cover
from custreamz import kafka
if self.stopped:
if self.engine == "cudf": # pragma: no cover
self.consumer = kafka.Consumer(self.consumer_params)
else:
self.consumer = ck.Consumer(self.consumer_params)
self.stopped = False
tp = ck.TopicPartition(self.topic, 0, 0)
# blocks for consumer thread to come up
self.consumer.get_watermark_offsets(tp)
self.loop.add_callback(self.poll_kafka)
@Stream.register_api(staticmethod)
def from_kafka_batched(topic, consumer_params, poll_interval='1s',
npartitions=None, refresh_partitions=False,
start=False, dask=False,
max_batch_size=10000, keys=False,
engine=None, **kwargs):
""" Get messages and keys (optional) from Kafka in batches
Uses the confluent-kafka library,
https://docs.confluent.io/current/clients/confluent-kafka-python/
This source will emit lists of messages for each partition of a single given
topic per time interval, if there is new data. If using dask, one future
will be produced per partition per time-step, if there is data.
Checkpointing is achieved through the use of reference counting. A reference
counter is emitted downstream for each batch of data. A callback is
triggered when the reference count reaches zero and the offsets are
committed back to Kafka. Upon the start of this function, the previously
    committed offsets will be fetched from Kafka and reading will begin from there.
This will guarantee at-least-once semantics.
Parameters
----------
topic: str
Kafka topic to consume from
consumer_params: dict
| Settings to set up the stream, see
| https://docs.confluent.io/current/clients/confluent-kafka-python/#configuration
| https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
| Examples:
| bootstrap.servers: Connection string(s) (host:port) by which to reach Kafka
| group.id: Identity of the consumer. If multiple sources share the same
| group, each message will be passed to only one of them.
poll_interval: number
Seconds that elapse between polling Kafka for new messages
npartitions: int (None)
| Number of partitions in the topic.
| If None, streamz will poll Kafka to get the number of partitions.
refresh_partitions: bool (False)
| Useful if the user expects to increase the number of topic partitions on the
| fly, maybe to handle spikes in load. Streamz polls Kafka in every batch to
| determine the current number of partitions. If partitions have been added,
| streamz will automatically start reading data from the new partitions as well.
| If set to False, streamz will not accommodate adding partitions on the fly.
| It is recommended to restart the stream after decreasing the number of partitions.
start: bool (False)
Whether to start polling upon instantiation
max_batch_size: int
The maximum number of messages per partition to be consumed per batch
keys: bool (False)
| Whether to extract keys along with the messages.
| If True, this will yield each message as a dict:
| {'key':msg.key(), 'value':msg.value()}
engine: str (None)
| If engine is set to "cudf", streamz reads data (messages must be JSON)
| from Kafka in an accelerated manner directly into cuDF (GPU) dataframes.
| This is done using the RAPIDS custreamz library.
| Please refer to RAPIDS cudf API here:
| https://docs.rapids.ai/api/cudf/stable/
| Folks interested in trying out custreamz would benefit from this
| accelerated Kafka reader. If one does not want to use GPUs, they
| can use streamz as is, with the default engine=None.
| To use this option, one must install custreamz (use the
| appropriate CUDA version recipe & Python version)
| using a command like the one below, which will install all
| GPU dependencies and streamz itself:
| conda install -c rapidsai-nightly -c nvidia -c conda-forge \
| -c defaults custreamz=0.15 python=3.7 cudatoolkit=10.2
| More information at: https://rapids.ai/start.html
Important Kafka Configurations
By default, a stream will start reading from the latest offsets
available. Please set 'auto.offset.reset': 'earliest' in the
consumer configs, if the stream needs to start processing from
the earliest offsets.
Examples
----------
>>> source = Stream.from_kafka_batched('mytopic',
... {'bootstrap.servers': 'localhost:9092',
... 'group.id': 'streamz'}) # doctest: +SKIP
"""
if dask:
from distributed.client import default_client
kwargs['loop'] = default_client().loop
source = FromKafkaBatched(topic, consumer_params,
poll_interval=poll_interval,
npartitions=npartitions,
refresh_partitions=refresh_partitions,
max_batch_size=max_batch_size,
keys=keys,
engine=engine,
**kwargs)
if dask:
source = source.scatter()
if start:
source.start()
if engine == "cudf": # pragma: no cover
return source.starmap(get_message_batch_cudf)
else:
return source.starmap(get_message_batch)
def get_message_batch(kafka_params, topic, partition, keys, low, high, timeout=None):
"""Fetch a batch of kafka messages (keys & values) in given topic/partition
This will block until messages are available, or timeout is reached.
"""
import confluent_kafka as ck
t0 = time.time()
consumer = ck.Consumer(kafka_params)
tp = ck.TopicPartition(topic, partition, low)
consumer.assign([tp])
out = []
try:
while True:
msg = consumer.poll(0)
if msg and msg.value() and msg.error() is None:
if high >= msg.offset():
if keys:
out.append({'key':msg.key(), 'value':msg.value()})
else:
out.append(msg.value())
if high <= msg.offset():
break
else:
time.sleep(0.1)
if timeout is not None and time.time() - t0 > timeout:
break
finally:
consumer.close()
return out
def get_message_batch_cudf(kafka_params, topic, partition, keys, low, high, timeout=None): # pragma: no cover
"""
Fetch a batch of kafka messages (currently, messages must be in JSON format)
in given topic/partition as a cudf dataframe
"""
from custreamz import kafka
consumer = kafka.Consumer(kafka_params)
gdf = None
try:
gdf = consumer.read_gdf(topic=topic, partition=partition, lines=True, start=low, end=high + 1)
finally:
consumer.close()
return gdf
|
mrocklin/streams
|
streamz/sources.py
|
Python
|
bsd-3-clause
| 25,985
| 0.001347
|
# -*- coding: utf-8
#------------------------------------------------------------------#
__author__ = "Xavier MARCELET <xavier@marcelet.com>"
#------------------------------------------------------------------#
import json
import sys
import cherrypy
from xtd.core import logger, config
from xtd.core.config import checkers
from xtd.core.application import Application
from .log import LogPage
from .counter import CounterPage
from .config import ConfigPage
from .param import ParamPage
from .manager import ServerManager
#------------------------------------------------------------------#
class ServerApplication(Application):
def __init__(self, p_name = sys.argv[0]):
super(ServerApplication, self).__init__(p_name)
self.config().register_section("http", "Server Settings", [{
"name" : "listen",
"default" : "tcp://localhost:8080",
"description" : "bind server to given socket",
"checks" : config.checkers.is_socket(p_schemes=["tcp", "unix"])
},{
"name" : "threads",
"default" : 10,
"description" : "allocate VAL number of work threads",
"checks" : config.checkers.is_int(p_min=1)
},{
"name" : "daemonize",
"default" : False,
"description" : "daemonize process at startup"
},{
"name" : "pid-file",
"default" : "/tmp/%s.pid",
"description" : "daemon pid file"
},{
"name" : "admin-password",
"default" : None,
"valued" : True,
"description" : "Administrator password for write access to admin web interfaces"
},{
"name" : "tls",
"default" : False,
"description" : "Enable TLS of http server",
"checks" : checkers.is_bool()
},{
"name" : "tlscacert",
"default" : None,
"valued" : True,
"description" : "TLS CA-Certificate file"
},{
"name" : "tlscert",
"default" : None,
"valued" : True,
"description" : "TLS Certificate file"
},{
"name" : "tlskey",
"default" : None,
"valued" : True,
"description" : "TLS key file"
}])
def _initialize_server(self):
l_password = config.get("http", "admin-password")
l_socket = config.get("http", "listen")
l_threads = config.get("http", "threads")
l_credentials = None
if l_password:
l_credentials = { "admin" : l_password }
ServerManager.initialize(__name__)
l_tls = config.get("http", "tls")
l_cacert = config.get("http", "tlscacert")
l_cert = config.get("http", "tlscert")
l_key = config.get("http", "tlskey")
ServerManager.listen(l_socket, l_threads, l_tls, l_cacert, l_cert, l_key)
ServerManager.mount(self, "/", {}, __name__)
ServerManager.mount(ConfigPage(), "/admin/config", {}, __name__)
ServerManager.mount(CounterPage(), "/admin/counter", {}, __name__)
l_paramPage = ParamPage(l_credentials)
ServerManager.mount(l_paramPage, "/admin/params", {
"/write" : {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'localhost',
'tools.auth_basic.checkpassword': l_paramPage.check_password
}
}, __name__)
l_logPage = LogPage(l_credentials)
ServerManager.mount(l_logPage, "/admin/log", {
"/write" : {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'localhost',
'tools.auth_basic.checkpassword': l_logPage.check_password
}
}, __name__)
ServerManager.subscribe("exit", super().stop, 100)
@cherrypy.expose
@cherrypy.tools.json_out()
#pylint: disable=unused-argument
def default(self, *p_args, **p_kwds):
l_reqinfo = {
"method" : cherrypy.request.method,
"path" : cherrypy.request.path_info,
"params" : cherrypy.request.params,
"headers" : cherrypy.request.headers
}
logger.error(self.m_name, "unhandled request : %s", json.dumps(l_reqinfo))
cherrypy.response.status = 500
return {
"error" : "unhandled request",
"request" : l_reqinfo
}
@staticmethod
def _check_config():
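    # When TLS is enabled, verify that the CA certificate, certificate and
    # key files exist and are readable before the server starts.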
l_useTLS = config.get("http", "tls")
if l_useTLS:
l_values = [ "tlscacert", "tlscert", "tlskey" ]
for c_key in l_values:
l_val = config.get("http", c_key)
config.set("http", c_key, checkers.is_file("http", c_key, l_val, p_read=True))
def initialize(self):
super(ServerApplication, self).initialize()
self._check_config()
self._initialize_server()
def start(self):
super(ServerApplication, self).start()
ServerManager.start()
def join(self):
ServerManager.join()
super(ServerApplication, self).join()
def stop(self):
super(ServerApplication, self).stop()
ServerManager.stop()
def process(self):
return 0, False
|
psycofdj/xtd
|
xtd/network/server/application.py
|
Python
|
gpl-3.0
| 5,010
| 0.020958
|
#!/usr/bin/env python
"""
MCNPX Model for Cylindrical RPM8
"""
import sys
sys.path.append('../MCNPTools/')
sys.path.append('../')
from MCNPMaterial import Materials
import subprocess
import math
import mctal
import numpy as np
import itertools
import os
class CylinderRPM(object):
# Material Dictionaries
cellForStr = '{:5d} {:d} -{:4.3f} {:d} -{:d} u={:d}\n'
surfForStr = '{:5d} cz {:5.3f}\n'
tranForStr = '*tr{:d} {:4.3f} {:4.3f} 0.000\n'
geoParam={'RPM8Size':12.7,'DetectorThickness':0.01,'DetectorSpacing':0.8,
'CylinderLightGuideRadius':0.5,'CylinderRadius':2.5}
def __init__(self,inp='INP.mcnp'):
""" Wrapped Cylinder MCNPX Model of RPM8
Keywords:
inp -- desired name of the input deck
"""
# Material dictionary for the moderator, light guide, and detector
self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector
self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA
        self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HDPE
        # Cell and Surface Initial Numbering
self.CellStartNum = 600
self.SurfaceStartNum = 600
self.ZeroSurfaceNum = 500
self.UniverseNum = 200
self.surfGeo = None
self.inp = inp
self.name = 'OUT_'+self.inp.strip('.mcnp')+'.'
self.setMaterial(0.1,'PS')
def __str__(self):
s = '\tMCNPX Model of Wrapped Cylinder\n'
s += '\t Cell Number Starts: {0:d}\n'.format(self.CellStartNum)
s += '\t Surface Number Starts: {0:d}\n'.format(self.SurfaceStartNum)
return s
def getInteractionRate(self):
""" Returns the interaction rate """
m = mctal.MCTAL(self.name+'.m')
t = m.tallies[4]
        # Returning the total
return t.data[-1],t.errors[-1]
def setMaterial(self,massFraction,polymer):
"""
Sets the detector material
"""
M = Materials()
num = self.material['Detector']['mt']
if polymer == 'PS':
self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)
elif polymer == 'PEN':
self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)
else:
raise ValueError('Polymer {} is not in the material database'.format(polymer))
def createSurfaceGeo(self):
"""
Creates a dictionary of surface positions and cylinders
"""
self.surfGeo = dict()
r = self.geoParam['CylinderLightGuideRadius']
self.surfGeo[r] = 'LightGuide'
#self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
r += self.geoParam['DetectorThickness']
self.surfGeo[r] = 'Detector'
r += self.geoParam['DetectorSpacing']
if (r < self.geoParam['CylinderRadius']):
self.surfGeo[r] = 'LightGuide'
return self.surfGeo
def calculateDetectorArea(self):
"""
Calculates the area used in a detector
"""
area = 0.0
r = self.geoParam['CylinderLightGuideRadius']
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
area -= math.pow(r,2)
r += self.geoParam['DetectorThickness']
area += math.pow(r,2)
r += self.geoParam['DetectorSpacing']
return math.pi*area
def createDetectorCylinder(self,uNum=1):
"""
Creates a detector cylinder
Returns an ntuple of s,c,detectorCells
s - the surface string
c - the cell string
detectorCells - a list of the numbers corresponding to the detectors cells
"""
cellsCreated = 0
sNum = self.SurfaceStartNum
cNum = self.CellStartNum
detectorCells = list()
s = '{:5d} rcc 0 0 0 0 0 217.7 {}\n'.format(self.SurfaceStartNum,self.geoParam['CylinderRadius'])
c = ''
keyList = sorted(self.surfGeo.keys(), key = lambda x: float(x))
for key in keyList:
sPrev = sNum
sNum += 1
cNum += 1
s += self.surfForStr.format(sNum,key)
m = self.material[self.surfGeo[key]]
if cNum == self.CellStartNum+1:
c+= '{:5d} {:d} -{:4.3f} -{:d} u={:d}\n'.format(cNum,m['mt'],m['rho'],sNum,uNum)
else:
c += self.cellForStr.format(cNum,m['mt'],m['rho'],sPrev,sNum,uNum)
# List of cells for the detector
            if self.surfGeo[key] == 'Detector':
detectorCells.append(cNum)
cellsCreated += 1
# Last cell up to universe boundary
m = self.material['Moderator']
c += '{:5d} {:d} -{:4.3f} {:d} u={:d}\n'.format(cNum+1,m['mt'],m['rho'],sNum,uNum)
cellsCreated += 1
return s,c,detectorCells,cellsCreated
def runModel(self):
"""
        Runs the model by submission to Torque / Maui
"""
qsub= subprocess.check_output('which qsub',shell=True).strip()
cmd = '#!/bin/bash\n'
cmd += '#PBS -N {0}\n#PBS -V\n#PBS -q gen1\n#PBS -l nodes=1:ppn=1\n'
cmd += 'cd $PBS_O_WORKDIR\nmpirun mcnpx inp={1} name={2}\n'
job = cmd.format('Job_RPMCylinder',self.inp,self.name)
with open('qsub','w') as o:
o.write(job)
subprocess.call(qsub+' qsub',shell=True)
subprocess.call('rm qsub',shell=True)
def createInputDeck(self,cylinderPositions,inp=None,name=None):
""" createInputDeck
Creates an input deck of the given geometry
"""
self.inp = inp
self.name = name
if not inp:
self.inp = 'INP_Cylinder.mcnp'
if not name:
self.name = 'OUT_Cylinder.'
oFile = self.inp
# Problem Constants
cellString = 'c ------------------------- Source ----------------------------------------\n'
cellString += '70 5 -15.1 -70 $ 252Cf source \n'
cellString += '71 406 -11.34 -71 70 $ Lead around source\n'
cellString += '72 456 -0.93 -72 71 $ Poly around source\n'
surfString = 'c ########################### Surface Cards ##############################\n'
surfString += 'c ------------------- Encasing Bounds (Size of RPM8) ---------------------\n'
surfString += '500 rpp 0 12.7 -15.25 15.25 0 217.7 \n'
# Add in other cells here
numCells = 4 # 3 Source, 1 RPM8 Encasing
##################################################################
# Add in Detector Cells and Surfaces #
##################################################################
universeNum = 1
(s,c,detectorCells,cellsCreated) = self.createDetectorCylinder(universeNum)
surfString += s
cellString += 'c ------------------- Detector Cylinder Universe ------------------------\n'
cellString += c
transNum = 1
uCellNum = self.UniverseNum
transString = ''
cellString += 'c ----------------------- Detector Universe ----------------------------\n'
for pos in cylinderPositions:
transString += self.tranForStr.format(transNum,pos[0],pos[1])
cellString += '{:5d} 0 -{:d} trcl={:d} fill={:d}\n'.format(uCellNum,self.SurfaceStartNum,transNum,universeNum)
transNum +=1
uCellNum +=1
# Adding the PMMA Moderator Block
m = self.material['Moderator']
cellString += 'c ------------------------- HDPE Moderator -----------------------------\n'
cellString += '{:5d} {:d} -{:4.3f} -{:d} '.format(500,m['mt'],m['rho'],self.ZeroSurfaceNum)
cellString += ''.join('#{:d} '.format(i) for i in range(self.UniverseNum,uCellNum))
cellString += '\n'
# Getting total number of cells
numCells += cellsCreated + uCellNum-self.UniverseNum +1
##################################################################
# Write the Tallies #
##################################################################
univCells = range(self.UniverseNum,uCellNum)
tallyString = 'c ------------------------- Tallies Yo! -----------------------------------\n'
tallies = {'F54:n':{'cells':detectorCells,'comments':'FC54 6Li Reaction Rates\n',
'options':' T\nSD54 1 {0:d}R\nFM54 -1 3 105'}}
for t in tallies:
# Getting a list of cells
tallyString += tallies[t]['comments']
tallyString += str(t)+' '
j = 0
for u in univCells:
cell = list('('+str(c)+'<'+str(u)+') ' for c in tallies[t]['cells'])
cell = [cell[i:i+6] for i in range(0,len(cell),6)]
if j > 0:
tallyString += ' '+''.join(''.join(i)+'\n' for i in cell)
else:
tallyString += ' '.join(''.join(i)+'\n' for i in cell)
j +=1
tallyString = tallyString.rstrip()
tallyString += tallies[t]['options'].format(len(univCells)*len(tallies[t]['cells']))
tallyString+='\n'
# Finish up the problem data
cellString += 'c ---------------------- Detector Encasing ------------------------------\n'
cellString += '700 488 -7.92 701 -700 $ SS-316 Encasing \n'
cellString += 'c -------------------------- Outside World -------------------------------\n'
cellString += '1000 204 -0.001225 -1000 700 #70 #71 #72 $ Atmosphere \n'
cellString += '1001 0 1000 \n'
surfString += 'c ------------------------ Encasing Material -----------------------------\n'
surfString += '700 rpp -0.3175 13.018 -15.5675 15.5675 -0.3175 218.018 \n'
surfString += '701 rpp 0.0 12.7 -15.25 15.25 0.0 217.7 \n'
surfString += 'c -------------- Source --------------------------------------------------\n'
surfString += '70 s -200 0 108.85 2.510E-04 $ Source \n'
surfString += '71 s -200 0 108.85 5.0025E-01 $ 0.5 cm lead surrounding source \n'
surfString += '72 s -200 0 108.85 3.00025 $ 2.5 cm poly surrounding source \n'
surfString += 'c -------------- Outside World -------------------------------------------\n'
surfString += '1000 so 250 \n'
matString = 'c -------------------------- Material Cards -----------------------------\n'
matString += self.material['Detector']['matString']
matString += self.getMaterialString()
with open(oFile,'w') as o:
o.write('MCNPX Simulation of RPM8 Cylinder\n')
o.write(cellString)
o.write('\n')
o.write(surfString)
o.write('\n')
o.write(self.getRunString().format(numCells))
o.write(self.getSrcString())
o.write(tallyString)
o.write(matString)
o.write(transString)
o.write('\n')
def getRunString(self):
runString ='c ------------------------------ Run Info ---------------------------------\n'
runString +='nps 1E6 \n'
runString +='IMP:N 1 {0:d}R 0 $ Particle Importances within cells \n'
runString +='c -------------- Output --------------------------------------------------\n'
runString +='PRDMP j j 1 $ Write a MCTAL File \n'
runString +='PRINT 40 \n'
runString +='c ------------------------------ Physics ---------------------------------\n'
runString +='MODE N \n'
runString +='PHYS:N 100 4j -1 2 \n'
runString +='CUT:N 2j 0 0 \n'
return runString
def getSrcString(self):
"""
Returns the MCNPX formated source string
"""
srcString = 'c -------------------------- Source Defination ----------------------------\n'
srcString += 'c 1 nanogram Cf-252 source = 1E-9 grams = 6.623E-11 cc \n'
srcString += 'sdef pos=-200 0 108.85 cel=70 par=SF rad=d1 \n'
srcString += 'si1 0 2.510E-04 \n'
srcString += 'sp1 -21 1 \n'
return srcString
def getMaterialString(self):
"""
Returns the MCNXP material string
"""
matString = 'm10 1001.70c -0.080538 $Lucite (PMMA / Plexiglass) rho = 1.19 g/cc\n'
matString += ' 6012.70c -0.599848 8016.70c -0.319614 \n'
matString += 'm204 7014.70c -0.755636 $air (US S. Atm at sea level) rho = 0.001225 \n'
matString += ' 8016.70c -0.231475 18036.70c -3.9e-005 18038.70c -8e-006\n'
matString += ' 18040.70c -0.012842 \n'
matString += 'm5 98252.66c 1 $ Cf-252, rho =15.1 g/cc wiki \n'
matString += 'm406 82204.70c -0.013781 $Lead, \n'
matString += ' 82206.70c -0.239557 82207.70c -0.220743 82208.70c -0.525919\n'
matString += 'm456 1001.70c -0.143716 $Polyethylene - rho = 0.93 g/cc \n'
matString += ' 6000.70c -0.856284 \n'
matString += 'm488 14028.70c -0.009187 $Steel, Stainless 316 rho = 7.92 \n'
matString += ' 14029.70c -0.000482 14030.70c -0.000331 24050.70c -0.007095\n'
matString += ' 24052.70c -0.142291 24053.70c -0.016443 24054.70c -0.004171\n'
matString += ' 25055.70c -0.02 26054.70c -0.037326 26056.70c -0.601748\n'
matString += ' 26057.70c -0.014024 26058.70c -0.001903 28058.70c -0.080873\n'
matString += ' 28060.70c -0.031984 28061.70c -0.001408 28062.70c -0.004546\n'
matString += ' 28064.70c -0.001189 42092.70c -0.003554 42094.70c -0.002264\n'
matString += ' 42095.70c -0.003937 42096.70c -0.004169 42097.70c -0.002412\n'
matString += ' 42098.70c -0.006157 42100.70c -0.002507 \n'
matString += 'mt3 poly.01t \n'
matString += 'mt456 poly.01t \n'
matString += 'mt10 poly.01t \n'
return matString
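# Hedged usage sketch (editor addition, not part of the original model):
# inspect the concentric shell layout built by createSurfaceGeo() and the
# detector area it implies for the default geoParam values. Assumes the
# MCNPTools/MCNPMaterial imports at the top of this module resolve.
def _demoCylinderGeometry():
    m = CylinderRPM()
    layers = m.createSurfaceGeo()
    for r in sorted(layers):
        print '{:5.3f} cm -> {}'.format(r, layers[r])
    # calculateDetectorArea() sums pi*(r_out**2 - r_in**2) over the
    # detector shells only, skipping the light-guide spacers.
    print 'Detector area: {:5.3f} cm^2'.format(m.calculateDetectorArea())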
def run(loading,polymers):
"""
Runs a matrix of loading and polymers
"""
    # NOTE: only the last cylinderPositions assignment below takes effect;
    # the earlier 2-, 3- and 4-cylinder layouts are kept for reference.
    cylinderPositions = ((4.23,10.16),(4.23,-10.16))
cylinderPositions = ((4.23,7.625),(4.23,0),(4.23,-7.625))
cylinderPositions = ((4.23,9.15),(4.23,3.05),(4.23,-3.05),(4.23,-9.15))
cylinderPositions = ((4.23,10.16),(4.23,5.08),(4.23,0.0),(4.23,-5.08),(4.23,-10.16))
for l in loading:
for p in polymers:
RunCylinder(l,p,cylinderPositions)
def RunCylinder(l,p,cylinderPositions):
"""
    Runs an MCNPX model of the cylinder with loading l, polymer p, and
    cylinder positions cylinderPositions.
    Keywords:
        l - loading of the films
        p - polymer
        cylinderPositions - the cylinder positions
"""
# Creating input and output deck names
posString = ''
for pos in cylinderPositions:
posString += '{:2.1f}-'.format(pos[0])
posString = posString.rstrip('-')
inp='Cyl_{}LiF_{}_{}.mcnp'.format(int(l*100),p,posString)
name='OUTCyl_{}LiF_{}_{}.'.format(int(l*100),p,posString)
print inp
# Creating and running the model
m = CylinderRPM()
m.createSurfaceGeo()
m.setMaterial(l,p)
m.createDetectorCylinder()
m.createInputDeck(cylinderPositions,inp,name)
m.runModel()
def CreatePositions(yPos,numXPertubations):
"""
    Creates and returns an array of positions, using a fixed array of y
    positions with numXPertubations equally spaced x values per cylinder.
    Keywords:
        yPos - the y positions (the spacing of the cylinders). The
            number of elements in this array corresponds to the number of
            cylinders that are simulated.
        numXPertubations - the number of perturbations in x. The returned
            position arrays are spaced linearly in x from 2.54 to
            10 cm.
"""
pos = list()
xVals = np.linspace(2.54,10,numXPertubations)
xPos = [i for i in itertools.product(xVals,repeat=len(yPos))]
for x in xPos:
pos.append(zip(x,yPos))
return pos
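# Hedged illustration (editor addition): with three y positions and four x
# values, itertools.product above yields 4**3 = 64 candidate layouts, each
# a list of (x, y) pairs.
def _demoCreatePositions():
    pos = CreatePositions((7.625, 0, -7.625), 4)
    assert len(pos) == 4 ** 3
    print pos[0]  # [(2.54, 7.625), (2.54, 0), (2.54, -7.625)]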
def PositionOptimization(loading,polymers,positions):
"""
Runs a matrix of loading, polymers and positions
"""
for l in loading:
for p in polymers:
for pos in positions:
RunCylinder(l,p,pos)
def createInputPlotDecks():
positions = list()
positions.append(((4.23,10.16),(4.23,-10.16)))
positions.append(((4.23,7.625),(4.23,0),(4.23,-7.625)))
#positions.append(((4.23,9.15),(4.23,3.05),(4.23,-3.05),(4.23,-9.15)))
for pos in positions:
m = CylinderRPM()
m.createSurfaceGeo()
m.createDetectorCylinder()
inp='Cylinder_{}.mcnp'.format(len(pos))
name='OUTCylinder_{}.'.format(len(pos))
m.createInputDeck(pos,inp,name)
def computeMassLi(polymer,loading,density=1.1):
"""
Computes the mass of Li for a given polymer and loading
"""
M = Materials()
m = CylinderRPM()
area = m.calculateDetectorArea()
massLi = area*217.0*M.GetLiMassFraction(loading,polymer)*density
return massLi
def extractRunInfo(filename):
"""
Extracts the loading and polymer from the file name
"""
tokens = filename.split('_')
loading = tokens[1].strip('LiF')
polymer = tokens[2].strip('.m')
return (float(loading)/100, polymer)
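# Hedged example (editor addition) of the file naming convention parsed
# above: 'Cyl_10LiF_PS_4.2-4.2.m' encodes a 10% LiF loading in polystyrene.
def _demoExtractRunInfo():
    loading, polymer = extractRunInfo('Cyl_10LiF_PS_4.2-4.2.m')
    assert loading == 0.1 and polymer == 'PS'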
###########################################################################
# #
#                         Summarizes / Analysis                           #
# #
###########################################################################
def GetInteractionRate(f,tallyNum=54,src=2.3E3):
"""
Returns the interaction rate of the mctal file
"""
m = mctal.MCTAL(f)
t = m.tallies[tallyNum]
return (t.data[-1]*src,t.errors[-1]*t.data[-1]*src)
import glob
def summerize():
files = glob.glob('OUTCylinder*.m')
s = 'Polymer, loading, mass Li, count rate, error, count rate per mass\n'
for f in files:
runParam = extractRunInfo(f)
massLi = computeMassLi(runParam[1],runParam[0])
countRate = GetInteractionRate(f)
s += '{}, {:5.2f} , {:5.3f} , {:5.3f} , {:4.2f} , {:5.3f}\n'.format(runParam[1].ljust(7),runParam[0],massLi,countRate[0],countRate[1],countRate[0]/massLi)
print s
def OptimizationSummary(path):
"""
    Summarizes the optimization output
"""
# Getting the files
if not os.path.isdir(path):
raise IOError('Path {} is not found'.format(path))
files = glob.glob(path+'/*.m')
if not files:
print 'No files matched the pattern'
return
# Parsing the files
data = dict()
for f in files:
name = os.path.splitext(os.path.split(f)[1])[0]
data[name] = GetInteractionRate(f)
# Max value
sortedKeys = sorted(data, key=data.get,reverse=True)
#sortedKeys = sorted(data.items(), key=lambda x : float(x[1][0]),reverse=True)
for key in sortedKeys[0:9]:
print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
for key in sortedKeys[-6:-1]:
print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
def cleanup(path):
files = glob.glob(path+'/OUTCyl_*.m')
for f in files:
head,tail = os.path.split(f)
numCylinders = tail.count('-')+1
        if numCylinders == 3:
            newdir = 'ThreeCylPosOpt'
        elif numCylinders == 4:
            newdir = 'FourCylPosOpt'
        elif numCylinders == 5:
            newdir = 'FiveCylPosOpt'
        else:
            continue  # unexpected layout: leave the file in place
        os.rename(f,os.path.join(newdir,tail))
###########################################################################
# #
# MAIN #
# #
###########################################################################
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-r','--run',action="store_true",
                        default=False,help='Runs the cylinders for multiple polymers and percent loadings')
parser.add_argument('-p','--plot',action="store_true",
default=False,help='Creates input decks for plotting')
parser.add_argument('-c','--clean',action="store_true",
default=False,help='Cleans up the files')
parser.add_argument('-a','--analysis',action="store_true",default=False,help="Analyze the results")
    parser.add_argument('path', nargs='?', default='CylPosOpt',help='Specify the output directory to summarize')
    parser.add_argument('-o','--optimize',action='store',type=int,default=-1,help='Run a number of optimizations on the positions. If 0 is entered, a summary is performed on the directory provided with path')
    parser.add_argument('loading',metavar='loading',type=float,nargs='*',action="store",default=(0.1,0.2,0.3),help='Percent loading of LiF')
args = parser.parse_args()
if args.run:
run(args.loading,('PS','PEN'))
if args.plot:
createInputPlotDecks()
if args.optimize > 0:
        # NOTE: only the last yPos assignment takes effect; the alternative
        # spacings are kept for reference.
        yPos = (7.625,0,-7.625)
yPos = (9.15,3.05,-3.05,-9.15)
#yPos = (10.16,5.08,0.0,-5.08,-10.16)
pos = CreatePositions(yPos,args.optimize)
loading = (0.3,)
polymers = ('PS',)
PositionOptimization(loading,polymers,pos)
if args.optimize == 0:
OptimizationSummary(args.path)
if args.analysis:
summerize()
if args.clean:
cleanup(os.getcwd())
|
murffer/DetectorSim
|
MCNPXRPMModels/WrappedCylinders/CylinderMCNPX.py
|
Python
|
apache-2.0
| 23,537
| 0.016145
|
import errors
def validate_num_arguments_eq(num_args):
"""Validate that the number of supplied args is equal to some number"""
def decorator(func):
def wrapped_func(*args, **kwargs):
if len(args[1]) != num_args:
raise errors.InvalidArgumentError
else:
func(*args, **kwargs)
return wrapped_func
return decorator
def validate_num_arguments_lt(num_args):
    """Validate that the number of supplied args is at most some number"""
def decorator(func):
def wrapped_func(*args, **kwargs):
if len(args[1]) > num_args:
raise errors.InvalidArgumentError
else:
func(*args, **kwargs)
return wrapped_func
return decorator
def validate_num_arguments_gt(num_args):
    """Validate that the number of supplied args is at least some number"""
def decorator(func):
def wrapped_func(*args, **kwargs):
if len(args[1]) < num_args:
raise errors.InvalidArgumentError
else:
func(*args, **kwargs)
return wrapped_func
return decorator
def parse_index(lst, id):
    """Validate that an index into the list is a digit and in range, then return it"""
if not id.isdigit():
raise errors.ExpectedItemError
idx = int(id) - 1
if idx > len(lst) - 1 or idx < 0:
raise errors.InvalidItemError
return idx
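# Hedged usage sketch (editor addition): how the decorators above might
# guard a command handler. The Handler class and the convention that
# args[1] holds the user-supplied argument list are illustrative
# assumptions, not part of the original API.
def _demo_validators():
    class Handler(object):
        @validate_num_arguments_eq(2)
        def move(self, args):
            print('moving %s to position %s' % (args[0], args[1]))
    Handler().move(['item', '1'])  # passes: exactly two arguments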
|
dansackett/Todooo
|
todooo/validators.py
|
Python
|
mit
| 1,462
| 0.001368
|
# ChangeFile
# A class which represents a Debian change file.
# Copyright 2002 Colin Walters <walters@gnu.org>
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, re, sys, string, stat
import threading, Queue
import logging
from minidinstall import DpkgControl, SignedFile
from minidinstall import misc
class ChangeFileException(Exception):
def __init__(self, value):
self._value = value
def __str__(self):
        return repr(self._value)
class ChangeFile(DpkgControl.DpkgParagraph):
md5_re = r'^(?P<md5>[0-9a-f]{32})[ \t]+(?P<size>\d+)[ \t]+(?P<section>[-/a-zA-Z0-9]+)[ \t]+(?P<priority>[-a-zA-Z0-9]+)[ \t]+(?P<file>[0-9a-zA-Z][-+:.,=~0-9a-zA-Z_]+)$'
sha1_re = r'^(?P<sha1>[0-9a-f]{40})[ \t]+(?P<size>\d+)[ \t]+(?P<file>[0-9a-zA-Z][-+:.,=~0-9a-zA-Z_]+)$'
sha256_re = r'^(?P<sha256>[0-9a-f]{64})[ \t]+(?P<size>\d+)[ \t]+(?P<file>[0-9a-zA-Z][-+:.,=~0-9a-zA-Z_]+)$'
def __init__(self):
DpkgControl.DpkgParagraph.__init__(self)
self._logger = logging.getLogger("mini-dinstall")
self._file = ''
def load_from_file(self, filename):
self._file = filename
f = SignedFile.SignedFile(open(self._file))
self.load(f)
f.close()
    def getFiles(self):
        return self._get_checksum_from_changes().get('md5', [])
def _get_checksum_from_changes(self):
""" extract checksums and size from changes file """
output = {}
hashes = { 'md5': ['files', re.compile(self.md5_re)],
'sha1': ['checksums-sha1', re.compile(self.sha1_re)],
'sha256': ['checksums-sha256', re.compile(self.sha256_re)]
}
hashes_checked = hashes.copy()
        try:
            self['files']
        except KeyError:
            # No Files field: nothing to extract (callers expect a dict).
            return {}
for hash in hashes:
try:
self[hashes[hash][0]]
except KeyError:
self._logger.warn("Can't find %s checksum in changes file '%s'" % (hash, os.path.basename(self._file)))
hashes_checked.pop(hash)
for hash in hashes_checked:
output[hash] = []
for line in self[hashes[hash][0]]:
if line == '':
continue
match = hashes[hash][1].match(line)
if (match is None):
raise ChangeFileException("Couldn't parse file entry \"%s\" in Files field of .changes" % (line,))
output[hash].append([match.group(hash), match.group('size'), match.group('file') ])
return output
def verify(self, sourcedir):
""" verify size and hash values from changes file """
checksum = self._get_checksum_from_changes()
for hash in checksum.keys():
for (hashsum, size, filename) in checksum[hash]:
self._verify_file_integrity(os.path.join(sourcedir, filename), int(size), hash, hashsum)
def _verify_file_integrity(self, filename, expected_size, hash, expected_hashsum):
""" check uploaded file integrity """
self._logger.debug('Checking integrity of %s' % (filename,))
try:
statbuf = os.stat(filename)
if not stat.S_ISREG(statbuf[stat.ST_MODE]):
raise ChangeFileException("%s is not a regular file" % (filename,))
size = statbuf[stat.ST_SIZE]
except OSError, e:
raise ChangeFileException("Can't stat %s: %s" % (filename,e.strerror))
        if size != expected_size:
            raise ChangeFileException("File size for %s does not match that specified in .changes" % (filename,))
        if (misc.get_file_sum(self, hash, filename) != expected_hashsum):
            raise ChangeFileException("%ssum for %s does not match that specified in .changes" % (hash, filename,))
self._logger.debug('Verified %ssum %s and size %s for %s' % (hash, expected_hashsum, expected_size, filename))
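# Hedged example (editor addition): exercising the md5_re pattern against a
# typical .changes "Files" entry; the checksum and filename are made up.
if __name__ == '__main__':
    _line = 'd41d8cd98f00b204e9800998ecf8427e 1024 admin optional mini-dinstall_0.6.31.dsc'
    _m = re.compile(ChangeFile.md5_re).match(_line)
    assert _m is not None
    print 'md5=%s size=%s file=%s' % (_m.group('md5'), _m.group('size'), _m.group('file'))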
# vim:ts=4:sw=4:et:
|
xypron/mini-dinstall
|
minidinstall/ChangeFile.py
|
Python
|
gpl-2.0
| 4,579
| 0.005241
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.contrib import admin
from reference.models import *
admin.site.register(continent.Continent,
continent.ContinentAdmin)
admin.site.register(currency.Currency,
currency.CurrencyAdmin)
admin.site.register(country.Country,
country.CountryAdmin)
admin.site.register(decree.Decree,
decree.DecreeAdmin)
admin.site.register(domain.Domain,
domain.DomainAdmin)
admin.site.register(domain_isced.DomainIsced,
domain_isced.DomainIscedAdmin)
admin.site.register(language.Language,
language.LanguageAdmin)
admin.site.register(zipcode.ZipCode,
zipcode.ZipCodeAdmin)
admin.site.register(high_school.HighSchool,
high_school.HighSchoolAdmin)
|
uclouvain/osis
|
reference/admin.py
|
Python
|
agpl-3.0
| 2,104
| 0.000476
|
from __future__ import unicode_literals
import re
import six
# TODO add tests for all of these
EQ_FUNCTION = lambda item_value, test_value: item_value == test_value # flake8: noqa
NE_FUNCTION = lambda item_value, test_value: item_value != test_value # flake8: noqa
LE_FUNCTION = lambda item_value, test_value: item_value <= test_value # flake8: noqa
LT_FUNCTION = lambda item_value, test_value: item_value < test_value # flake8: noqa
GE_FUNCTION = lambda item_value, test_value: item_value >= test_value # flake8: noqa
GT_FUNCTION = lambda item_value, test_value: item_value > test_value # flake8: noqa
COMPARISON_FUNCS = {
'EQ': EQ_FUNCTION,
'=': EQ_FUNCTION,
'NE': NE_FUNCTION,
'!=': NE_FUNCTION,
'LE': LE_FUNCTION,
'<=': LE_FUNCTION,
'LT': LT_FUNCTION,
'<': LT_FUNCTION,
'GE': GE_FUNCTION,
'>=': GE_FUNCTION,
'GT': GT_FUNCTION,
'>': GT_FUNCTION,
'NULL': lambda item_value: item_value is None,
'NOT_NULL': lambda item_value: item_value is not None,
'CONTAINS': lambda item_value, test_value: test_value in item_value,
'NOT_CONTAINS': lambda item_value, test_value: test_value not in item_value,
'BEGINS_WITH': lambda item_value, test_value: item_value.startswith(test_value),
'IN': lambda item_value, *test_values: item_value in test_values,
'BETWEEN': lambda item_value, lower_test_value, upper_test_value: lower_test_value <= item_value <= upper_test_value,
}
def get_comparison_func(range_comparison):
return COMPARISON_FUNCS.get(range_comparison)
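# Illustrative sketch (editor addition, not part of moto): looking up and
# applying the comparison funcs above; BETWEEN takes two bounds and IN is
# variadic.
def _demo_comparison_funcs():
    assert get_comparison_func('GE')(5, 5)
    assert get_comparison_func('BETWEEN')(5, 1, 10)
    assert get_comparison_func('IN')(2, 1, 2, 3)
    assert not get_comparison_func('NULL')(5)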
class RecursionStopIteration(StopIteration):
pass
def get_filter_expression(expr, names, values):
# Examples
# expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)'
# expr = 'Id > 5 AND Subs < 7'
if names is None:
names = {}
if values is None:
values = {}
# Do substitutions
for key, value in names.items():
expr = expr.replace(key, value)
# Store correct types of values for use later
values_map = {}
for key, value in values.items():
if 'N' in value:
values_map[key] = float(value['N'])
elif 'BOOL' in value:
values_map[key] = value['BOOL']
elif 'S' in value:
values_map[key] = value['S']
elif 'NS' in value:
values_map[key] = tuple(value['NS'])
elif 'SS' in value:
values_map[key] = tuple(value['SS'])
elif 'L' in value:
values_map[key] = tuple(value['L'])
else:
raise NotImplementedError()
    # Remove all spaces; to be fair, we could just skip them in the next step.
    # The number of known options is really small, so we can do a fair bit of cheating.
expr = list(expr.strip())
# DodgyTokenisation stage 1
def is_value(val):
return val not in ('<', '>', '=', '(', ')')
def contains_keyword(val):
for kw in ('BETWEEN', 'IN', 'AND', 'OR', 'NOT'):
if kw in val:
return kw
return None
def is_function(val):
return val in ('attribute_exists', 'attribute_not_exists', 'attribute_type', 'begins_with', 'contains', 'size')
# Does the main part of splitting between sections of characters
tokens = []
stack = ''
while len(expr) > 0:
current_char = expr.pop(0)
if current_char == ' ':
if len(stack) > 0:
tokens.append(stack)
stack = ''
elif current_char == ',': # Split params ,
if len(stack) > 0:
tokens.append(stack)
stack = ''
elif is_value(current_char):
stack += current_char
kw = contains_keyword(stack)
if kw is not None:
# We have a kw in the stack, could be AND or something like 5AND
tmp = stack.replace(kw, '')
if len(tmp) > 0:
tokens.append(tmp)
tokens.append(kw)
stack = ''
else:
if len(stack) > 0:
tokens.append(stack)
tokens.append(current_char)
stack = ''
if len(stack) > 0:
tokens.append(stack)
def is_op(val):
return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT')
# DodgyTokenisation stage 2, it groups together some elements to make RPN'ing it later easier.
def handle_token(token, tokens2, token_iterator):
        # ok so this essentially groups up some tokens to make later parsing easier;
        # when it encounters brackets it recurses, then unwinds when RecursionStopIteration is raised.
if token == ')':
raise RecursionStopIteration() # Should be recursive so this should work
elif token == '(':
temp_list = []
try:
while True:
next_token = six.next(token_iterator)
handle_token(next_token, temp_list, token_iterator)
except RecursionStopIteration:
pass # Continue
except StopIteration:
                raise ValueError('Malformed filter expression, type1')
            # Sigh, we only want to group a tuple if it doesn't contain operators
if any([is_op(item) for item in temp_list]):
# Its an expression
tokens2.append('(')
tokens2.extend(temp_list)
tokens2.append(')')
else:
tokens2.append(tuple(temp_list))
elif token == 'BETWEEN':
field = tokens2.pop()
# if values map contains a number, it would be a float
# so we need to int() it anyway
op1 = six.next(token_iterator)
op1 = int(values_map.get(op1, op1))
and_op = six.next(token_iterator)
assert and_op == 'AND'
op2 = six.next(token_iterator)
op2 = int(values_map.get(op2, op2))
tokens2.append(['between', field, op1, op2])
elif is_function(token):
function_list = [token]
lbracket = six.next(token_iterator)
assert lbracket == '('
next_token = six.next(token_iterator)
while next_token != ')':
function_list.append(next_token)
next_token = six.next(token_iterator)
tokens2.append(function_list)
else:
# Convert tokens back to real types
if token in values_map:
token = values_map[token]
# Need to join >= <= <>
if len(tokens2) > 0 and ((tokens2[-1] == '>' and token == '=') or (tokens2[-1] == '<' and token == '=') or (tokens2[-1] == '<' and token == '>')):
tokens2.append(tokens2.pop() + token)
else:
tokens2.append(token)
tokens2 = []
token_iterator = iter(tokens)
for token in token_iterator:
handle_token(token, tokens2, token_iterator)
# Start of the Shunting-Yard algorithm. <-- Proper beast algorithm!
def is_number(val):
return val not in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT')
OPS = {'<': 5, '>': 5, '=': 5, '>=': 5, '<=': 5, '<>': 5, 'IN': 8, 'AND': 11, 'OR': 12, 'NOT': 10, 'BETWEEN': 9, '(': 100, ')': 100}
def shunting_yard(token_list):
output = []
op_stack = []
        # Basically takes in an infix-notation calculation and converts it to
        # reverse polish notation, where there is no ambiguity about the order
        # in which operators are applied.
while len(token_list) > 0:
token = token_list.pop(0)
if token == '(':
op_stack.append(token)
elif token == ')':
while len(op_stack) > 0 and op_stack[-1] != '(':
output.append(op_stack.pop())
lbracket = op_stack.pop()
assert lbracket == '('
elif is_number(token):
output.append(token)
else:
# Must be operator kw
# Cheat, NOT is our only RIGHT associative operator, should really have dict of operator associativity
while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token] and op_stack[-1] != 'NOT':
output.append(op_stack.pop())
op_stack.append(token)
while len(op_stack) > 0:
output.append(op_stack.pop())
return output
output = shunting_yard(tokens2)
# Hacky function to convert dynamo functions (which are represented as lists) to their Class equivalent
def to_func(val):
if isinstance(val, list):
func_name = val.pop(0)
# Expand rest of the list to arguments
val = FUNC_CLASS[func_name](*val)
return val
    # Simple reverse polish notation execution. Builds up a nested filter object.
# The filter object then takes a dynamo item and returns true/false
stack = []
for token in output:
if is_op(token):
op_cls = OP_CLASS[token]
if token == 'NOT':
op1 = stack.pop()
op2 = True
else:
op2 = stack.pop()
op1 = stack.pop()
stack.append(op_cls(op1, op2))
else:
stack.append(to_func(token))
result = stack.pop(0)
if len(stack) > 0:
raise ValueError('Malformed filter expression, type2')
return result
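# Hedged usage sketch (editor addition): parse an expression and evaluate
# the resulting Op tree against a stub item. DummyAttr/DummyItem are
# stand-ins for moto.dynamodb2.models.Item, exposing only the attrs and
# cast_value shape the Op classes below actually read.
def _demo_get_filter_expression():
    class DummyAttr(object):
        def __init__(self, value):
            self.cast_value = value
    class DummyItem(object):
        def __init__(self, **attrs):
            self.attrs = dict((k, DummyAttr(v)) for k, v in attrs.items())
    values = {':min': {'N': '5'}, ':max': {'N': '10'}}
    filter_expr = get_filter_expression('Id > :min AND Id < :max', None, values)
    assert filter_expr.expr(DummyItem(Id=7)) is True
    assert filter_expr.expr(DummyItem(Id=3)) is False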
class Op(object):
"""
Base class for a FilterExpression operator
"""
OP = ''
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def _lhs(self, item):
"""
:type item: moto.dynamodb2.models.Item
"""
lhs = self.lhs
if isinstance(self.lhs, (Op, Func)):
lhs = self.lhs.expr(item)
elif isinstance(self.lhs, six.string_types):
try:
lhs = item.attrs[self.lhs].cast_value
except Exception:
pass
return lhs
def _rhs(self, item):
rhs = self.rhs
if isinstance(self.rhs, (Op, Func)):
rhs = self.rhs.expr(item)
elif isinstance(self.rhs, six.string_types):
try:
rhs = item.attrs[self.rhs].cast_value
except Exception:
pass
return rhs
def expr(self, item):
return True
def __repr__(self):
return '({0} {1} {2})'.format(self.lhs, self.OP, self.rhs)
class Func(object):
"""
Base class for a FilterExpression function
"""
FUNC = 'Unknown'
def expr(self, item):
return True
def __repr__(self):
        return '{0}(...)'.format(self.FUNC)
class OpNot(Op):
OP = 'NOT'
def expr(self, item):
lhs = self._lhs(item)
return not lhs
def __str__(self):
return '({0} {1})'.format(self.OP, self.lhs)
class OpAnd(Op):
OP = 'AND'
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs and rhs
class OpLessThan(Op):
OP = '<'
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs < rhs
class OpGreaterThan(Op):
OP = '>'
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs > rhs
class OpEqual(Op):
OP = '='
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs == rhs
class OpNotEqual(Op):
OP = '<>'
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
        return lhs != rhs
class OpLessThanOrEqual(Op):
OP = '<='
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs <= rhs
class OpGreaterThanOrEqual(Op):
OP = '>='
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs >= rhs
class OpOr(Op):
OP = 'OR'
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs or rhs
class OpIn(Op):
OP = 'IN'
def expr(self, item):
lhs = self._lhs(item)
rhs = self._rhs(item)
return lhs in rhs
class FuncAttrExists(Func):
FUNC = 'attribute_exists'
def __init__(self, attribute):
self.attr = attribute
def expr(self, item):
return self.attr in item.attrs
class FuncAttrNotExists(Func):
FUNC = 'attribute_not_exists'
def __init__(self, attribute):
self.attr = attribute
def expr(self, item):
return self.attr not in item.attrs
class FuncAttrType(Func):
FUNC = 'attribute_type'
def __init__(self, attribute, _type):
self.attr = attribute
self.type = _type
def expr(self, item):
return self.attr in item.attrs and item.attrs[self.attr].type == self.type
class FuncBeginsWith(Func):
FUNC = 'begins_with'
def __init__(self, attribute, substr):
self.attr = attribute
self.substr = substr
def expr(self, item):
return self.attr in item.attrs and item.attrs[self.attr].type == 'S' and item.attrs[self.attr].value.startswith(self.substr)
class FuncContains(Func):
FUNC = 'contains'
def __init__(self, attribute, operand):
self.attr = attribute
self.operand = operand
def expr(self, item):
if self.attr not in item.attrs:
return False
if item.attrs[self.attr].type in ('S', 'SS', 'NS', 'BS', 'L', 'M'):
return self.operand in item.attrs[self.attr].value
return False
class FuncSize(Func):
    FUNC = 'size'
def __init__(self, attribute):
self.attr = attribute
def expr(self, item):
if self.attr not in item.attrs:
raise ValueError('Invalid attribute name {0}'.format(self.attr))
if item.attrs[self.attr].type in ('S', 'SS', 'NS', 'B', 'BS', 'L', 'M'):
return len(item.attrs[self.attr].value)
raise ValueError('Invalid filter expression')
class FuncBetween(Func):
FUNC = 'between'
def __init__(self, attribute, start, end):
self.attr = attribute
self.start = start
self.end = end
def expr(self, item):
if self.attr not in item.attrs:
raise ValueError('Invalid attribute name {0}'.format(self.attr))
return self.start <= item.attrs[self.attr].cast_value <= self.end
OP_CLASS = {
'NOT': OpNot,
'AND': OpAnd,
'OR': OpOr,
'IN': OpIn,
'<': OpLessThan,
'>': OpGreaterThan,
'<=': OpLessThanOrEqual,
'>=': OpGreaterThanOrEqual,
'=': OpEqual,
'<>': OpNotEqual
}
FUNC_CLASS = {
'attribute_exists': FuncAttrExists,
'attribute_not_exists': FuncAttrNotExists,
'attribute_type': FuncAttrType,
'begins_with': FuncBeginsWith,
'contains': FuncContains,
'size': FuncSize,
'between': FuncBetween
}
|
Affirm/moto
|
moto/dynamodb2/comparisons.py
|
Python
|
apache-2.0
| 15,123
| 0.00238
|
import cPickle
import os.path as osp
from smqtk.representation.code_index import CodeIndex
from smqtk.utils import SimpleTimer
__author__ = "paul.tunison@kitware.com"
class MemoryCodeIndex (CodeIndex):
"""
Local RAM memory based index with an optional file cache
"""
@classmethod
def is_usable(cls):
"""
No outside dependencies.
:rtype: bool
"""
return True
def __init__(self, file_cache=None):
"""
Initialize a new in-memory code index, or reload one from a cache.
:param file_cache: Optional path to a file path, loading an existing
index if the file already exists. Either way, providing a path to
this enabled file caching when descriptors are added to this index.
This cache file is a pickle serialization.
:type file_cache: None | str
"""
super(MemoryCodeIndex, self).__init__()
self._num_descr = 0
self._file_cache = file_cache
# Mapping of code to a dictionary mapping descrUUID->Descriptor
#: :type: dict[collections.Hashable, dict[collections.Hashable, smqtk.representation.DescriptorElement]]
self._table = {}
if file_cache and osp.isfile(file_cache):
            with open(file_cache, 'rb') as f:
self._log.debug("Loading cached code index table from file: %s",
file_cache)
#: :type: dict[collections.Hashable, dict[collections.Hashable, smqtk.representation.DescriptorElement]]
self._table = cPickle.load(f)
self._log.debug("Counting indexed descriptors")
# Find the number of descriptors in the table
self._num_descr = sum(len(d) for d in self._table.itervalues())
self._log.debug("Done loading cached table")
def cache_table(self):
if self._file_cache:
with SimpleTimer("Caching memory table", self._log.debug):
with open(self._file_cache, 'wb') as f:
cPickle.dump(self._table, f)
def get_config(self):
return {
"file_cache": self._file_cache
}
def count(self):
"""
:return: Number of descriptor elements stored in this index. This is not
necessarily the number of codes stored in the index.
:rtype: int
"""
return self._num_descr
def clear(self):
"""
Clear this code index's table entries.
"""
self._table = {}
self.cache_table()
def codes(self):
"""
:return: Set of code integers currently used in this code index.
:rtype: set[int]
"""
return set(self._table)
def iter_codes(self):
"""
Iterate over code contained in this index in an arbitrary order.
:return: Generator that yields integer code keys
:rtype: collections.Iterator[int|long]
"""
for k in self._table:
yield k
def add_descriptor(self, code, descriptor, no_cache=False):
"""
Add a descriptor to this index given a matching small-code
:param code: bit-hash of the given descriptor in integer form
:type code: int
:param descriptor: Descriptor to index
:type descriptor: smqtk.representation.DescriptorElement
:param no_cache: Do not cache the internal table if a file cache was
provided. This option should not be modified from its default by
normal use. Used internally.
:type no_cache: bool
"""
code_map = self._table.setdefault(code, {})
if descriptor.uuid() not in code_map:
self._num_descr += 1
code_map[descriptor.uuid()] = descriptor
if not no_cache:
self.cache_table()
def add_many_descriptors(self, code_descriptor_pairs):
"""
Add multiple code/descriptor pairs.
:param code_descriptor_pairs: Iterable of integer code and paired
descriptor tuples to add to this index.
:type code_descriptor_pairs:
collections.Iterable[(int, smqtk.representation.DescriptorElement)]
"""
for c, d in code_descriptor_pairs:
self.add_descriptor(c, d, True)
self.cache_table()
def get_descriptors(self, code_or_codes):
"""
Get iterable of descriptors associated to this code or iterable of
codes. This may return an empty iterable.
:param code_or_codes: An integer or iterable of integer bit-codes.
:type code_or_codes: collections.Iterable[int] | int
:return: Iterable of descriptors
:rtype: collections.Iterable[smqtk.representation.DescriptorElement]
"""
if hasattr(code_or_codes, '__iter__'):
# noinspection PyTypeChecker
# -> I literally just checked for __iter__
for c in code_or_codes:
for v in self._table.get(c, {}).values():
yield v
else: # assuming int
for v in self._table.get(code_or_codes, {}).itervalues():
yield v
CODE_INDEX_CLASS = MemoryCodeIndex
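# Hedged usage sketch (editor addition; assumes the smqtk imports above
# resolve). _StubDescriptor stands in for a DescriptorElement, exposing
# only the uuid() method this index actually calls.
if __name__ == '__main__':
    class _StubDescriptor(object):
        def __init__(self, uid):
            self._uid = uid
        def uuid(self):
            return self._uid
    index = MemoryCodeIndex()  # no file_cache given: purely in-memory
    index.add_many_descriptors([(0b1011, _StubDescriptor('d1')),
                                (0b1011, _StubDescriptor('d2'))])
    print index.count()                              # 2
    print sorted(index.codes())                      # [11]
    print len(list(index.get_descriptors(0b1011)))   # 2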
|
kfieldho/SMQTK
|
python/smqtk/representation/code_index/memory.py
|
Python
|
bsd-3-clause
| 5,235
| 0.000764
|
from issues.models import ReportedLink, ReportedUser
from issues.serializers import ReportedLinkSerializer, ReportedUserSerializer
class ReportedLinkAPI(object):
serializer_class = ReportedLinkSerializer
def get_queryset(self):
return ReportedLink.objects.all()
class ReportedLinkSelfAPI(object):
def get_queryset(self):
return ReportedLink.objects.filter(reporter=self.request.user)
def pre_save(self, obj):
obj.reporter = self.request.user
class ReportedUserAPI(object):
serializer_class = ReportedUserSerializer
def get_queryset(self):
return ReportedUser.objects.all()
class ReportedUserSelfAPI(object):
def get_queryset(self):
return ReportedUser.objects.filter(reporter=self.request.user)
def pre_save(self, obj):
obj.reporter = self.request.user
|
projectweekend/Links-API
|
links/issues/mixins.py
|
Python
|
mit
| 849
| 0
|
# Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. A svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
fh = os.popen('"%s" -e "use Win32;"' % perl)
fh.read()
rc = fh.close()
if rc:
continue
return perl
print("Can not find a suitable PERL:")
if perls:
print(" the following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print(" NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
return None
# Fetch SSL directory from VC properties
def get_ssl_dir():
propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.vsprops'))
with open(propfile) as f:
m = re.search('openssl-([^"]+)"', f.read())
    return r"..\..\openssl-"+m.group(1)
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
def run_configure(configure, do_script):
print("perl Configure "+configure+" no-idea no-mdc2")
os.system("perl Configure "+configure+" no-idea no-mdc2")
print(do_script)
os.system(do_script)
def cmp(f1, f2):
bufsize = 1024 * 8
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
def copy(src, dst):
if os.path.isfile(dst) and cmp(src, dst):
return
shutil.copy(src, dst)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
dirsuffix = "32"
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
dirsuffix = "64"
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
ssl_dir = get_ssl_dir()
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild makefile when we roll over from the 32- to the 64-bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# If the assembler files don't exist in tmpXX, copy them there
if perl is None and os.path.exists("asm"+dirsuffix):
if not os.path.exists("tmp"+dirsuffix):
os.mkdir("tmp"+dirsuffix)
for f in os.listdir("asm"+dirsuffix):
if not f.endswith(".asm"): continue
if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
# Now run make.
if arch == "amd64":
rc = os.system("ml64 -c -Foms\\uptable.obj ms\\uptable.asm")
if rc:
print("ml64 assembler has failed.")
sys.exit(rc)
copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
#makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
makeCommand = "nmake /nologo -f \"%s\"" % makefile
print("Executing ssl makefiles:", makeCommand)
sys.stdout.flush()
rc = os.system(makeCommand)
if rc:
print("Executing "+makefile+" failed")
print(rc)
sys.exit(rc)
finally:
os.chdir(old_cd)
sys.exit(rc)
if __name__=='__main__':
main()
|
LaoZhongGu/kbengine
|
kbe/src/lib/python/PCbuild/build_ssl.py
|
Python
|
lgpl-3.0
| 9,198
| 0.002827
|
import pytest
from webtest import TestApp
from pyramid.config import Configurator
from pyramid.testing import DummyRequest
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
def make_app(config):
return TestApp(config.make_wsgi_app())
@pytest.mark.parametrize('method', ['delete', 'get', 'post', 'patch', 'put'])
def test_unallowed_method_added(method):
config = Configurator()
config.scan('resource_only')
app = make_app(config)
getattr(app, method)('/', status=405)
def test_default_options_method():
config = Configurator()
config.scan('resource_only')
app = make_app(config)
response = app.options('/')
assert response.headers['Access-Control-Allow-Methods'] == 'OPTIONS'
def test_request_add_get_view():
config = Configurator()
config.scan('resource_get')
app = make_app(config)
app.get('/')
def test_request_default_to_json_renderer():
config = Configurator()
config.scan('resource_get')
app = make_app(config)
r = app.get('/')
assert r.content_type == 'application/json'
assert r.json == {'message': 'hello'}
def test_request_override_renderer():
config = Configurator()
config.scan('resource_get_renderer')
app = make_app(config)
r = app.get('/')
assert r.content_type == 'text/plain'
assert r.unicode_body == 'hello'
def test_add_controller():
config = Configurator()
config.scan('controller')
app = make_app(config)
app.post('/engage')
def test_nested_controller():
# Test for https://github.com/wichert/rest_toolkit/issues/12
config = Configurator()
config.scan('controller')
app = make_app(config)
app.post('/resource/engage')
def test_controller_default_to_json_renderer():
config = Configurator()
config.scan('controller')
app = make_app(config)
r = app.post('/engage')
assert r.content_type == 'application/json'
assert r.json == {'message': 'Ai ai captain'}
def test_set_controller_method():
config = Configurator()
config.scan('controller')
app = make_app(config)
r = app.get('/engage')
assert r.json == {'message': 'Warp engine offline'}
@pytest.mark.parametrize('method', ['delete', 'get', 'patch', 'put'])
def test_controller_invalid_method(method):
config = Configurator()
config.scan('controller')
app = make_app(config)
getattr(app, method)('/', status=405)
def test_default_get_view():
config = Configurator()
config.scan('resource_abc')
app = make_app(config)
r = app.get('/')
assert r.json == {'message': 'Hello, world'}
def test_override_default_view():
config = Configurator()
config.scan('resource_abc_override')
app = make_app(config)
r = app.get('/')
assert r.json == {'message': 'Welcome'}
def test_set_resource_route_name():
config = Configurator()
config.scan('resource_route_name')
config.make_wsgi_app()
request = DummyRequest()
request.registry = config.registry
assert request.route_path('user', id=15) == '/users/15'
def test_secured_default_view_not_allowed():
config = Configurator()
config.set_authentication_policy(AuthTktAuthenticationPolicy('seekrit'))
config.set_authorization_policy(ACLAuthorizationPolicy())
config.scan('resource_abc')
app = make_app(config)
app.get('/secure', status=403)
def test_secured_default_view_allowed():
config = Configurator()
config.testing_securitypolicy(1)
config.scan('resource_abc')
app = make_app(config)
app.get('/secure')
|
wichert/rest_toolkit
|
tests/test_resource.py
|
Python
|
bsd-2-clause
| 3,615
| 0
|
import urllib
from askbot.deps.django_authopenid.util import OAuthConnection
class Twitter(OAuthConnection):
def __init__(self):
super(Twitter, self).__init__('twitter')
self.tweet_url = 'https://api.twitter.com/1.1/statuses/update.json'
def tweet(self, text, access_token=None):
client = self.get_client(access_token)
body = urllib.urlencode({'status': text})
return self.send_request(client, self.tweet_url, 'POST', body=body)
|
PearsonIOKI/compose-forum
|
askbot/utils/twitter.py
|
Python
|
gpl-3.0
| 479
| 0.002088
|
# Author: Steven J. Bethard <steven.bethard@gmail.com>.
# New maintainer as of 29 August 2019: Raymond Hettinger <raymond.hettinger@gmail.com>
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'BooleanOptionalAction',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'MetavarTypeHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import os as _os
import re as _re
import sys as _sys
from gettext import gettext as _, ngettext
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
star_args = {}
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
if name.isidentifier():
arg_strings.append('%s=%r' % (name, value))
else:
star_args[name] = value
if star_args:
arg_strings.append('**%s' % repr(star_args))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return list(self.__dict__.items())
def _get_args(self):
return []
def _copy_items(items):
if items is None:
return []
# The copy module is used only in the 'append' and 'append_const'
# actions, and it is needed only when the default value isn't a list.
# Delay its import for speeding up the common case.
if type(items) is list:
return items[:]
import copy
return copy.copy(items)
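# NOTE (editor): _copy_items guards the 'append', 'append_const' and 'extend'
# actions against the shared-mutable-default pitfall: the stored default is
# copied before mutation, so repeated parses don't accumulate into one list.
# An illustrative sketch of the behavior it enables (hypothetical usage,
# not part of this module):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--tag', action='append', default=[])
#     args = parser.parse_args(['--tag', 'a', '--tag', 'b'])
#     assert args.tag == ['a', 'b']
#     assert parser.parse_args([]).tag == []   # default list left untouched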
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
import shutil
width = shutil.get_terminal_size().columns
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = min(max_help_position,
max(width - 20, indent_increment * 2))
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+', _re.ASCII)
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max(map(len, invocations))
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = (
r'\(.*?\)+(?=\s|$)|'
r'\[.*?\]+(?=\s|$)|'
r'\S+'
)
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width and line:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
if not group._group_actions:
raise ValueError(f'empty group {group}')
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
if end in inserts:
inserts[end] += ']'
else:
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
if end in inserts:
inserts[end] += ')'
else:
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
default = self._get_default_metavar_for_positional(action)
part = self._format_args(action, default)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = action.format_usage()
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = max(self._width - self._current_indent, 11)
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = max(self._width - help_position, 11)
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help and action.help.strip():
help_text = self._expand_help(action)
if help_text:
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
default = self._get_default_metavar_for_positional(action)
metavar, = self._metavar_formatter(action, default)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
metavar = get_metavar(1)
if len(metavar) == 2:
result = '[%s [%s ...]]' % metavar
else:
result = '[%s ...]' % metavar
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
elif action.nargs == SUPPRESS:
result = ''
else:
try:
formats = ['%s' for _ in range(action.nargs)]
except TypeError:
raise ValueError("invalid nargs value") from None
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
yield from get_subactions()
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
# The textwrap module is used only for formatting help.
# Delay its import for speeding up the common usage of argparse.
import textwrap
return textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
import textwrap
return textwrap.fill(text, width,
initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
def _get_default_metavar_for_optional(self, action):
return action.dest.upper()
def _get_default_metavar_for_positional(self, action):
return action.dest
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join(indent + line for line in text.splitlines(keepends=True))
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
class MetavarTypeHelpFormatter(HelpFormatter):
"""Help message formatter which uses the argument 'type' as the default
metavar value (instead of the argument 'dest')
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_default_metavar_for_optional(self, action):
return action.type.__name__
def _get_default_metavar_for_positional(self, action):
return action.type.__name__
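# NOTE (editor): any of the formatter classes above is selected via the
# formatter_class= argument. An illustrative sketch (hypothetical usage,
# not part of this module):
#
#     import argparse
#     parser = argparse.ArgumentParser(
#         description='Demo.\n\nRawDescriptionHelpFormatter keeps this break.',
#         formatter_class=argparse.RawDescriptionHelpFormatter)
#     parser.add_argument('--retries', type=int, default=3,
#                         help='how many times to retry')
#     # With ArgumentDefaultsHelpFormatter instead, the help line would
#     # render as 'how many times to retry (default: 3)'.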
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
elif argument.choices:
return '{' + ','.join(argument.choices) + '}'
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
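# NOTE (editor): ArgumentTypeError is the hook for type= callables to report
# clean per-argument errors. An illustrative sketch (hypothetical helper,
# not part of this module):
#
#     import argparse
#     def positive_int(text):
#         value = int(text)
#         if value <= 0:
#             raise argparse.ArgumentTypeError('%r is not positive' % text)
#         return value
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('count', type=positive_int)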
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- A callable that accepts a single string argument, and
returns the converted value. The standard Python types str, int,
float, and complex are useful examples of such callables. If None,
str is used.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def format_usage(self):
return self.option_strings[0]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
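# NOTE (editor): custom behavior is added by subclassing Action and
# overriding __call__. An illustrative sketch (hypothetical subclass,
# not part of this module):
#
#     import argparse
#     class UpperAction(argparse.Action):
#         def __call__(self, parser, namespace, values, option_string=None):
#             setattr(namespace, self.dest, values.upper())
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--name', action=UpperAction)
#     assert parser.parse_args(['--name', 'ada']).name == 'ADA'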
class BooleanOptionalAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
_option_strings = []
for option_string in option_strings:
_option_strings.append(option_string)
if option_string.startswith('--'):
option_string = '--no-' + option_string[2:]
_option_strings.append(option_string)
if help is not None and default is not None:
help += " (default: %(default)s)"
super().__init__(
option_strings=_option_strings,
dest=dest,
nargs=0,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
if option_string in self.option_strings:
setattr(namespace, self.dest, not option_string.startswith('--no-'))
def format_usage(self):
return ' | '.join(self.option_strings)
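# NOTE (editor): BooleanOptionalAction synthesizes a '--no-' twin for each
# long option string. An illustrative sketch (hypothetical usage):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--cache', action=argparse.BooleanOptionalAction,
#                         default=True)
#     assert parser.parse_args([]).cache is True
#     assert parser.parse_args(['--no-cache']).cache is False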
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be != 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
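# NOTE (editor): the store variants above back the registered names 'store',
# 'store_const', 'store_true' and 'store_false'. An illustrative sketch
# (hypothetical usage):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--verbose', action='store_true')    # default False
#     parser.add_argument('--quiet', action='store_false', dest='chatty')
#     args = parser.parse_args(['--verbose'])
#     assert args.verbose is True and args.chatty is True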
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be != 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest, None)
items = _copy_items(items)
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest, None)
items = _copy_items(items)
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
count = getattr(namespace, self.dest, None)
if count is None:
count = 0
setattr(namespace, self.dest, count + 1)
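# NOTE (editor): 'count' is the usual choice for stacked verbosity flags.
# An illustrative sketch (hypothetical usage):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('-v', '--verbose', action='count', default=0)
#     assert parser.parse_args(['-vvv']).verbose == 3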
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser._print_message(formatter.format_help(), _sys.stdout)
parser.exit()
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, aliases, help):
metavar = dest = name
if aliases:
metavar += ' (%s)' % ', '.join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=dest, help=help,
metavar=metavar)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
required=False,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
required=required,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
aliases = kwargs.pop('aliases', ())
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
# make parser available under aliases also
for alias in aliases:
self._name_parser_map[alias] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
args = {'parser_name': parser_name,
'choices': ', '.join(self._name_parser_map)}
msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
# In case this subparser defines new defaults, we parse them
# in a new namespace object and then update the original
# namespace for the relevant parts.
subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)
for key, value in vars(subnamespace).items():
setattr(namespace, key, value)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
class _ExtendAction(_AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest, None)
items = _copy_items(items)
items.extend(values)
setattr(namespace, self.dest, items)
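# NOTE (editor): 'extend' flattens each occurrence's values into one list,
# unlike 'append' which would nest them. An illustrative sketch
# (hypothetical usage):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--item', action='extend', nargs='+', default=[])
#     args = parser.parse_args(['--item', 'a', 'b', '--item', 'c'])
#     assert args.item == ['a', 'b', 'c']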
# ============
# Type classes
# ============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
- encoding -- The file's encoding. Accepts the same values as the
builtin open() function.
- errors -- A string indicating how encoding and decoding errors are to
be handled. Accepts the same value as the builtin open() function.
"""
def __init__(self, mode='r', bufsize=-1, encoding=None, errors=None):
self._mode = mode
self._bufsize = bufsize
self._encoding = encoding
self._errors = errors
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
try:
return open(string, self._mode, self._bufsize, self._encoding,
self._errors)
except OSError as e:
args = {'filename': string, 'error': e}
message = _("can't open '%(filename)s': %(error)s")
raise ArgumentTypeError(message % args)
def __repr__(self):
args = self._mode, self._bufsize
kwargs = [('encoding', self._encoding), ('errors', self._errors)]
args_str = ', '.join([repr(arg) for arg in args if arg != -1] +
['%s=%r' % (kw, arg) for kw, arg in kwargs
if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
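# NOTE (editor): FileType defers the open() call to parse time and maps '-'
# to the standard streams. An illustrative sketch (hypothetical usage):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     parser.add_argument('outfile',
#                         type=argparse.FileType('w', encoding='utf-8'))
#     args = parser.parse_args(['-'])    # '-' with mode 'w' is sys.stdout
#     args.outfile.write('hello\n')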
# ===============================
# Optional and Positional Parsing
# ===============================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
if not isinstance(other, Namespace):
return NotImplemented
return vars(self) == vars(other)
def __contains__(self, key):
return key in self.__dict__
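# NOTE (editor): Namespace is a plain attribute bag with equality and
# membership support. An illustrative sketch (hypothetical usage):
#
#     import argparse
#     ns = argparse.Namespace(x=1)
#     assert ns.x == 1 and 'x' in ns
#     assert ns == argparse.Namespace(x=1)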
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
self.register('action', 'extend', _ExtendAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not callable(action_class):
raise ValueError('unknown action "%s"' % (action_class,))
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
raise ValueError('%r is not callable' % (type_func,))
if type_func is FileType:
            raise ValueError('%r is a FileType class object, an instance of it'
                             ' must be passed' % (type_func,))
# raise an error if the metavar does not match the type
if hasattr(self, "_get_formatter"):
try:
self._get_formatter()._format_args(action, None)
except TypeError:
raise ValueError("length of metavar tuple does not match nargs")
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
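    # NOTE (editor): the group objects returned by the two methods above
    # expose add_argument() themselves. An illustrative sketch
    # (hypothetical usage, not part of this module):
    #
    #     import argparse
    #     parser = argparse.ArgumentParser()
    #     group = parser.add_mutually_exclusive_group()
    #     group.add_argument('--json', action='store_true')
    #     group.add_argument('--yaml', action='store_true')
    #     # supplying both '--json --yaml' produces a usage error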
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
args = {'option': option_string,
'prefix_chars': self.prefix_chars}
msg = _('invalid option string %(option)r: '
'must start with a character %(prefix_chars)r')
raise ValueError(msg % args)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if len(option_string) > 1 and option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
            msg = _('invalid conflict_handler value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = ngettext('conflicting option string: %s',
'conflicting option strings: %s',
len(conflicting_actions))
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
self._mutually_exclusive_groups = container._mutually_exclusive_groups
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default:
``os.path.basename(sys.argv[0])``)
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
    - add_help -- Add a -h/--help option
- allow_abbrev -- Allow long options to be abbreviated unambiguously
- exit_on_error -- Determines whether or not ArgumentParser exits with
error info when an error occurs
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
allow_abbrev=True,
exit_on_error=True):
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
self.allow_abbrev = allow_abbrev
self.exit_on_error = exit_on_error
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('options'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help argument if necessary
# (using explicit default to override global argument_default)
default_prefix = '-' if '-' in prefix_chars else prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
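    # NOTE (editor): the returned action's add_parser() method builds the
    # per-command sub-parsers. An illustrative sketch (hypothetical usage):
    #
    #     import argparse
    #     parser = argparse.ArgumentParser()
    #     sub = parser.add_subparsers(dest='command', required=True)
    #     push = sub.add_parser('push', help='upload changes')
    #     push.add_argument('remote')
    #     args = parser.parse_args(['push', 'origin'])
    #     assert args.command == 'push' and args.remote == 'origin'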
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
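    # NOTE (editor): parse_args() is the strict wrapper; parse_known_args()
    # below returns leftover strings instead of erroring. An illustrative
    # sketch (hypothetical usage):
    #
    #     import argparse
    #     parser = argparse.ArgumentParser()
    #     parser.add_argument('--x')
    #     ns, rest = parser.parse_known_args(['--x', '1', '--y'])
    #     assert ns.x == '1' and rest == ['--y']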
def parse_known_args(self, args=None, namespace=None):
if args is None:
# args default to the system args
args = _sys.argv[1:]
else:
# make sure that args are mutable
args = list(args)
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
setattr(namespace, action.dest, action.default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
if self.exit_on_error:
try:
namespace, args = self._parse_known_args(args, namespace)
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
else:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # convert arg strings to the appropriate type and then take the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# make sure all required actions were present and also convert
# action defaults which were not given as arguments
required_actions = []
for action in self._actions:
if action not in seen_actions:
if action.required:
required_actions.append(_get_action_name(action))
else:
# Convert action default now instead of doing it before
# parsing arguments to avoid calling convert functions
# twice (which may fail) if the argument was given, but
# only if it was defined already in the namespace
if (action.default is not None and
isinstance(action.default, str) and
hasattr(namespace, action.dest) and
action.default is getattr(namespace, action.dest)):
setattr(namespace, action.dest,
self._get_value(action, action.default))
if required_actions:
self.error(_('the following arguments are required: %s') %
', '.join(required_actions))
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
with open(arg_string[1:]) as args_file:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
except OSError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
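    # Common override (sketch): treat each whitespace-separated token on a
    # fromfile line as a separate argument instead of one argument per line.
    #
    #     class MyParser(ArgumentParser):
    #         def convert_arg_line_to_args(self, arg_line):
    #             return arg_line.split()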
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
msg = nargs_errors.get(action.nargs)
if msg is None:
msg = ngettext('expected %s argument',
'expected %s arguments',
action.nargs) % action.nargs
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
args = {'option': arg_string, 'matches': options}
msg = _('ambiguous option: %(option)s could match %(matches)s')
self.error(msg % args)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if self.allow_abbrev:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# suppress action, like nargs=0
elif nargs == SUPPRESS:
nargs_pattern = '(-*-*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
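    # Worked example (not in the original source): for an optional action
    # with nargs=2 the pattern above starts as '(-*A-*A-*)' and, after the
    # '-*'/'-' stripping applied to optionals, becomes '(AA)', matching
    # exactly two argument strings in the 'AO...' pattern built during
    # parsing.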
# ========================
# Alt command line argument parsing, allowing free intermix
# ========================
def parse_intermixed_args(self, args=None, namespace=None):
args, argv = self.parse_known_intermixed_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_intermixed_args(self, args=None, namespace=None):
# returns a namespace and list of extras
#
# positional can be freely intermixed with optionals. optionals are
# first parsed with all positional arguments deactivated. The 'extras'
# are then parsed. If the parser definition is incompatible with the
# intermixed assumptions (e.g. use of REMAINDER, subparsers) a
# TypeError is raised.
#
# positionals are 'deactivated' by setting nargs and default to
# SUPPRESS. This blocks the addition of that positional to the
# namespace
positionals = self._get_positional_actions()
a = [action for action in positionals
if action.nargs in [PARSER, REMAINDER]]
if a:
raise TypeError('parse_intermixed_args: positional arg'
' with nargs=%s'%a[0].nargs)
if [action.dest for group in self._mutually_exclusive_groups
for action in group._group_actions if action in positionals]:
raise TypeError('parse_intermixed_args: positional in'
' mutuallyExclusiveGroup')
try:
save_usage = self.usage
try:
if self.usage is None:
# capture the full usage for use in error messages
self.usage = self.format_usage()[7:]
for action in positionals:
# deactivate positionals
action.save_nargs = action.nargs
# action.nargs = 0
action.nargs = SUPPRESS
action.save_default = action.default
action.default = SUPPRESS
namespace, remaining_args = self.parse_known_args(args,
namespace)
for action in positionals:
# remove the empty positional values from namespace
if (hasattr(namespace, action.dest)
and getattr(namespace, action.dest)==[]):
from warnings import warn
warn('Do not expect %s in %s' % (action.dest, namespace))
delattr(namespace, action.dest)
finally:
# restore nargs and usage before exiting
for action in positionals:
action.nargs = action.save_nargs
action.default = action.save_default
optionals = self._get_optional_actions()
try:
# parse positionals. optionals aren't normally required, but
# they could be, so make sure they aren't.
for action in optionals:
action.save_required = action.required
action.required = False
for group in self._mutually_exclusive_groups:
group.save_required = group.required
group.required = False
namespace, extras = self.parse_known_args(remaining_args,
namespace)
finally:
# restore parser values before exiting
for action in optionals:
action.required = action.save_required
for group in self._mutually_exclusive_groups:
group.required = group.save_required
finally:
self.usage = save_usage
return namespace, extras
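    # Usage sketch (mirrors the documented behaviour of this method):
    #
    #     parser = ArgumentParser()
    #     parser.add_argument('--foo')
    #     parser.add_argument('cmd')
    #     parser.add_argument('rest', nargs='*', type=int)
    #     parser.parse_intermixed_args(['doit', '1', '--foo', 'bar', '2', '3'])
    #     # -> Namespace(cmd='doit', foo='bar', rest=[1, 2, 3])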
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER, REMAINDER args, strip out first '--'
if action.nargs not in [PARSER, REMAINDER]:
try:
arg_strings.remove('--')
except ValueError:
pass
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, str):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# SUPPRESS argument does not put anything in the namespace
elif action.nargs == SUPPRESS:
value = SUPPRESS
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
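    # Note on the ZERO_OR_MORE branch above: a positional with nargs='*' and
    # default=['x'] yields ['x'] when no argument strings are supplied, while
    # default=None falls through to the empty list of arg strings.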
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
        except ArgumentTypeError as err:
            msg = str(err)
            raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
args = {'type': name, 'value': arg_string}
msg = _('invalid %(type)s value: %(value)r')
raise ArgumentError(action, msg % args)
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
args = {'value': value,
'choices': ', '.join(map(repr, action.choices))}
msg = _('invalid choice: %(value)r (choose from %(choices)s)')
raise ArgumentError(action, msg % args)
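    # Example type callable (sketch): raising ArgumentTypeError inside a
    # user-supplied type function is what _get_value above converts into a
    # clean parser error message.
    #
    #     def positive_int(string):
    #         value = int(string)
    #         if value <= 0:
    #             raise ArgumentTypeError('%r is not positive' % string)
    #         return value
    #     parser.add_argument('count', type=positive_int)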
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
|
bruderstein/PythonScript
|
PythonLib/min/argparse.py
|
Python
|
gpl-2.0
| 97,984
| 0.000235
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, os.path, re
import distutils, setuptools
from setuptools import dist, extension
from setuptools.command import build_ext
from setuptools.extension import Library as _Library
def patch_st_dir(patch_version, st_egg, jccdir):
return '''
Shared mode is disabled, setuptools patch.43.%s must be applied to enable it
or the NO_SHARED environment variable must be set to turn off this error.
sudo patch -d %s -Nup0 < %s/jcc/patches/patch.43.%s
See %s/INSTALL for more information about shared mode.
''' %(patch_version, st_egg, jccdir, patch_version, jccdir)
def patch_st_zip(patch_version, st_egg, jccdir):
return '''
Shared mode is disabled, setuptools patch.43.%s must be applied to enable it
or the NO_SHARED environment variable must be set to turn off this error.
mkdir tmp
cd tmp
unzip -q %s
patch -Nup0 < %s/jcc/patches/patch.43.%s
sudo zip %s -f
cd ..
rm -rf tmp
See %s/INSTALL for more information about shared mode.
''' %(patch_version, st_egg, jccdir, patch_version, st_egg, jccdir)
def patch_setuptools(with_setuptools):
with_setuptools_c7 = ('00000000', '00000006', '*c', '00000007', '*final')
with_setuptools_c11 = ('00000000', '00000006', '*c', '00000011', '*final')
with_distribute_1 = ('00000000', '00000006', '00000001', '*final')
try:
from setuptools.command.build_ext import sh_link_shared_object
enable_shared = True # jcc/patches/patch.43 was applied
except ImportError:
jccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
st_egg = os.path.dirname(setuptools.__path__[0])
if with_setuptools_c7 <= with_setuptools <= with_setuptools_c11 or with_distribute_1 <= with_setuptools:
# Old setuptools 0.6c7-10 series
# New distribute 0.6.1+ series
if with_setuptools < with_setuptools_c11 and not hasattr(dist, 'check_packages'):
# Old setuptools 0.6c7-10 series missing check_packages()
dist.check_packages = check_packages
setuptools.Library = LinuxLibrary
extension.Library = LinuxLibrary
build_ext.build_ext = LinuxBuildExt
if build_ext.use_stubs:
# Build shared libraries.
global sh_link_shared_object # Fix UnboundLocalError
build_ext.link_shared_object = sh_link_shared_object
else:
# Build static libraries every where else (unless forced)
build_ext.libtype = 'static'
build_ext.link_shared_object = st_link_shared_object
            print("Applied shared mode monkey patch to:", setuptools, file=sys.stderr)
return True # monkey patch was applied
elif with_setuptools < with_setuptools_c11: # old 0.6c7-10 series
patch_version = '0.6c7'
elif with_setuptools >= with_distribute_1: # new 0.6.1 and up fork
patch_version = '0.6c7' # compatible with 0.6c7
else:
patch_version = '0.6c11' # old 0.6c11+ series
if os.path.isdir(st_egg):
            raise NotImplementedError(patch_st_dir(patch_version, st_egg,
                                                   jccdir))
        else:
            raise NotImplementedError(patch_st_zip(patch_version, st_egg,
                                                   jccdir))
return enable_shared
class LinuxLibrary(_Library):
def __init__(self, *args, **kwds):
self.force_shared = kwds.pop('force_shared', False)
extension.Extension.__init__(self, *args, **kwds)
class LinuxBuildExt(build_ext.build_ext):
def get_ext_filename(self, fullname):
filename = build_ext._build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
if isinstance(ext, _Library):
if ext.force_shared and not build_ext.use_stubs:
libtype = 'shared'
else:
libtype = build_ext.libtype
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif build_ext.use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
def build_extension(self, ext):
_compiler = self.compiler
try:
force_shared = False
if isinstance(ext, _Library):
self.compiler = self.shlib_compiler
force_shared = ext.force_shared and not build_ext.use_stubs
if force_shared:
self.compiler.link_shared_object = sh_link_shared_object.__get__(self.compiler)
build_ext._build_ext.build_extension(self, ext)
if ext._needs_stub:
self.write_stub(self.get_finalized_command('build_py').build_lib, ext)
finally:
if force_shared:
self.compiler.link_shared_object = build_ext.link_shared_object.__get__(self.compiler)
self.compiler = _compiler
def sh_link_shared_object(self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
self.link(self.SHARED_LIBRARY, objects, output_libname, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang)
def st_link_shared_object(self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(objects, basename, output_dir, debug, target_lang)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only"
".-separated package names in setup.py", pkgname
)
|
devs4v/devs4v-information-retrieval15
|
project/lucene/pylucene-4.9.0-0/jcc/helpers/linux.py
|
Python
|
mit
| 7,260
| 0.003306
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
try:
from PyQt4.QtGui import QApplication, QKeySequence
except ImportError:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QKeySequence
from pygs import QxtGlobalShortcut
def hotkeyBinding():
SHORTCUT_SHOW = "Ctrl+Alt+S" # Ctrl maps to Command on Mac OS X
SHORTCUT_EXIT = "Ctrl+Alt+F" # again, Ctrl maps to Command on Mac OS X
def show_activated():
print("Shortcut Activated!")
app = QApplication([])
shortcut_show = QxtGlobalShortcut()
shortcut_show.setShortcut(QKeySequence(SHORTCUT_SHOW))
shortcut_show.activated.connect(show_activated)
shortcut_exit = QxtGlobalShortcut()
shortcut_exit.setShortcut(QKeySequence(SHORTCUT_EXIT))
shortcut_exit.activated.connect(app.exit)
return_code = app.exec_()
del shortcut_show
del shortcut_exit
sys.exit(return_code)
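# Entry point (assumption: this module is intended to be run directly;
# the original defines hotkeyBinding() but never calls it).
if __name__ == '__main__':
    hotkeyBinding()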
|
LeunamBk/translatorPy
|
globalHotkeys.py
|
Python
|
gpl-3.0
| 950
| 0.009474
|
import unittest
from mock import patch, call
from chaser.controller import MotorController, LEFT_KEY, RIGHT_KEY, UP_KEY, DOWN_KEY, MotorInputError
class ControllerTestCase(unittest.TestCase):
@patch('chaser.controller.gpio')
def test_init(self, io):
io.OUT = True
calls = [call(4, True), call(17, True), call(24, True), call(25, True)]
MotorController()
io.setup.assert_has_calls(calls)
@patch('chaser.controller.gpio')
def test_shut_down(self, io):
controller = MotorController()
io.reset_mock()
controller.shut_down()
calls = [call(4, False), call(17, False), call(24, False), call(25, False)]
io.output.assert_has_calls(calls)
@patch('chaser.controller.gpio')
def test_left(self, io):
controller = MotorController()
controller.left()
calls = [call(24, True), call(25, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'left')
@patch('chaser.controller.gpio')
def test_left_stop(self, io):
controller = MotorController()
controller.turn_keys.add(LEFT_KEY)
controller.left()
calls = [call(24, False), call(25, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_right(self, io):
controller = MotorController()
controller.right()
calls = [call(25, True), call(24, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'right')
@patch('chaser.controller.gpio')
def test_right_stop(self, io):
controller = MotorController()
controller.turn_keys.add(RIGHT_KEY)
controller.right()
calls = [call(25, False), call(24, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_forward(self, io):
controller = MotorController()
controller.forward()
calls = [call(4, True), call(17, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'forward')
@patch('chaser.controller.gpio')
def test_forward_stop(self, io):
controller = MotorController()
controller.progress_keys.add(UP_KEY)
controller.forward()
calls = [call(4, False), call(17, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_reverse(self, io):
controller = MotorController()
controller.reverse()
calls = [call(17, True), call(4, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'backwards')
@patch('chaser.controller.gpio')
def test_reverse_stop(self, io):
controller = MotorController()
controller.progress_keys.add(DOWN_KEY)
controller.reverse()
calls = [call(17, False), call(4, False)]
io.output.assert_has_calls(calls)
self.assertEqual(controller.state, 'stopped')
@patch('chaser.controller.gpio')
def test_motor(self, io):
controller = MotorController()
controller.motor(UP_KEY)
calls = [call(4, True), call(17, False)]
io.output.assert_has_calls(calls)
io.reset_mock()
controller.motor(DOWN_KEY)
calls = [call(17, True), call(4, False)]
io.output.assert_has_calls(calls)
io.reset_mock()
controller.motor(RIGHT_KEY)
calls = [call(25, True), call(24, False)]
io.output.assert_has_calls(calls)
io.reset_mock()
controller.motor(LEFT_KEY)
calls = [call(24, True), call(25, False)]
io.output.assert_has_calls(calls)
def test_motor_bad_key(self):
controller = MotorController()
with self.assertRaises(MotorInputError):
controller.motor('other')
|
mmilkin/cchaser
|
chaser/tests/test_controller.py
|
Python
|
mit
| 4,019
| 0.000498
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Property classes for building wrapper classes for Pikov nodes.
We want to wrap our semantic graph with Python classes. This allows us to
interact with Python objects to modify the guid_map.
These classes encode the core types used in the semantic graph. When classes
use these properties, the guid_map is updated with the correct serialization
of the property.
"""
from .core import Int64Node, StringNode
class AbstractSemanticGraphProperty(object):
def __init__(self, label):
self._label = label
def from_node(self, obj, value):
raise NotImplementedError()
def to_node(self, value):
raise NotImplementedError()
def __get__(self, obj, type=None):
return self.from_node(obj, obj[self._label])
def __set__(self, obj, value):
obj[self._label] = self.to_node(value)
class UnspecifiedProperty(AbstractSemanticGraphProperty):
def from_node(self, obj, value):
        return obj._graph.get_value(obj, self._label)
def to_node(self, value):
        # Value should already be a Node.
return value
class GuidProperty(AbstractSemanticGraphProperty):
def __init__(self, label, cls):
super().__init__(label)
self._cls = cls
def from_node(self, obj, value):
if value is None:
return None
return self._cls(obj._graph, guid=value.guid)
def to_node(self, value):
        # Value should already be a GuidNode.
return value
def make_guid_property(wrapped):
def __init__(self, label):
GuidProperty.__init__(self, label, wrapped)
return type(
wrapped.__name__ + "Property",
(GuidProperty,),
{
"__init__": __init__,
}
)
class ScalarProperty(AbstractSemanticGraphProperty):
def from_node(self, obj, value):
if value is None:
return None
return value.value
class Int64Property(ScalarProperty):
def to_node(self, value):
if value is None:
return None
return Int64Node(value)
class StringProperty(ScalarProperty):
def to_node(self, value):
if value is None:
return None
return StringNode(value)
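# Illustrative wrapper sketch (class name and labels are hypothetical, not
# part of this module): a node wrapper declares properties by label, and
# attribute access routes through from_node/to_node against the graph.
#
#     class Sprite(object):
#         name = StringProperty("pikov.name")
#         duration = Int64Property("pikov.duration")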
|
google/pikov
|
python/pikov/properties.py
|
Python
|
apache-2.0
| 2,766
| 0
|
#!/usr/bin/python
# encoding: utf-8
# filename: outroTipoDeProducaoBibliografica.py
#
# scriptLattes V8
# Copyright 2005-2013: Jesús P. Mena-Chalco e Roberto M. Cesar-Jr.
# http://scriptlattes.sourceforge.net/
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation (FSF); either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from scriptLattes import *
from geradorDePaginasWeb import *
import re
class OutroTipoDeProducaoBibliografica:
    item = None # raw data
idMembro = None
relevante = None
autores = None
titulo = None
ano = None
    natureza = None # type of production
chave = None
def __init__(self, idMembro, partesDoItem='', relevante=''):
self.idMembro = set([])
self.idMembro.add(idMembro)
if not partesDoItem=='':
            # partesDoItem[0]: number (NOT USED)
            # partesDoItem[1]: description of the item (RAW DATA)
self.relevante = relevante
self.item = partesDoItem[1]
            # Split the item into its constituent parts
partes = self.item.partition(" . ")
self.autores = partes[0].strip()
partes = partes[2]
aux = re.findall(u' \((.*?)\)\.$', partes)
if len(aux)>0:
self.natureza = aux[-1]
partes = partes.rpartition(" (")
partes = partes[0]
else:
self.natureza = ''
aux = re.findall(u' ((?:19|20)\d\d)\\b', partes)
if len(aux)>0:
self.ano = aux[-1] #.strip().rstrip(".").rstrip(",")
partes = partes.rpartition(" ")
partes = partes[0]
else:
self.ano = ''
self.titulo = partes.strip().rstrip(".").rstrip(",")
            self.chave = self.autores # comparison key between objects
else:
self.relevante = ''
self.autores = ''
self.titulo = ''
self.ano = ''
self.natureza = ''
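    # Illustrative parse (hypothetical raw item): a partesDoItem[1] such as
    #   "SILVA, J.; SOUZA, M. . Titulo do trabalho. 2010. (Relatorio tecnico)."
    # yields autores="SILVA, J.; SOUZA, M.", titulo="Titulo do trabalho",
    # ano="2010" and natureza="Relatorio tecnico".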
def compararCom(self, objeto):
if self.idMembro.isdisjoint(objeto.idMembro) and compararCadeias(self.titulo, objeto.titulo):
            # The member IDs are merged together.
            # This part is important for building the collaboration GRAPH
self.idMembro.update(objeto.idMembro)
if len(self.autores)<len(objeto.autores):
self.autores = objeto.autores
if len(self.titulo)<len(objeto.titulo):
self.titulo = objeto.titulo
if len(self.natureza)<len(objeto.natureza):
self.natureza = objeto.natureza
return self
        else: # not similar
return None
def html(self, listaDeMembros):
s = self.autores + '. <b>' + self.titulo + '</b>. '
s+= str(self.ano) + '. ' if str(self.ano).isdigit() else '. '
s+= self.natureza if not self.natureza=='' else ''
s+= menuHTMLdeBuscaPB(self.titulo)
return s
# ------------------------------------------------------------------------ #
def __str__(self):
s = "\n[OUTRO TIPO DE PRODUCAO BIBLIOGRAFICA] \n"
s += "+ID-MEMBRO : " + str(self.idMembro) + "\n"
s += "+RELEVANTE : " + str(self.relevante) + "\n"
s += "+AUTORES : " + self.autores.encode('utf8','replace') + "\n"
s += "+TITULO : " + self.titulo.encode('utf8','replace') + "\n"
s += "+ANO : " + str(self.ano) + "\n"
s += "+NATUREZA : " + self.natureza.encode('utf8','replace') + "\n"
s += "+item : " + self.item.encode('utf8','replace') + "\n"
return s
|
DiegoQueiroz/scriptLattes
|
scriptLattes/producoesBibliograficas/outroTipoDeProducaoBibliografica.py
|
Python
|
gpl-2.0
| 3,771
| 0.032388
|
from os.path import abspath
import wptools
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_knowledge.services import KnowledgeBackend
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(abspath(__file__).split('/')[-2])
class WikidataService(KnowledgeBackend):
def __init__(self, config, emitter, name='wikidata'):
self.config = config
self.process = None
self.emitter = emitter
self.name = name
self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
def _adquire(self, message=None):
logger.info('WikidataKnowledge_Adquire')
subject = message.data["subject"]
if subject is None:
logger.error("No subject to adquire knowledge about")
return
else:
            result = {}
node_data = {}
            # gather knowledge about the subject
            # TODO: more granular exception handling for errors
try:
page = wptools.page(subject, silent=True, verbose=False).get_wikidata()
# parse for distant child of
node_data["description"] = page.description
# direct child of
node_data["what"] = page.what
# data fields
node_data["data"] = page.wikidata
# related to
# TODO parse and make cousin/child/parent
node_data["properties"] = page.props
# id info source
dict["wikidata"] = node_data
except:
logger.error("Could not parse wikidata for " + str(subject))
self.send_result(dict)
def adquire(self, subject):
logger.info('Call WikidataKnowledgeAdquire')
self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
def send_result(self, result = {}):
self.emitter.emit(Message("LILACS_result", {"data": result}))
def stop(self):
logger.info('WikidataKnowledge_Stop')
if self.process:
self.process.terminate()
self.process = None
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'wikidata']
instances = [WikidataService(s[1], emitter, s[0]) for s in services]
return instances
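# Configuration sketch (keys inferred from load_service above; the backend
# name, emitter, and subject are illustrative assumptions):
#
#     base_config = {'backends': {'wikidata': {'type': 'wikidata'}}}
#     services = load_service(base_config, emitter)
#     services[0].adquire('water')  # emits WikidataKnowledgeAdquire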
|
ElliotTheRobot/LILACS-mycroft-core
|
mycroft/skills/LILACS_knowledge/services/wikidata/__init__.py
|
Python
|
gpl-3.0
| 2,400
| 0.002917
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from IPython import get_ipython
from IPython.display import (
display,
Javascript,
HTML,
)
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics,
magics_class,
cell_magic,
)
import pyjade
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
@magics_class
class JadeMagics(Magics):
"""
Write and load HTML with Jade in the IPython Notebook.
Example:
%%jade
ul
            li some text!
"""
def __init__(self, shell):
super(JadeMagics, self).__init__(shell)
@cell_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"var_name",
default=None,
nargs="?",
help="""Name of local variable to set to parsed value"""
)
def jade(self, line, cell):
line = line.strip()
args = magic_arguments.parse_argstring(self.jade, line)
display(Javascript(
"""
require(
[
"notebook/js/codecell",
"codemirror/mode/jade/jade"
],
function(cc){
cc.CodeCell.options_default.highlight_modes.magic_jade = {
reg: ["^%%jade"]
}
}
);
"""))
try:
val = pyjade.simple_convert(cell)
except Exception as err:
print(err)
return
if args.var_name is not None:
get_ipython().user_ns[args.var_name] = val
else:
return HTML(val)
def load_ipython_extension(ip):
ip.register_magics(JadeMagics)
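# Notebook usage sketch (hedged; the optional var_name is handled by the
# argument parser in jade() above):
#
#     %load_ext jademagic
#
#     %%jade rendered
#     ul
#       li item one
#
#     # `rendered` now holds the converted HTML string in the user namespace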
|
bollwyvl/jademagic
|
jademagic.py
|
Python
|
bsd-3-clause
| 1,775
| 0
|
# This is to see that the total memory usage doesn't increase with time
# i.e. no leakage / link between consecutive usages of hsp.
# This will run forever, to be monitored by the printout and some external monitor.
def t():
    from guppy import hsp
    from importlib import reload  # assumes Python 3; reload was a builtin in Python 2
    while 1:
        import guppy.heapy.UniSet
        import gc
        reload(guppy.heapy.UniSet)
        hp = hsp()
        x = None
        x = hp.heap()
        print(x)
        gc.collect()
        print(x[0])
        print(x[1])
        print(x[2])
        gc.collect()
        print(x & dict)
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/test/test_sf.py
|
Python
|
gpl-2.0
| 460
| 0.041304
|
import boto
from amslib.core.manager import BaseManager
from amslib.instance.instance import InstanceManager
import argparse
from errors import *
import pprint
import time
pp = pprint.PrettyPrinter(indent=4)
# Custom HealthCheck object to add support for failure threshold...seems to have been missed in boto
class HealthCheck(object):
"""An individual health check"""
POSTXMLBody = """
<HealthCheckConfig>
<IPAddress>%(ip_addr)s</IPAddress>
<Port>%(port)s</Port>
<Type>%(type)s</Type>
%(resource_path)s
%(fqdn_part)s
%(string_match_part)s
%(request_interval)s
%(failure_threshold)s
</HealthCheckConfig>
"""
XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""
XMLRequestFailurePart = """<FailureThreshold>%(failure_threshold)d</FailureThreshold>"""
valid_request_intervals = (10, 30)
valid_failure_thresholds = range(1, 11) # valid values are integers 1-10
def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3):
"""
HealthCheck object
:type ip_addr: str
:param ip_addr: IP Address
:type port: int
:param port: Port to check
:type hc_type: str
:param ip_addr: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
:type resource_path: str
:param resource_path: Path to check
:type fqdn: str
:param fqdn: domain name of the endpoint to check
:type string_match: str
:param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
:type request_interval: int
:param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
:type failure_threshold: int
        :param failure_threshold: The number of consecutive times a health check must fail before Amazon Route 53 marks the resource as down.
"""
self.ip_addr = ip_addr
self.port = port
self.hc_type = hc_type
self.resource_path = resource_path
self.fqdn = fqdn
self.string_match = string_match
if failure_threshold in self.valid_failure_thresholds:
self.failure_threshold = failure_threshold
else:
raise AttributeError(
"Valid values for request_interval are: %s" %
",".join(str(i) for i in self.valid_failure_thresholds))
if request_interval in self.valid_request_intervals:
self.request_interval = request_interval
else:
raise AttributeError(
"Valid values for request_interval are: %s" %
",".join(str(i) for i in self.valid_request_intervals))
def to_xml(self):
params = {
'ip_addr': self.ip_addr,
'port': self.port,
'type': self.hc_type,
'resource_path': "",
'fqdn_part': "",
'string_match_part': "",
'request_interval': (self.XMLRequestIntervalPart %
{'request_interval': self.request_interval}),
'failure_threshold': (self.XMLRequestFailurePart %
{'failure_threshold': self.failure_threshold}),
}
if self.fqdn is not None:
params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn}
if self.string_match is not None:
params['string_match_part'] = self.XMLStringMatchPart % {'string_match' : self.string_match}
if self.resource_path is not None:
params['resource_path'] = self.XMLResourcePathPart % {'resource_path' : self.resource_path}
return self.POSTXMLBody % params
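# Construction sketch (address, port and paths are illustrative):
#
#     hc = HealthCheck('203.0.113.10', 80, 'HTTP', '/healthz',
#                      fqdn='www.example.com', request_interval=30,
#                      failure_threshold=3)
#     xml_body = hc.to_xml()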
# custom version of the boto.route53.record.Record module to add support for failover resource records and to fix the missing health chech field on a response
class Record(object):
"""An individual ResourceRecordSet"""
HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
%(health_check)s
</ResourceRecordSet>"""
WRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Weight>%(weight)s</Weight>
"""
RRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
"""
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
%(records)s
</ResourceRecords>"""
ResourceRecordBody = """<ResourceRecord>
<Value>%s</Value>
</ResourceRecord>"""
AliasBody = """<AliasTarget>
<HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
<DNSName>%(dns_name)s</DNSName>
%(eval_target_health)s
</AliasTarget>"""
EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
valid_failover_roles = ['PRIMARY', 'SECONDARY']
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover_role=None):
self.name = name
self.type = type
self.ttl = ttl
if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.identifier = identifier
self.weight = weight
self.region = region
self.alias_evaluate_target_health = alias_evaluate_target_health
self.health_check = health_check
self.failover_role = None
if failover_role in self.valid_failover_roles or failover_role is None:
self.failover_role = failover_role
else:
raise AttributeError(
"Valid values for failover_role are: %s" %
",".join(self.valid_failover_roles))
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
def set_alias(self, alias_hosted_zone_id, alias_dns_name,
alias_evaluate_target_health=False):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.alias_evaluate_target_health = alias_evaluate_target_health
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
if self.alias_evaluate_target_health is not None:
eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
else:
eval_target_health = ""
body = self.AliasBody % { "hosted_zone_id": self.alias_hosted_zone_id,
"dns_name": self.alias_dns_name,
"eval_target_health": eval_target_health }
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier is not None and self.weight is not None:
weight = self.WRRBody % {"identifier": self.identifier, "weight":
self.weight}
elif self.identifier is not None and self.region is not None:
weight = self.RRRBody % {"identifier": self.identifier, "region":
self.region}
elif self.identifier is not None and self.failover_role is not None:
weight = self.FailoverBody % {"identifier": self.identifier, "failover":
self.failover_role}
health_check = ""
if self.health_check is not None:
health_check = self.HealthCheckBody % (self.health_check)
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
"health_check": health_check
}
return self.XMLBody % params
def to_print(self):
rr = ""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Show alias
rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
if self.alias_evaluate_target_health is not None:
rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health
else:
# Show resource record(s)
rr = ",".join(self.resource_records)
if self.identifier is not None and self.weight is not None:
rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
elif self.identifier is not None and self.region is not None:
rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
return rr
    # this is a terrible thing to have to do, but I had to monkeypatch this to get it to work until a new version of boto
    # comes out that includes the missing health_check field, addressed in this pull request:
# https://github.com/jzbruno/boto/commit/075634f49441ff293e1717d44c04862b257f65c6
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Type':
self.type = value
elif name == 'TTL':
self.ttl = value
elif name == 'Value':
self.resource_records.append(value)
elif name == 'HostedZoneId':
self.alias_hosted_zone_id = value
elif name == 'DNSName':
self.alias_dns_name = value
elif name == 'SetIdentifier':
self.identifier = value
elif name == 'EvaluateTargetHealth':
self.alias_evaluate_target_health = value
elif name == 'Weight':
self.weight = value
elif name == 'Region':
self.region = value
        # following 2 lines add support for parsing the health check id
elif name == 'HealthCheckId':
self.health_check = value
# following 2 lines add support for parsing the failover role
elif name == 'Failover':
self.failover_role = value
def startElement(self, name, attrs, connection):
return None
class Route53Manager(BaseManager):
def __get_boto_conn(self):
        if 'route53' not in self.boto_conns:
self.boto_conns["route53"] = boto.connect_route53(aws_access_key_id=self.settings.AWS_ACCESS_KEY, aws_secret_access_key=self.settings.AWS_SECRET_KEY)
################-------------START MONKEYPATCH-------------#####################
# this is a terrible thing to have to do, but I had to monkeypatch this to get it to work until boto supports the needed functionality
# related pull requests:
# https://github.com/boto/boto/pull/2195
# https://github.com/boto/boto/pull/2222
boto.route53.record.Record = Record
################-------------END MONKEYPATCH-------------#####################
return self.boto_conns["route53"]
#TODO implement interactive mode
#TODO need to figure out how to handle a host in multiple zones (possible: preferred zone, and handling in interactive)
def discovery(self, prefer_hostname='external', interactive=False, load_route53_only=False):
botoconn = self.__get_boto_conn()
health_checks = botoconn.get_list_health_checks()
ids = []
for health_check in health_checks['ListHealthChecksResponse']['HealthChecks']:
hcid = self.store_healthcheck(health_check)
if hcid:
ids.append(hcid)
#remove healthchecks that no longer exist in route53
if len(ids):
self.db.execute("delete from route53_healthchecks where id not in " + " ({0})".format(",".join("{0}".format(n) for n in ids)))
self.dbconn.commit()
zonesdata = botoconn.get_all_hosted_zones()
for zd in zonesdata['ListHostedZonesResponse']['HostedZones']:
comment = None
if "Comment" in zd['Config']:
comment = zd['Config']['Comment']
# strip the trailing '.' on the zone name
zone_name = zd['Name']
if zone_name[len(zone_name)-1] == '.':
zone_name = zone_name[0:len(zone_name)-1]
self.db.execute("replace into route53_zones set zone_id=%s, name=%s, record_sets=%s, comment=%s", (zd['Id'].replace('/hostedzone/',''), zone_name, zd['ResourceRecordSetCount'], comment))
self.dbconn.commit()
self.logger.debug(zd)
# pp.pprint(zonesdata)
zones = botoconn.get_zones()
self.logger.debug(zones)
# since we are pulling a complete list every time, we do not need records that exist prior to this load step #TODO should possibly implement more atomic way of doing this
self.db.execute("truncate route53_records")
self.dbconn.commit()
for z in zones:
self.logger.debug(z)
recs = z.get_records()
zone_id = recs.hosted_zone_id
# if identifier is set, then the record is one of WRR, latency or failover. WRR will include a value for weight,
# latency will include a value for region, and failover will not include weight or region #TODO verify assumption on failover type
for r in recs:
name = r.name
                #TODO need to find out if I could ever get relative hostnames (rather than fqdn) back from this API call
if name[len(name)-1] == '.':
name = name[0:len(name)-1]
ident = r.identifier
if not r.identifier:
ident = ""
self.db.execute("insert into route53_records set zone_id=%s, name=%s, type=%s, identifier=%s, resource_records=%s, ttl=%s, alias_hosted_zone_id=%s, "
"alias_dns_name=%s, weight=%s, region=%s, healthcheck_id=%s on duplicate key update resource_records=%s, ttl=%s, alias_hosted_zone_id=%s, "
"alias_dns_name=%s, weight=%s, region=%s, healthcheck_id=%s",
(zone_id, name, r.type, ident, "\n".join(r.resource_records), r.ttl, r.alias_hosted_zone_id, r.alias_dns_name, r.weight, r.region, r.health_check,
"\n".join(r.resource_records), r.ttl, r.alias_hosted_zone_id, r.alias_dns_name, r.weight, r.region, r.health_check))
self.dbconn.commit()
self.logger.info("Found {0} record for {1}".format(r.type, r.name))
if not load_route53_only:
self.db.execute("select instance_id, host, ip_internal, ip_external, hostname_internal, hostname_external from hosts")
hosts = self.db.fetchall()
if not hosts:
self.logger.warning("No hosts found, try running: ams host discovery")
return
self.logger.debug("number of hosts {0}".format(len(hosts)))
for host in hosts:
if not (host[2] or host[3] or host[4] or host[5]):
# self.logger.debug("Skipping {0}({1}) as it has no hostname or ip information".format(host[1], host[0]))
continue
hostname = None
if prefer_hostname == 'internal':
if not hostname:
hostname = self.get_fqdn_for_host(host[2])
if not hostname:
hostname = self.get_fqdn_for_host(host[4])
if not hostname:
hostname = self.get_fqdn_for_host(host[3])
if not hostname:
hostname = self.get_fqdn_for_host(host[5])
elif prefer_hostname == 'external':
if not hostname:
hostname = self.get_fqdn_for_host(host[3])
if not hostname:
hostname = self.get_fqdn_for_host(host[5])
if not hostname:
hostname = self.get_fqdn_for_host(host[2])
if not hostname:
hostname = self.get_fqdn_for_host(host[4])
if hostname and hostname != host[1]:
self.logger.info("Found hostname for instance {0}, updating from {1} to {2}".format(host[0], host[1], hostname))
self.db.execute("update hosts set host=%s where instance_id=%s", (hostname, host[0]))
self.dbconn.commit()
def store_healthcheck(self, health_check):
resource_path = None
if 'ResourcePath' in health_check['HealthCheckConfig']:
resource_path = health_check['HealthCheckConfig']['ResourcePath']
search_string = None
if 'SearchString' in health_check['HealthCheckConfig']:
search_string = health_check['HealthCheckConfig']['SearchString']
fqdn = None
if 'FullyQualifiedDomainName' in health_check['HealthCheckConfig']:
fqdn = health_check['HealthCheckConfig']['FullyQualifiedDomainName']
ipaddr = None
if 'IPAddress' in health_check['HealthCheckConfig']:
ipaddr = health_check['HealthCheckConfig']['IPAddress']
self.logger.info("Storing health check: {0}://{1}:{2}".format(health_check['HealthCheckConfig']['Type'], ipaddr, health_check['HealthCheckConfig']['Port']))
self.db.execute("insert into route53_healthchecks set healthcheck_id=%s, ip=%s, port=%s, type=%s, request_interval=%s, "
"failure_threshold=%s, resource_path=%s, search_string=%s, fqdn=%s, caller_reference=%s "
"on duplicate key update ip=%s, port=%s, type=%s, request_interval=%s, failure_threshold=%s, "
"resource_path=%s, search_string=%s, fqdn=%s, caller_reference=%s",
(health_check['Id'], ipaddr, health_check['HealthCheckConfig']['Port'],
health_check['HealthCheckConfig']['Type'], health_check['HealthCheckConfig']['RequestInterval'],
health_check['HealthCheckConfig']['FailureThreshold'], resource_path, search_string, fqdn, health_check['CallerReference'],
ipaddr, health_check['HealthCheckConfig']['Port'], health_check['HealthCheckConfig']['Type'],
health_check['HealthCheckConfig']['RequestInterval'], health_check['HealthCheckConfig']['FailureThreshold'],
resource_path, search_string, fqdn, health_check['CallerReference']))
self.dbconn.commit()
self.db.execute("select id from route53_healthchecks where healthcheck_id=%s", (health_check['Id'], ))
row = self.db.fetchone()
if not row:
return None
else:
return row[0]
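    # Shape sketch of the health_check dict consumed above (keys taken from
    # the accesses in this method; values are illustrative):
    #
    #     {'Id': 'abcd-1234',
    #      'CallerReference': 'ref-1',
    #      'HealthCheckConfig': {'IPAddress': '203.0.113.10', 'Port': 80,
    #                            'Type': 'HTTP', 'ResourcePath': '/healthz',
    #                            'RequestInterval': 30, 'FailureThreshold': 3}}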
def create_health_check(self, ip, port, protocol, request_interval=30, failure_threshold=3, resource_path=None, fqdn=None, string_match=None):
botoconn = self.__get_boto_conn()
hc_type = None
if protocol == 'tcp':
hc_type = 'TCP'
elif protocol == 'http':
hc_type = 'HTTP'
elif protocol == 'https':
hc_type = 'HTTPS'
if not hc_type:
raise AttributeError("Protocol must be one of [tcp, http, https]")
if not ip:
raise AttributeError("ip must be provided for healthcheck")
if not port:
raise AttributeError("port must be provided for healthcheck")
if string_match and hc_type in ('HTTP', 'HTTPS'):
hc_type += '_STR_MATCH'
hc = HealthCheck(ip, port, hc_type, resource_path, fqdn, string_match, request_interval, failure_threshold)
self.logger.debug(hc)
response = botoconn.create_health_check(hc)
#TODO need to find out what an error response looks like
hcid = None
if 'CreateHealthCheckResponse' in response:
if 'HealthCheck' in response['CreateHealthCheckResponse']:
hcid = self.store_healthcheck(response['CreateHealthCheckResponse']['HealthCheck'])
if hcid:
conf = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig']
self.logger.info("Created healthcheck {0}: {1}://{2}:{3}".format(hcid, conf['Type'], conf['IPAddress'], conf['Port']))
return hcid
def create_dns_record(self, fqdn, record_type, zone_id, records, ttl=60, routing_policy='simple', weight=None, identifier=None, region=None, health_check_id=None, failover_role="primary"):
botoconn = self.__get_boto_conn()
if routing_policy == 'simple':
weight = None
identifier = None
region = None
health_check_id = None
failover_role = None
elif routing_policy == 'weighted':
region = None
failover_role = None
if not weight:
raise AttributeError("weight must be provided for weighted routing policy")
if not identifier:
raise AttributeError("identifier must be provided for weighted routing policy")
elif routing_policy == 'latency':
weight = None
failover_role = None
if not region:
raise AttributeError("region must be provided for latency routing policy")
if not identifier:
raise AttributeError("identifier must be provided for latency routing policy")
elif routing_policy == 'failover':
weight = None
region = None
if not failover_role:
raise AttributeError("failover_role must be provided for failover routing policy")
if not identifier:
raise AttributeError("identifier must be provided for failover routing policy")
health_check = None
if health_check_id:
self.db.execute("select healthcheck_id from route53_healthchecks where id=%s", (health_check_id, ))
row = self.db.fetchone()
if not row:
raise ResourceNotFound("Could not find information on health check {0}".format(health_check_id))
health_check = row[0]
zones = botoconn.get_zones()
zone = None
# unfortunately boto's get_zone only takes a zone name which is not necessarily unique :(
for z in zones:
if z.id == zone_id:
zone = z
break
if not zone:
raise ResourceNotFound("Zone ID {0} not found".format(zone_id))
rrset = zone.get_records()
record_type = record_type.upper()
if failover_role is not None:
failover_role = failover_role.upper()
rec = Record(name=fqdn, type=record_type, ttl=ttl, resource_records=records, identifier=identifier, weight=weight, region=region, health_check=health_check, failover_role=failover_role)
rrset.add_change_record('CREATE', rec)
response = rrset.commit()
if 'ChangeResourceRecordSetsResponse' in response:
name = rec.name
if name[len(name)-1] == '.':
name = name[0:len(name)-1]
ident = rec.identifier
if not rec.identifier:
ident = ""
self.db.execute("insert into route53_records set zone_id=%s, name=%s, type=%s, identifier=%s, resource_records=%s, ttl=%s, alias_hosted_zone_id=%s, "
"alias_dns_name=%s, weight=%s, region=%s, healthcheck_id=%s on duplicate key update resource_records=%s, ttl=%s, alias_hosted_zone_id=%s, "
"alias_dns_name=%s, weight=%s, region=%s, healthcheck_id=%s",
(zone_id, name, rec.type, ident, "\n".join(rec.resource_records), rec.ttl, rec.alias_hosted_zone_id, rec.alias_dns_name, rec.weight, rec.region, rec.health_check,
"\n".join(rec.resource_records), rec.ttl, rec.alias_hosted_zone_id, rec.alias_dns_name, rec.weight, rec.region, rec.health_check))
self.dbconn.commit()
self.logger.info("Created new dns entry for {0} -> {1}".format(fqdn, " \\n ".join(records)))
self.db.execute("update route53_zones z set record_sets = (select count(*) from route53_records where zone_id=z.zone_id)")
self.dbconn.commit()
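    # Call sketch (zone id, names and address are illustrative; `mgr` is an
    # instance of this class):
    #
    #     mgr.create_dns_record('api.example.com', 'A', 'Z123EXAMPLE',
    #                           ['203.0.113.10'], ttl=60,
    #                           routing_policy='weighted', weight=10,
    #                           identifier='api-a')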
def get_fqdn_for_host(self, host_or_ip):
if not host_or_ip:
return None
self.db.execute("select name from route53_records where identifier = '' and type in ('A', 'CNAME') and resource_records = %s", (host_or_ip,))
row = self.db.fetchone()
if not row:
return None
name = row[0]
return name
def delete_dns_record(self, zone_id, fqdn, record_type, identifier=None):
# normalize values
record_type = record_type.upper()
fqdn = fqdn.lower()
if fqdn[len(fqdn)-1] != '.':
fqdn += '.'
botoconn = self.__get_boto_conn()
zones = botoconn.get_zones()
zone = None
# unfortunately boto's get_zone only takes a zone name which is not necessarily unique :(
for z in zones:
if z.id == zone_id:
zone = z
break
if not zone:
raise ResourceNotFound("Zone ID {0} not found".format(zone_id))
rrset = zone.get_records()
record = None
for r in rrset:
if r.name == fqdn and r.type == record_type and r.identifier == identifier:
record = r
break
if not record:
raise ResourceNotFound("Cannot find DNS record for {0} {1} {2} {3}", format(zone_id, fqdn, record_type, identifier))
rrset.add_change_record('DELETE', record)
# for some reason, a properly formed response is not being returned, but the record is deleted from route53
# adding the response to the debug log for now to keep an eye on it
response = rrset.commit()
self.logger.debug(response)
#if 'ChangeResourceRecordSetsResponse' in response:
name = record.name
if name[len(name)-1] == '.':
name = name[0:len(name)-1]
if not identifier:
identifier = ""
self.db.execute("delete from route53_records where zone_id=%s and name=%s and type=%s and identifier=%s", (zone_id, fqdn, record_type, identifier))
self.dbconn.commit()
self.logger.info("Deleted DNS record for {0} {1} {2} {3}".format(zone_id, fqdn, record_type, identifier))
self.db.execute("update route53_zones z set record_sets = (select count(*) from route53_records where zone_id=z.zone_id)")
self.dbconn.commit()
def delete_healthcheck(self, healthcheck_id, force=False):
self.db.execute("select h.healthcheck_id, r.healthcheck_id, r.name, r.zone_id from route53_healthchecks h left join route53_records r on h.healthcheck_id=r.healthcheck_id where id=%s", (healthcheck_id, ))
row = self.db.fetchone()
if not row:
raise ResourceNotFound("Health check {0} not found".format(healthcheck_id))
if row[1] is not None and not force:
raise ResourceNotAvailable("Health check {0} is currently in use for a dns entry in zone {1} with FQDN of {2}".format(healthcheck_id, row[3], row[2]))
botoconn = self.__get_boto_conn()
response = botoconn.delete_health_check(row[0])
if 'DeleteHealthCheckResponse' in response:
self.db.execute("delete from route53_healthchecks where id=%s", (healthcheck_id, ))
self.dbconn.commit()
self.logger.info("Deleted health check {0} ({1})".format(healthcheck_id, row[0]))
def argument_parser_builder(self, parser):
rsubparser = parser.add_subparsers(title="action", dest='action')
# ams route53 discovery
discparser = rsubparser.add_parser("discovery", help="Run discovery on route53 to populate database with DNS data")
discparser.add_argument("--interactive", help="Enable interactive mode for applying discovered host names to hosts (not enabled yet)", action='store_true')
discparser.add_argument("--prefer", default='external', choices=['internal', 'external'], help="Sets which hostname gets preference if DNS records are defined for an internal address and an external address")
discparser.add_argument("--load-only", help="Only load the route53 tables, but do not apply hostname changes to hosts", action='store_true')
discparser.set_defaults(func=self.command_discover)
listparser = rsubparser.add_parser("list", help="List Route53 DNS information currently in the database")
listparser.add_argument("resource", nargs='?', default="dns", choices=["dns", "healthchecks", "zones"], help="Resource type to list")
listparser.set_defaults(func=self.command_list)
adddnssharedargs = argparse.ArgumentParser(add_help=False)
adddnssharedargs.add_argument('fqdn', help="Fully qualified domain name for the entry. You can include the trailing dot(.) or it will be added automatically")
adddnssharedargs.add_argument('record_type', help="DNS record type (currently only supports A and CNAME)", choices=['A', 'CNAME'])
group = adddnssharedargs.add_mutually_exclusive_group(required=True)
group.add_argument('--zone-id', help="Zone id to add DNS record to")
group.add_argument('--zone-name', help="Zone name to add DNS record to")
adddnssharedargs.add_argument('-t', '--ttl', help="TTL for the entry (default: 60)", type=int, default=60)
adddnssharedargs.add_argument('-r', '--routing-policy', help='The routing policy to use (default: simple)', choices=['simple', 'weighted', 'latency', 'failover'], default='simple')
adddnssharedargs.add_argument('-w', '--weight', type=int, help="Weighted routing policy: weight to assign to the dns resource")
adddnssharedargs.add_argument('--region', help="Latency routing policy: assigns the region for the dns resource for routing")
adddnssharedargs.add_argument('--health-check', type=int, help="health check id to associate with the record (for IDs, use: ams route53 list healthchecks)")
adddnssharedargs.add_argument('--failover-role', help="Failover routing policy: defines whether resource is primary or secondary", choices=['primary','secondary'], default='primary')
# ams route53 dns
dnsparser = rsubparser.add_parser("dns", help="DNS management operations")
dnssubparser = dnsparser.add_subparsers(title="operation", dest="operation")
# ams route53 dns create
creatednsparser = dnssubparser.add_parser("create", help="Create new DNS entry", parents=[adddnssharedargs])
creatednsparser.add_argument('-v', '--record-value', help="Value for the DNS record (currently only supports single-value entries)", required=True)
creatednsparser.add_argument('--identifier', help="Unique identifier to associate to a record that shares a name/type with other records in weighted, latency, or failover records")
creatednsparser.set_defaults(func=self.command_create_dns)
# ams route53 dns add
adddnsparser = dnssubparser.add_parser("add", help="add dns entries for host/instance", parents=[adddnssharedargs])
group = adddnsparser.add_mutually_exclusive_group(required=True)
group.add_argument('-H', '--host', help="Hostname (to find current hostname use: ams host list)")
group.add_argument('-i', '--instance', help="Instance ID")
adddnsparser.add_argument('--use', help="Define whether to use the public or private hostname/IP", choices=["public", "private"], default="public")
adddnsparser.add_argument('--identifier', help="Unique identifier to associate to a record that shares a name/type with other records in weighted, latency, or failover records. If not provided, one will be created from the hostname or instance id")
adddnsparser.add_argument('--update-hosts', action='store_true', help="(routing_policy=simple only) Updates the hostname for the host in the AMS hosts table (saving you from having to run route53 discovery to update)")
adddnsparser.add_argument('--configure-hostname', action='store_true', help="(routing_policy=simple only) Set the hostname on the host to the FQDN that was just added to the host or the currently set uname (uname will override the FQDN). Also applies the --update-hosts option (for Ubuntu and Redhat flavors, it will also edit the proper files to make this change permanent)")
group = adddnsparser.add_argument_group(title="Health Check Options", description="Use these options to create a health check for the dns record being added to host")
group.add_argument('--hc', action='store_true', help="Create a Route53 health check for host")
group.add_argument('--hc-port', type=int, help="Health check port")
group.add_argument('--hc-type', help="Health check type", choices=['tcp', 'http', 'https'])
group.add_argument('--hc-interval', type=int, help="Health check interval (10 or 30 seconds)", choices=[10,30], default=30)
group.add_argument('--hc-threshold', type=int, help="Number of times health check fails before the host is marked down by Route53", choices=range(1, 11), default=3)
group.add_argument('--hc-path', help="HTTP/HTTPS: health check resource path")
group.add_argument('--hc-fqdn', help="HTTP/HTTPS: health check fully qualified domain name")
group.add_argument('--hc-match', help="HTTP/HTTPS: health check response match string")
group.add_argument('--hc-ip', help="IP address to use for the healthcheck. Default is to use the instance's external IP, but this argument can be used to override")
adddnsparser.set_defaults(func=self.command_add_dns)
# ams route53 dns update
updatednsparser = dnssubparser.add_parser("update", help="Update a DNS entry")
updatednsparser.set_defaults(func=self.command_not_implemented)
# ams route53 dns delete
deletednsparser = dnssubparser.add_parser("delete", help="Delete a DNS entry")
deletednsparser.set_defaults(func=self.command_delete_dns)
deletednsparser.add_argument('fqdn', help="Fully qualified domain name for the entry. You can include the trailing dot(.) or it will be added automatically")
deletednsparser.add_argument('record_type', help="DNS record type (currently only supports A and CNAME)", choices=['A', 'CNAME'])
deletednsparser.add_argument('--identifier', help="Unique identifier for a record that shares a name/type with other records in weighted, latency, or failover records")
group = deletednsparser.add_mutually_exclusive_group(required=True)
group.add_argument('--zone-id', help="Zone id of the DNS record to delete")
group.add_argument('--zone-name', help="Zone name of the DNS record to delete")
# ams route53 healthchecks
healthparser = rsubparser.add_parser("healthcheck", help="Route53 healthcheck management operations")
healthsubparser = healthparser.add_subparsers(title="operation", dest="operation")
# ams route53 healthchecks create
createhealthparser = healthsubparser.add_parser("create", help="Create a new health check")
createhealthparser.add_argument('ip', help='IP address to health check')
createhealthparser.add_argument('port', type=int, help="Health check port")
createhealthparser.add_argument('type', help="Health check type", choices=['tcp', 'http', 'https'])
createhealthparser.add_argument('-i', '--interval', type=int, help="Health check interval (10 or 30 seconds)", choices=[10,30], default=30)
createhealthparser.add_argument('-f', '--failure-threshold', type=int, help="Number of times health check fails before the host is marked down by Route53", choices=range(1, 11), default=3)
createhealthparser.add_argument('-a', '--resource-path', help="HTTP/HTTPS: health check resource path")
createhealthparser.add_argument('-d', '--fqdn', help="HTTP/HTTPS: health check fully qualified domain name")
createhealthparser.add_argument('-s', '--string-match', help="HTTP/HTTPS: health check response match string")
createhealthparser.set_defaults(func=self.command_create_healthcheck)
# ams route53 healthchecks update
updatehealthparser = healthsubparser.add_parser("update", help="Update a health check")
updatehealthparser.set_defaults(func=self.command_not_implemented)
# ams route53 healthchecks delete
deletehealthparser = healthsubparser.add_parser("delete", help="Delete a health check")
deletehealthparser.add_argument('healthcheck_id', type=int, help='ID of the health check to delete. To list health check ID run: ams route53 list healthchecks')
deletehealthparser.add_argument('--force', action='store_true', help="Force the deletion of a health check even if it is still defined as the health check for a record")
deletehealthparser.set_defaults(func=self.command_delete_healthcheck)
def command_delete_healthcheck(self, args):
self.delete_healthcheck(args.healthcheck_id, args.force)
def command_delete_dns(self, args):
zone_id = None
zone_name = None
if args.zone_id:
whereclause = 'zone_id=%s'
wherevar = args.zone_id
if args.zone_name:
whereclause = 'name=%s'
wherevar = args.zone_name
self.db.execute("select zone_id, name from route53_zones where " + whereclause, (wherevar, ))
rows = self.db.fetchall()
if not rows:
self.logger.error("No Route53 zone ID found")
return
elif len(rows) > 1:
self.logger.error("Multiple zones found for zone name: {0}. Use --zone-id instead".format(args.zone_name))
return
else:
zone_name = rows[0][1]
zone_id = rows[0][0]
self.delete_dns_record(zone_id=zone_id, fqdn=args.fqdn, record_type=args.record_type, identifier=args.identifier)
def command_add_dns(self, args):
if args.host:
whereclause = "host=%s"
wherevar = args.host
elif args.instance:
whereclause = "instance_id=%s"
wherevar = args.instance
self.db.execute("select instance_id, host, hostname_internal, hostname_external, ip_internal, ip_external, availability_zone from hosts where `terminated`=0 and " + whereclause, (wherevar, ))
row = self.db.fetchone()
if not row:
self.logger.error("{0} not found".format(wherevar))
return
instance_id = row[0]
if args.use == 'public':
cname_entry = row[3]
ip_entry = row[5]
elif args.use == 'private':
cname_entry = row[2]
ip_entry = row[4]
if args.record_type == 'A':
entry_value = ip_entry
if not entry_value:
self.logger.error("No {0} ip address on instance to use for A record".format(args.use))
return
elif args.record_type == 'CNAME':
entry_value = cname_entry
if not entry_value:
self.logger.error("No {0} dns name on instance to use for CNAME record".format(args.use))
return
healthcheck_id = None
if args.hc:
hcip = None
if row[5]:
hcip = row[5]
if args.hc_ip:
hcip = args.hc_ip
if not hcip:
self.logger.error("Instance does not have a public IP address and there was no healthcheck IP override given")
return
healthcheck_id = self.create_health_check(ip=hcip, port=args.hc_port, protocol=args.hc_type, request_interval=args.hc_interval, failure_threshold=args.hc_threshold, resource_path=args.hc_path, fqdn=args.hc_fqdn, string_match=args.hc_match)
if not healthcheck_id:
self.logger.error("Unknown error creating health check")
return
self.logger.info("Created new health check with id: {0}".format(healthcheck_id))
if not args.identifier and args.routing_policy in ('weighted', 'latency', 'failover'):
# will use public dns, public ip, private dns, private (in that order of precedence) for the unique identifier if it is needed and not provided
if row[3]:
args.identifier = row[3]
elif row[5]:
args.identifier = row[5]
elif row[2]:
args.identifier = row[2]
elif row[4]:
args.identifier = row[4]
if args.routing_policy == 'latency' and not args.region:
args.region = self.parse_region_from_availability_zone(row[6])
if healthcheck_id:
args.health_check = healthcheck_id
args.record_value = entry_value
self.command_create_dns(args)
if (args.configure_hostname or args.update_hosts) and args.routing_policy == 'simple':
if args.configure_hostname:
self.logger.info("Waiting 30 seconds to give route53 time to reflect the new dns changes")
time.sleep(30)
im = InstanceManager(self.settings)
im.configure_hostname(instance_id, args.fqdn, args.configure_hostname)
def command_create_healthcheck(self, args):
self.create_health_check(ip=args.ip, port=args.port, protocol=args.type, request_interval=args.interval, failure_threshold=args.failure_threshold, resource_path=args.resource_path, fqdn=args.fqdn, string_match=args.string_match)
def command_create_dns(self, args):
zone_id = None
zone_name = None
if args.zone_id:
whereclause = 'zone_id=%s'
wherevar = args.zone_id
if args.zone_name:
whereclause = 'name=%s'
wherevar = args.zone_name
self.db.execute("select zone_id, name from route53_zones where " + whereclause, (wherevar, ))
rows = self.db.fetchall()
if not rows:
self.logger.error("No Route53 zone ID found")
return
elif len(rows) > 1:
self.logger.error("Multiple zones found for zone name: {0}. Use --zone-id instead".format(args.zone_name))
return
else:
zone_name = rows[0][1]
zone_id = rows[0][0]
# normalize the fqdn
fqdn = args.fqdn
if not fqdn.endswith('.'):
fqdn += '.'
#TODO should likely put a check here to make sure that the fqdn is valid for the zone name
self.create_dns_record(fqdn=fqdn, record_type=args.record_type, zone_id=zone_id, records=[args.record_value], ttl=args.ttl, routing_policy=args.routing_policy, weight=args.weight, identifier=args.identifier, region=args.region, health_check_id=args.health_check, failover_role=args.failover_role.upper())
def command_not_implemented(self, args):
self.logger.error("Function not implemented yet")
def command_discover(self, args):
self.discovery(args.prefer, args.interactive, args.load_only)
def command_list(self, args):
if args.resource == "dns":
self.db.execute("select r.name, r.type, r.weight, r.region, r.identifier, r.zone_id, z.name, r.resource_records, if(h.id is not null, concat(h.type,'://',h.ip,':',h.port, ' (', h.id,')'), null) from route53_zones z join route53_records r using(zone_id) left join route53_healthchecks h using (healthcheck_id) order by r.name")
rows = self.db.fetchall()
headers = ['fqdns', 'type', 'weight', 'region', 'identifier', 'zone id', 'zone name', 'resource records', 'health check (id)']
self.output_formatted("Route53 DNS", headers, rows)
elif args.resource == "healthchecks":
self.db.execute("select id, ip, port, type, request_interval, failure_threshold, resource_path, search_string, fqdn from route53_healthchecks")
rows = self.db.fetchall()
headers = ['id', 'ip', 'port', 'type', 'request interval', 'failure threshold', 'resource path', 'search string', 'fqdn']
self.output_formatted("Route53 Health Checks", headers, rows)
elif args.resource == "zones":
self.db.execute("select zone_id, name, record_sets, comment from route53_zones")
rows = self.db.fetchall()
headers = ['zone id', 'zone name', 'records', 'comment']
self.output_formatted("Route53 Zones", headers, rows)
|
ThisLife/aws-management-suite
|
amslib/network/route53.py
|
Python
|
mit
| 46,031
| 0.004627
|
# This test module covers support in various parts of the standard library
# for working with modules located inside zipfiles
# The tests are centralised in this fashion to make it easy to drop them
# if a platform doesn't support zipimport
import unittest
import test.test_support
import os
import os.path
import sys
import textwrap
import zipfile
import zipimport
import doctest
import inspect
import linecache
import pdb
import warnings
verbose = test.test_support.verbose
# Library modules covered by this test set
# pdb (Issue 4201)
# inspect (Issue 4223)
# doctest (Issue 4197)
# Other test modules with zipimport related tests
# test_zipimport (of course!)
# test_cmd_line_script (covers the zipimport support in runpy)
# Retrieve some helpers from other test cases
from test import test_doctest, sample_doctest
from test.test_importhooks import ImportHooksBaseTestCase
from test.test_cmd_line_script import temp_dir, _run_python, \
_spawn_python, _kill_python, \
_make_test_script, \
_compile_test_script, \
_make_test_zip, _make_test_pkg
def _run_object_doctest(obj, module):
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = test.test_support.get_original_stdout()
try:
finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
runner = doctest.DocTestRunner(verbose=verbose)
# Use the object's fully qualified name if it has one
# Otherwise, use the module's name
try:
name = "%s.%s" % (obj.__module__, obj.__name__)
except AttributeError:
name = module.__name__
for example in finder.find(obj, name, module):
runner.run(example)
f, t = runner.failures, runner.tries
if f:
raise test.test_support.TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
class ZipSupportTests(ImportHooksBaseTestCase):
# We use the ImportHooksBaseTestCase to restore
# the state of the import related information
# in the sys module after each test
# We also clear the linecache and zipimport cache
# just to avoid any bogus errors due to name reuse in the tests
def setUp(self):
linecache.clearcache()
zipimport._zip_directory_cache.clear()
ImportHooksBaseTestCase.setUp(self)
def test_inspect_getsource_issue4223(self):
test_src = "def foo(): pass\n"
with temp_dir() as d:
init_name = _make_test_script(d, '__init__', test_src)
name_in_zip = os.path.join('zip_pkg',
os.path.basename(init_name))
zip_name, run_name = _make_test_zip(d, 'test_zip',
init_name, name_in_zip)
os.remove(init_name)
sys.path.insert(0, zip_name)
import zip_pkg
self.assertEqual(inspect.getsource(zip_pkg.foo), test_src)
def test_doctest_issue4197(self):
# To avoid having to keep two copies of the doctest module's
# unit tests in sync, this test works by taking the source of
# test_doctest itself, rewriting it a bit to cope with a new
# location, and then throwing it in a zip file to make sure
# everything still works correctly
test_src = inspect.getsource(test_doctest)
test_src = test_src.replace(
"from test import test_doctest",
"import test_zipped_doctest as test_doctest")
test_src = test_src.replace("test.test_doctest",
"test_zipped_doctest")
test_src = test_src.replace("test.sample_doctest",
"sample_zipped_doctest")
sample_src = inspect.getsource(sample_doctest)
sample_src = sample_src.replace("test.test_doctest",
"test_zipped_doctest")
with temp_dir() as d:
script_name = _make_test_script(d, 'test_zipped_doctest',
test_src)
zip_name, run_name = _make_test_zip(d, 'test_zip',
script_name)
z = zipfile.ZipFile(zip_name, 'a')
z.writestr("sample_zipped_doctest.py", sample_src)
z.close()
if verbose:
zip_file = zipfile.ZipFile(zip_name, 'r')
print 'Contents of %r:' % zip_name
zip_file.printdir()
zip_file.close()
os.remove(script_name)
sys.path.insert(0, zip_name)
import test_zipped_doctest
# Some of the doc tests depend on the colocated text files
# which aren't available to the zipped version (the doctest
# module currently requires real filenames for non-embedded
# tests). So we're forced to be selective about which tests
# to run.
# doctest could really use some APIs which take a text
# string or a file object instead of a filename...
known_good_tests = [
test_zipped_doctest.SampleClass,
test_zipped_doctest.SampleClass.NestedClass,
test_zipped_doctest.SampleClass.NestedClass.__init__,
test_zipped_doctest.SampleClass.__init__,
test_zipped_doctest.SampleClass.a_classmethod,
test_zipped_doctest.SampleClass.a_property,
test_zipped_doctest.SampleClass.a_staticmethod,
test_zipped_doctest.SampleClass.double,
test_zipped_doctest.SampleClass.get,
test_zipped_doctest.SampleNewStyleClass,
test_zipped_doctest.SampleNewStyleClass.__init__,
test_zipped_doctest.SampleNewStyleClass.double,
test_zipped_doctest.SampleNewStyleClass.get,
test_zipped_doctest.old_test1,
test_zipped_doctest.old_test2,
test_zipped_doctest.old_test3,
test_zipped_doctest.old_test4,
test_zipped_doctest.sample_func,
test_zipped_doctest.test_DocTest,
test_zipped_doctest.test_DocTestParser,
test_zipped_doctest.test_DocTestRunner.basics,
test_zipped_doctest.test_DocTestRunner.exceptions,
test_zipped_doctest.test_DocTestRunner.option_directives,
test_zipped_doctest.test_DocTestRunner.optionflags,
test_zipped_doctest.test_DocTestRunner.verbose_flag,
test_zipped_doctest.test_Example,
test_zipped_doctest.test_debug,
test_zipped_doctest.test_pdb_set_trace,
test_zipped_doctest.test_pdb_set_trace_nested,
test_zipped_doctest.test_testsource,
test_zipped_doctest.test_trailing_space_in_test,
test_zipped_doctest.test_DocTestSuite,
test_zipped_doctest.test_DocTestFinder,
]
# These remaining tests are the ones which need access
# to the data files, so we don't run them
fail_due_to_missing_data_files = [
test_zipped_doctest.test_DocFileSuite,
test_zipped_doctest.test_testfile,
test_zipped_doctest.test_unittest_reportflags,
]
# Needed for test_DocTestParser and test_debug
deprecations = [
# Ignore all warnings about the use of class Tester in this module.
("class Tester is deprecated", DeprecationWarning)]
if sys.py3kwarning:
deprecations += [
("backquote not supported", SyntaxWarning),
("execfile.. not supported", DeprecationWarning)]
with test.test_support.check_warnings(*deprecations):
for obj in known_good_tests:
_run_object_doctest(obj, test_zipped_doctest)
def test_doctest_main_issue4197(self):
test_src = textwrap.dedent("""\
class Test:
">>> 'line 2'"
pass
import doctest
doctest.testmod()
""")
pattern = 'File "%s", line 2, in %s'
with temp_dir() as d:
script_name = _make_test_script(d, 'script', test_src)
exit_code, data = _run_python(script_name)
expected = pattern % (script_name, "__main__.Test")
if verbose:
print "Expected line", expected
print "Got stdout:"
print data
self.assert_(expected in data)
zip_name, run_name = _make_test_zip(d, "test_zip",
script_name, '__main__.py')
exit_code, data = _run_python(zip_name)
expected = pattern % (run_name, "__main__.Test")
if verbose:
print "Expected line", expected
print "Got stdout:"
print data
self.assert_(expected in data)
def test_pdb_issue4201(self):
test_src = textwrap.dedent("""\
def f():
pass
import pdb
pdb.runcall(f)
""")
with temp_dir() as d:
script_name = _make_test_script(d, 'script', test_src)
p = _spawn_python(script_name)
p.stdin.write('l\n')
data = _kill_python(p)
self.assert_(script_name in data)
zip_name, run_name = _make_test_zip(d, "test_zip",
script_name, '__main__.py')
p = _spawn_python(zip_name)
p.stdin.write('l\n')
data = _kill_python(p)
self.assert_(run_name in data)
def test_main():
test.test_support.run_unittest(ZipSupportTests)
test.test_support.reap_children()
if __name__ == '__main__':
test_main()
|
mancoast/CPythonPyc_test
|
cpython/266_test_zipimport_support.py
|
Python
|
gpl-3.0
| 10,482
| 0.000859
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright (c) 2013 Miguel Moreto <http://sites.google.com/site/miguelmoreto/>
#This file is part of pyComtrade.
#
# pyComtrade is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# pyComtrade is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyComtrade. If not, see <http://www.gnu.org/licenses/>.
# ====================================================================
# pyComtrade: A Python class for reading and writing IEEE
# Comtrade files based on the 1999 standard
#
#
# OBS: - The field names are equal to the Comtrade 1999 standard;
#
# Developed by Miguel Moreto
# Brazil - 2013
#
#
__version__ = "$Revision$" # SVN revision.
__date__ = "$Date$" # Date of the last SVN revision.
import os
import numpy
import struct
import traceback
class ComtradeRecord:
"""
A Python class for reading and writing IEEE Comtrade files.
This is the main class of pyComtrade.
"""
def __init__(self,filename):
"""
pyComtrade constructor:
Stores the filename, initialises (clears) the record variables
and prints a status message. Use ReadCFG() to parse the header file.
filename: string with the path for the .cfg file.
"""
self.filename = filename
self.filehandler = 0
# Station name, identification and revision year:
self.station_name = ''
self.rec_dev_id = ''
self.rev_year = 0000
# Number and type of channels:
self.TT = 0
self.A = 0 # Number of analog channels.
self.D = 0 # Number of digital channels.
# Analog channel information:
self.An = []
self.Ach_id = []
self.Aph = []
self.Accbm = []
self.uu = []
self.a = []
self.b = []
self.skew = []
self.min = []
self.max = []
self.primary = []
self.secondary = []
self.PS = []
# Digital channel information:
self.Dn = []
self.Dch_id = []
self.Dph = []
self.Dccbm = []
self.y = []
# Line frequency:
self.lf = 0
# Sampling rate information:
self.nrates = 0
self.samp = []
self.endsamp = []
# Date/time stamps:
# defined by: [dd,mm,yyyy,hh,mm,ss.ssssss]
self.start = [00,00,0000,00,00,0.0]
self.trigger = [00,00,0000,00,00,0.0]
# Data file type:
self.ft = ''
# Time stamp multiplication factor:
self.timemult = 0.0
self.DatFileContent = ''
print 'pyComtrade instance created!'
def clear(self):
"""
Clear the internal (private) variables of the class.
"""
self.filename = ''
self.filehandler = 0
# Station name, identification and revision year:
self.station_name = ''
self.rec_dev_id = ''
self.rev_year = 0000
# Number and type of channels:
self.TT = 0
self.A = 0 # Number of analog channels.
self.D = 0 # Number of digital channels.
# Analog channel information:
self.An = []
self.Ach_id = []
self.Aph = []
self.Accbm = []
self.uu = []
self.a = []
self.b = []
self.skew = []
self.min = []
self.max = []
self.primary = []
self.secondary = []
self.PS = []
# Digital channel information:
self.Dn = []
self.Dch_id = []
self.Dph = []
self.Dccbm = []
self.y = []
# Line frequency:
self.lf = 0
# Sampling rate information:
self.nrates = 0
self.samp = []
self.endsamp = []
# Date/time stamps:
# defined by: [dd,mm,yyyy,hh,mm,ss.ssssss]
self.start = [00,00,0000,00,00,0.0]
self.trigger = [00,00,0000,00,00,0.0]
# Data file type:
self.ft = ''
# Time stamp multiplication factor:
self.timemult = 0.0
self.DatFileContent = ''
def ReadCFG(self):
"""
Reads the Comtrade header file (.cfg).
"""
try:
self.filehandler = open(self.filename,'r')
# Processing first line:
line = self.filehandler.readline()
templist = line.split(',')
self.station_name = templist[0]
self.rec_dev_id = templist[1]
if len(templist) > 2:
self.rev_year = int(templist[2])
# Processing second line:
line = self.filehandler.readline().rstrip() # Read line and remove spaces and new line characters.
templist = line.split(',')
self.TT = int(templist[0])
self.A = int(templist[1].strip('A'))
self.D = int(templist[2].strip('D'))
# Processing analog channel lines:
for i in range(self.A): #@UnusedVariable
line = self.filehandler.readline()
templist = line.split(',')
self.An.append(int(templist[0]))
self.Ach_id.append(templist[1])
self.Aph.append(templist[2])
self.Accbm.append(templist[3])
self.uu.append(templist[4])
self.a.append(float(templist[5]))
self.b.append(float(templist[6]))
self.skew.append(float(templist[7]))
self.min.append(int(templist[8]))
self.max.append(int(templist[9]))
if len(templist) > 10:
self.primary.append(float(templist[10]))
if len(templist) > 11:
self.secondary.append(float(templist[11]))
if len(templist) > 12:
self.PS.append(templist[12])
# Processing digital channel lines:
for i in range(self.D): #@UnusedVariable
line = self.filehandler.readline()
templist = line.split(',')
self.Dn.append(int(templist[0]))
self.Dch_id.append(templist[1])
self.Dph.append(templist[2])
if len(templist) > 3:
self.Dccbm.append(templist[3])
if len(templist) > 4:
self.y.append(int(templist[4]))
# Read line frequency:
self.lf = int(float(self.filehandler.readline()))
# Read sampling rates:
self.nrates = int(self.filehandler.readline()) # nrates.
for i in range(self.nrates): #@UnusedVariable
line = self.filehandler.readline()
templist = line.split(',')
self.samp.append(int(float(templist[0])))
self.endsamp.append(int(float(templist[1])))
# Read start date and time ([dd,mm,yyyy,hh,mm,ss.ssssss]):
line = self.filehandler.readline()
templist = line.split('/')
self.start[0] = int(templist[0]) # day.
self.start[1] = int(templist[1]) # month.
templist = templist[2].split(',')
self.start[2] = int(templist[0]) # year.
templist = templist[1].split(':')
self.start[3] = int(templist[0]) # hours.
self.start[4] = int(templist[1]) # minutes.
self.start[5] = float(templist[2]) # seconds.
# Read trigger date and time ([dd,mm,yyyy,hh,mm,ss.ssssss]):
line = self.filehandler.readline()
templist = line.split('/')
self.trigger[0] = int(templist[0]) # day.
self.trigger[1] = int(templist[1]) # month.
templist = templist[2].split(',')
self.trigger[2] = int(templist[0]) # year.
templist = templist[1].split(':')
self.trigger[3] = int(templist[0]) # hours.
self.trigger[4] = int(templist[1]) # minutes.
self.trigger[5] = float(templist[2]) # seconds.
# Read file type:
self.ft = self.filehandler.readline().rstrip() # Strip the newline from the file type field.
# Read time stamp multiplication factor (the attribute is timemult, as
# initialised in __init__ and clear):
self.timemult = self.filehandler.readline().strip()
if self.timemult:
self.timemult = float(self.timemult)
else:
self.timemult = 1.0
# END READING .CFG FILE.
self.filehandler.close() # Close file.
except Exception:
# If the cfg parsing doesn't go well, warn the user and explain
print "Invalid cfg file! Traceback follows for debugging:"
traceback.print_exc()
return 2
def getNumberOfSamples(self):
"""
Return the number of samples of the oscillographic record.
Only one sampling rate is taken into account for now.
"""
return self.endsamp[0]
def getSamplingRate(self):
"""
Return the sampling rate.
Only one sampling rate is taken into account for now.
"""
return self.samp[0]
def getTime(self):
"""
Creates a time stamp vector based on the number of
samples and the sampling rate.
"""
T = 1/float(self.samp[self.nrates-1])
N = self.endsamp[self.nrates-1]
# endpoint=False keeps the sample step exactly T: 0, T, 2T, ..., (N-1)*T.
t = numpy.linspace(0, N * T, N, endpoint=False)
return t
def getAnalogID(self,num):
"""
Returns the COMTRADE ID of a given channel number.
The number to be given is the same of the COMTRADE header.
"""
listidx = self.An.index(num) # Get the position of the channel number.
return self.Ach_id[listidx]
def getDigitalID(self,num):
"""
Reads the COMTRADE ID of a given channel number.
The number to be given is the same of the COMTRADE header.
"""
listidx = self.Dn.index(num) # Get the position of the channel number.
return self.Dch_id[listidx]
def getAnalogType(self,num):
"""
Returns the type of the channel 'num' based
on its unit stored in the Comtrade header file.
Returns 'V' for a voltage channel and 'I' for a current channel.
"""
listidx = self.An.index(num)
unit = self.uu[listidx]
if unit == 'kV' or unit == 'V':
return 'V'
elif unit == 'A' or unit == 'kA':
return 'I'
else:
print 'Unknown channel type'
return 0
def getAnalogUnit(self,num):
"""
Returns the COMTRADE channel unit (e.g., kV, V, kA, A)
of a given channel number.
The number to be given is the same of the COMTRADE header.
"""
listidx = self.An.index(num) # Get the position of the channel number.
return self.uu[listidx]
def ReadDataFile(self):
"""
Reads the contents of the Comtrade .dat file and store them in a
private variable.
For accessing a specific channel data, see methods getAnalogData and
getDigitalData.
"""
if os.path.isfile(self.filename[0:-4] + '.dat'):
filename = self.filename[0:-4] + '.dat'
elif os.path.isfile(self.filename[0:-4] + '.DAT'):
filename = self.filename[0:-4] + '.DAT'
else:
print "Data file File not found."
return 1
self.filehandler = open(filename,'rb')
self.DatFileContent = self.filehandler.read()
# END READING .dat FILE.
self.filehandler.close() # Close file.
return 0
def getAnalogChannelData(self,ChNumber):
"""
Returns an array of numbers containing the data values of the channel
number "ChNumber".
ChNumber is the number of the channel as in the .cfg file.
"""
if not self.DatFileContent:
print "No data file content. Use the method ReadDataFile first"
return 0
if (ChNumber > self.A):
print "Channel number greater than the total number of channels."
return 0
# Formatting string for the struct module:
str_struct = "ii%dh" %(self.A + int(numpy.ceil((float(self.D)/float(16)))))
# Number of bytes per sample:
NB = 4 + 4 + self.A*2 + int(numpy.ceil((float(self.D)/float(16))))*2
# Number of samples:
N = self.getNumberOfSamples()
# Empty column vector:
values = numpy.empty((N,1))
ch_index = self.An.index(ChNumber)
# Reading the values from DatFileContent string:
for i in range(N):
data = struct.unpack(str_struct,self.DatFileContent[i*NB:(i*NB)+NB])
values[i] = data[ChNumber+1] # The first two numbers are the sample index and timestamp.
values = values * self.a[ch_index] # a factor
values = values + self.b[ch_index] # b factor
return values
def getDigitalChannelData(self,ChNumber):
"""
Returns an array of numbers (0 or 1) containing the values of the
digital channel status.
ChNumber: digital channel number.
"""
if not self.DatFileContent:
print "No data file content. Use the method ReadDataFile first"
return 0
if (ChNumber > self.D):
print "Digital channel number greater than the total number of channels."
return 0
# Formatting string for the struct module:
str_struct = "ii%dh%dH" %(self.A, int(numpy.ceil((float(self.D)/float(16)))))
# Number of bytes per sample:
NB = 4 + 4 + self.A*2 + int(numpy.ceil((float(self.D)/float(16))))*2
# Number of samples:
N = self.getNumberOfSamples()
# Empty column vector:
values = numpy.empty((N,1))
# Index of the 16-bit word where the digital channel is stored. Every word
# contains 16 digital channels:
byte_number = int(numpy.ceil((ChNumber-1)/16)+1)
# Bit value of the digital channel. E.g. channel 1 has value 2^0=1, channel
# 2 has value 2^1 = 2, channel 3 => 2^2=4 and so on.
digital_ch_value = (1<<(ChNumber-1-(byte_number-1)*16))
# Reading the values from DatFileContent string:
for i in range(N):
data = struct.unpack(str_struct,self.DatFileContent[i*NB:(i*NB)+NB])
# The first two numbers are the sample index and timestamp.
# AND logic extracts only one channel from the 16-bit word.
# Normalize the output to 0 or 1:
values[i] = 1 if (digital_ch_value & data[self.A+1+byte_number]) else 0
# Return the array.
return values
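# Minimal usage sketch (illustrative; assumes './example.cfg' plus a matching
# binary .dat file exist and that analog channel 1 is defined in the header):
#
#   rec = ComtradeRecord('./example.cfg')
#   rec.ReadCFG()
#   if rec.ReadDataFile() == 0:
#       t = rec.getTime()
#       ch1 = rec.getAnalogChannelData(1)
#       print rec.getAnalogID(1), rec.getAnalogUnit(1)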
|
ldemattos/openComtradeViewer
|
src/pyComtrade.py
|
Python
|
gpl-3.0
| 15,076
| 0.022221
|
__version__ = "0.1.3"
get_version = lambda: __version__
|
eternalfame/django-russian_fields
|
russian_fields/__init__.py
|
Python
|
mit
| 56
| 0.035714
|
# highSpeedManuveringCapacitorNeedMultiplierPostPercentCapacitorNeedLocationShipModulesRequiringHighSpeedManuvering
#
# Used by:
# Implants named like: Eifyr and Co. 'Rogue' High Speed Maneuvering HS (6 of 6)
# Skill: High Speed Maneuvering
type = "passive"
def handler(fit, container, context):
level = container.level if "skill" in context else 1
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("High Speed Maneuvering"),
"capacitorNeed", container.getModifiedItemAttr("capacitorNeedMultiplier") * level)
|
Ebag333/Pyfa
|
eos/effects/highspeedmanuveringcapacitorneedmultiplierpostpercentcapacitorneedlocationshipmodulesrequiringhighspeedmanuvering.py
|
Python
|
gpl-3.0
| 568
| 0.003521
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list
from ansible.module_utils.connection import exec_command
_DEVICE_CONFIGS = {}
vyos_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in vyos_argument_spec:
if key != 'provider' and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
if provider:
for param in ('password',):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
def get_config(module, target='commands'):
cmd = ' '.join(['show configuration', target])
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=err)
cfg = str(out).strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def run_commands(module, commands, check_rc=True):
responses = list()
for cmd in to_list(commands):
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=err, rc=rc)
responses.append(out)
return responses
def load_config(module, commands, commit=False, comment=None):
rc, out, err = exec_command(module, 'configure')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', output=err)
for cmd in to_list(commands):
rc, out, err = exec_command(module, cmd)
if rc != 0:
# discard any changes in case of failure
exec_command(module, 'exit discard')
module.fail_json(msg='configuration failed')
diff = None
if module._diff:
rc, out, err = exec_command(module, 'compare')
if not out.startswith('No changes'):
rc, out, err = exec_command(module, 'show')
diff = str(out).strip()
if commit:
cmd = 'commit'
if comment:
cmd += ' comment "%s"' % comment
exec_command(module, cmd)
if not commit:
exec_command(module, 'exit discard')
else:
exec_command(module, 'exit')
if diff:
return diff
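# Illustrative sketch of how a module might use these helpers (the commands
# are made up; vyos_argument_spec is normally merged into the module's own
# argument spec):
#
#   module = AnsibleModule(argument_spec=vyos_argument_spec, supports_check_mode=True)
#   warnings = []
#   check_args(module, warnings)
#   current = get_config(module)  # runs 'show configuration commands'
#   diff = load_config(module, ['set system host-name vyos01'], commit=True)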
|
dmitry-sobolev/ansible
|
lib/ansible/module_utils/vyos.py
|
Python
|
gpl-3.0
| 4,383
| 0.004335
|
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import re
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from ansible.cli.arguments import option_helpers as opt_help
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionLoader, get_collection_name_from_path, set_collection_playbook_paths
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
try:
import argcomplete
HAS_ARGCOMPLETE = True
except ImportError:
HAS_ARGCOMPLETE = False
display = Display()
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
if C.DEVEL_WARNING and __version__.endswith('dev0'):
display.warning(
'You are running the development version of Ansible. You should only run Ansible from "devel" if '
'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
'changing source of code and can become unstable at any point.'
)
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
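# e.g. split_vault_id(u'dev@~/.vault_pass') -> (u'dev', u'~/.vault_pass')
#      split_vault_id(u'~/.vault_pass') -> (None, u'~/.vault_pass')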
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we don't
# have other secrets set up, then automatically add a password prompt as well.
# prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
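# e.g., assuming C.DEFAULT_VAULT_IDENTITY == u'default':
#   build_vault_ids([u'dev@prompt'], [u'~/.vault_pass'])
#     -> [u'dev@prompt', u'default@~/.vault_pass']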
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
# certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# an empty or invalid password from the prompt will warn and continue to the next
# without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
@staticmethod
def ask_passwords():
''' prompt for connection and become passwords if needed '''
op = context.CLIARGS
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
try:
if op['ask_pass']:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
else:
become_prompt = "%s password: " % become_prompt_method
if op['become_ask_pass']:
becomepass = getpass.getpass(prompt=become_prompt)
if op['ask_pass'] and becomepass == '':
becomepass = sshpass
except EOFError:
pass
# we 'wrap' the passwords to prevent templating as
# they can contain special chars and trigger it incorrectly
if sshpass:
sshpass = to_unsafe_text(sshpass)
if becomepass:
becomepass = to_unsafe_text(becomepass)
return (sshpass, becomepass)
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
return op
@abstractmethod
def init_parser(self, usage="", desc=None, epilog=None):
"""
Create an options parser for most ansible scripts
Subclasses need to implement this method. They will usually call the base class's
init_parser to create a basic version and then add their own options on top of that.
An implementation will look something like this::
def init_parser(self):
super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
ansible.arguments.option_helpers.add_runas_options(self.parser)
self.parser.add_option('--my-option', dest='my_option', action='store')
"""
self.parser = opt_help.create_base_parser(os.path.basename(self.args[0]), usage=usage, desc=desc, epilog=epilog, )
@abstractmethod
def post_process_args(self, options):
"""Process the command line args
Subclasses need to implement this method. This method validates and transforms the command
line arguments. It can be used to check whether conflicting values were given, whether filenames
exist, etc.
An implementation will look something like this::
def post_process_args(self, options):
options = super(MyCLI, self).post_process_args(options)
if options.addition and options.subtraction:
raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
if isinstance(options.listofhosts, string_types):
options.listofhosts = string_types.split(',')
return options
"""
# process tags
if hasattr(options, 'tags') and not options.tags:
# optparse defaults do not do what's expected
options.tags = ['all']
if hasattr(options, 'tags') and options.tags:
tags = set()
for tag_set in options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
options.tags = list(tags)
# process skip_tags
if hasattr(options, 'skip_tags') and options.skip_tags:
skip_tags = set()
for tag_set in options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if options.inventory:
# should always be a list
if isinstance(options.inventory, string_types):
options.inventory = [options.inventory]
# Ensure full paths when needed
options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
else:
options.inventory = C.DEFAULT_HOST_LIST
# Dup args set on the root parser and sub parsers results in the root parser ignoring the args. e.g. doing
# 'ansible-galaxy -vvv init' has no verbosity set but 'ansible-galaxy init -vvv' sets a level of 3. To preserve
# back compat with pre-argparse changes we manually scan and set verbosity based on the argv values.
if self.parser.prog in ['ansible-galaxy', 'ansible-vault'] and not options.verbosity:
verbosity_arg = next(iter([arg for arg in self.args if arg.startswith('-v')]), None)
if verbosity_arg:
display.deprecated("Setting verbosity before the arg sub command is deprecated, set the verbosity "
"after the sub command", "2.13")
options.verbosity = verbosity_arg.count('v')
return options
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
context.CLIARGS.
Subclasses need to implement two helper methods, init_parser() and post_process_args() which
are called from this function before and after parsing the arguments.
"""
self.init_parser()
if HAS_ARGCOMPLETE:
argcomplete.autocomplete(self.parser)
options = self.parser.parse_args(self.args[1:])
options = self.post_process_args(options)
context._init_global_context(options)
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
# expensive call, use with care
ansible_version_string = opt_help.version()
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def pager(text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
CLI.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
CLI.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
@staticmethod
def _play_prereqs():
options = context.CLIARGS
# all needs loader
loader = DataLoader()
basedir = options.get('basedir', False)
if basedir:
loader.set_basedir(basedir)
add_all_plugin_dirs(basedir)
set_collection_playbook_paths(basedir)
default_collection = get_collection_name_from_path(basedir)
if default_collection:
display.warning(u'running with default collection {0}'.format(default_collection))
AnsibleCollectionLoader().set_default_collection(default_collection)
vault_ids = list(options['vault_ids'])
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(options['vault_password_files']),
ask_vault_pass=options['ask_vault_pass'],
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options['inventory'])
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if not hosts and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts
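# Minimal subclass sketch (illustrative only; the class and option names are
# made up, but the init_parser()/post_process_args()/run() contract matches
# the abstract methods above):
#
#   class MyCLI(CLI):
#       def init_parser(self):
#           super(MyCLI, self).init_parser(usage="%prog [options]")
#           self.parser.add_argument('--my-option', dest='my_option', action='store')
#       def post_process_args(self, options):
#           return super(MyCLI, self).post_process_args(options)
#       def run(self):
#           super(MyCLI, self).run()  # parses self.args into context.CLIARGS
#           display.display(u"my_option=%s" % context.CLIARGS['my_option'])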
|
shsingh/ansible
|
lib/ansible/cli/__init__.py
|
Python
|
gpl-3.0
| 20,952
| 0.002816
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG
grammar = CFG.fromstring(demo_grammar)
print(grammar)
for sentence in generate(grammar, n=10):
print(' '.join(sentence))
for sentence in generate(grammar, depth=4):
print(' '.join(sentence))
print(len(list(generate(grammar, depth=3))))
print(len(list(generate(grammar, depth=4))))
print(len(list(generate(grammar, depth=5))))
print(len(list(generate(grammar, depth=6))))
print(len(list(generate(grammar))))
|
davidam/python-examples
|
nlp/nltk/howtos/generate.py
|
Python
|
gpl-3.0
| 537
| 0
|
from itertools import imap, chain
def set_name(name, f):
try:
f.__pipetools__name__ = name
except (AttributeError, UnicodeEncodeError):
pass
return f
def get_name(f):
from pipetools.main import Pipe
pipetools_name = getattr(f, '__pipetools__name__', None)
if pipetools_name:
return pipetools_name() if callable(pipetools_name) else pipetools_name
if isinstance(f, Pipe):
return repr(f)
return f.__name__ if hasattr(f, '__name__') else repr(f)
def repr_args(*args, **kwargs):
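    # Render args the way they'd look at the call site, e.g. repr_args(1, x='a') -> "1, x='a'".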
return ', '.join(chain(
imap('{0!r}'.format, args),
imap('{0[0]}={0[1]!r}'.format, kwargs.iteritems())))
|
katakumpo/pipetools
|
pipetools/debug.py
|
Python
|
mit
| 672
| 0
|
# -*- coding: utf-8 -*-
# __init__.py
#
# Copyright (C) 2007 - Guillaume Desmottes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Parts from "Magnatune Rhythmbox plugin" (stolen from rhythmbox's __init__.py)
# Copyright (C), 2006 Adam Zimmerman <adam_zimmerman@sfu.ca>
import rhythmdb, rb
import gobject
import gtk
from JamendoSource import JamendoSource
from JamendoConfigureDialog import JamendoConfigureDialog
popup_ui = """
<ui>
<popup name="JamendoSourceViewPopup">
<menuitem name="AddToQueueLibraryPopup" action="AddToQueue"/>
<menuitem name="JamendoDownloadAlbum" action="JamendoDownloadAlbum"/>
<menuitem name="JamendoDonateArtist" action="JamendoDonateArtist"/>
<separator/>
<menuitem name="BrowseGenreLibraryPopup" action="BrowserSrcChooseGenre"/>
<menuitem name="BrowseArtistLibraryPopup" action="BrowserSrcChooseArtist"/>
<menuitem name="BrowseAlbumLibraryPopup" action="BrowserSrcChooseAlbum"/>
<separator/>
<menuitem name="PropertiesLibraryPopup" action="MusicProperties"/>
</popup>
</ui>
"""
class Jamendo(rb.Plugin):
#
# Core methods
#
def __init__(self):
rb.Plugin.__init__(self)
def activate(self, shell):
self.db = shell.get_property("db")
self.entry_type = self.db.entry_register_type("JamendoEntryType")
# allow changes which don't do anything
self.entry_type.can_sync_metadata = True
self.entry_type.sync_metadata = None
group = rb.rb_source_group_get_by_name ("stores")
if not group:
group = rb.rb_source_group_register ("stores",
_("Stores"),
rb.SOURCE_GROUP_CATEGORY_FIXED)
theme = gtk.icon_theme_get_default()
rb.append_plugin_source_path(theme, "/icons/")
width, height = gtk.icon_size_lookup(gtk.ICON_SIZE_LARGE_TOOLBAR)
icon = rb.try_load_icon(theme, "jamendo", width, 0)
self.source = gobject.new (JamendoSource,
shell=shell,
entry_type=self.entry_type,
plugin=self,
icon=icon,
source_group=group)
shell.register_entry_type_for_source(self.source, self.entry_type)
shell.append_source(self.source, None) # Add the source to the list
# Add button
manager = shell.get_player().get_property('ui-manager')
action = gtk.Action('JamendoDownloadAlbum', _('_Download Album'),
_("Download this album using BitTorrent"),
'gtk-save')
action.connect('activate', lambda a: shell.get_property("selected-source").download_album())
self.action_group = gtk.ActionGroup('JamendoPluginActions')
self.action_group.add_action(action)
# Add Button for Donate
action = gtk.Action('JamendoDonateArtist', _('_Donate to Artist'),
_("Donate Money to this Artist"),
'gtk-jump-to')
action.connect('activate', lambda a: shell.get_property("selected-source").launch_donate())
self.action_group.add_action(action)
manager.insert_action_group(self.action_group, 0)
self.ui_id = manager.add_ui_from_string(popup_ui)
manager.ensure_update()
self.pec_id = shell.get_player().connect('playing-song-changed', self.playing_entry_changed)
def deactivate(self, shell):
manager = shell.get_player().get_property('ui-manager')
manager.remove_ui (self.ui_id)
manager.remove_action_group(self.action_group)
self.action_group = None
shell.get_player().disconnect (self.pec_id)
self.db.entry_delete_by_type(self.entry_type)
self.db.commit()
self.db = None
self.entry_type = None
self.source.delete_thyself()
self.source = None
def create_configure_dialog(self, dialog=None):
if not dialog:
builder_file = self.find_file("jamendo-prefs.ui")
dialog = JamendoConfigureDialog (builder_file).get_dialog()
dialog.present()
return dialog
def playing_entry_changed (self, sp, entry):
self.source.playing_entry_changed (entry)
|
paulbellamy/Rhythmbox-iPod-Plugin
|
plugins/jamendo/jamendo/__init__.py
|
Python
|
gpl-2.0
| 4,882
| 0.020893
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_native
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.plugins.loader import lookup_loader
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
from ansible.playbook.loop_control import LoopControl
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.utils.collection_loader import AnsibleCollectionLoader
from ansible.utils.display import Display
from ansible.utils.sentinel import Sentinel
__all__ = ['Task']
display = Display()
class Task(Base, Conditional, Taggable, CollectionSearch):
"""
A task is a language feature that represents a call to a module, with given arguments and other parameters.
A handler is a subclass of a task.
Usage:
Task.load(datastructure) -> Task
Task.something(...)
"""
# =================================================================================
# ATTRIBUTES
# load_<attribute_name> and
# validate_<attribute_name>
# will be used if defined
# might be possible to define others
    # NOTE: ONLY set defaults on task attributes that are not inheritable,
    #   inheritance is only triggered if the 'current value' is None,
    #   default can be set at play/top level object and inheritance will take its course.
_args = FieldAttribute(isa='dict', default=dict)
_action = FieldAttribute(isa='string')
_async_val = FieldAttribute(isa='int', default=0, alias='async')
_changed_when = FieldAttribute(isa='list', default=list)
_delay = FieldAttribute(isa='int', default=5)
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool')
_failed_when = FieldAttribute(isa='list', default=list)
_loop = FieldAttribute()
_loop_control = FieldAttribute(isa='class', class_type=LoopControl, inherit=False)
_notify = FieldAttribute(isa='list')
_poll = FieldAttribute(isa='int', default=C.DEFAULT_POLL_INTERVAL)
_register = FieldAttribute(isa='string', static=True)
_retries = FieldAttribute(isa='int', default=3)
_until = FieldAttribute(isa='list', default=list)
# deprecated, used to be loop and loop_args but loop has been repurposed
_loop_with = FieldAttribute(isa='string', private=True, inherit=False)
def __init__(self, block=None, role=None, task_include=None):
        ''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
self._role = role
self._parent = None
if task_include:
self._parent = task_include
else:
self._parent = block
super(Task, self).__init__()
def get_path(self):
''' return the absolute path of the task with its line number '''
path = ""
if hasattr(self, '_ds') and hasattr(self._ds, '_data_source') and hasattr(self._ds, '_line_number'):
path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
elif hasattr(self._parent._play, '_ds') and hasattr(self._parent._play._ds, '_data_source') and hasattr(self._parent._play._ds, '_line_number'):
path = "%s:%s" % (self._parent._play._ds._data_source, self._parent._play._ds._line_number)
return path
def get_name(self, include_role_fqcn=True):
''' return the name of the task '''
if self._role:
role_name = self._role.get_name(include_role_fqcn=include_role_fqcn)
if self._role and self.name and role_name not in self.name:
return "%s : %s" % (role_name, self.name)
elif self.name:
return self.name
else:
if self._role:
return "%s : %s" % (role_name, self.action)
else:
return "%s" % (self.action,)
def _merge_kv(self, ds):
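        # Normalize shorthand module args: None -> '', strings pass through,
        # and dicts are flattened to a "k1=v1 k2=v2" string (private '_' keys skipped).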
if ds is None:
return ""
elif isinstance(ds, string_types):
return ds
elif isinstance(ds, dict):
buf = ""
for (k, v) in iteritems(ds):
if k.startswith('_'):
continue
buf = buf + "%s=%s " % (k, v)
buf = buf.strip()
return buf
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = Task(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def __repr__(self):
''' returns a human readable representation of the task '''
if self.get_name() == 'meta':
return "TASK: meta (%s)" % self.args['_raw_params']
else:
return "TASK: %s" % self.get_name()
def _preprocess_with_loop(self, ds, new_ds, k, v):
''' take a lookup plugin name and store it correctly '''
loop_name = k.replace("with_", "")
if new_ds.get('loop') is not None or new_ds.get('loop_with') is not None:
raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
if v is None:
raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
new_ds['loop_with'] = loop_name
new_ds['loop'] = v
# display.deprecated("with_ type loops are being phased out, use the 'loop' keyword instead", version="2.10")
def preprocess_data(self, ds):
'''
        tasks are especially complex arguments, so they need pre-processing.
keep it short.
'''
if not isinstance(ds, dict):
raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
# the new, cleaned datastructure, which will have legacy
# items reduced to a standard structure suitable for the
# attributes of the task class
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
# since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
default_collection = AnsibleCollectionLoader().default_collection
# use the parent value if our ds doesn't define it
collections_list = ds.get('collections', self.collections)
if collections_list is None:
collections_list = []
if isinstance(collections_list, string_types):
collections_list = [collections_list]
if default_collection and not self._role: # FIXME: and not a collections role
if collections_list:
if default_collection not in collections_list:
collections_list.insert(0, default_collection)
else:
collections_list = [default_collection]
if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not in collections_list:
collections_list.append('ansible.legacy')
if collections_list:
ds['collections'] = collections_list
# use the args parsing class to determine the action, args,
# and the delegate_to value from the various possible forms
# supported as legacy
args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)
try:
(action, args, delegate_to) = args_parser.parse()
except AnsibleParserError as e:
            # if the raised exception was created with obj=ds args, then it includes the detail
            # so we don't need to add it and can just re-raise.
if e._obj:
raise
# But if it wasn't, we can add the yaml object now to get more detail
raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)
# the command/shell/script modules used to support the `cmd` arg,
# which corresponds to what we now call _raw_params, so move that
# value over to _raw_params (assuming it is empty)
if action in ('command', 'shell', 'script'):
if 'cmd' in args:
if args.get('_raw_params', '') != '':
raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
" Please put everything in one or the other place.", obj=ds)
args['_raw_params'] = args.pop('cmd')
new_ds['action'] = action
new_ds['args'] = args
new_ds['delegate_to'] = delegate_to
# we handle any 'vars' specified in the ds here, as we may
# be adding things to them below (special handling for includes).
# When that deprecated feature is removed, this can be too.
if 'vars' in ds:
# _load_vars is defined in Base, and is used to load a dictionary
# or list of dictionaries in a standard way
new_ds['vars'] = self._load_vars(None, ds.get('vars'))
else:
new_ds['vars'] = dict()
for (k, v) in iteritems(ds):
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
# we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
continue
elif k.startswith('with_') and k.replace("with_", "") in lookup_loader:
# transform into loop property
self._preprocess_with_loop(ds, new_ds, k, v)
else:
# pre-2.0 syntax allowed variables for include statements at the top level of the task,
# so we move those into the 'vars' dictionary here, and show a deprecation message
# as we will remove this at some point in the future.
if action in ('include',) and k not in self._valid_attrs and k not in self.DEPRECATED_ATTRIBUTES:
display.deprecated("Specifying include variables at the top-level of the task is deprecated."
" Please see:\nhttps://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n"
" for currently supported syntax regarding included files and variables", version="2.12")
new_ds['vars'][k] = v
elif C.INVALID_TASK_ATTRIBUTE_FAILED or k in self._valid_attrs:
new_ds[k] = v
else:
display.warning("Ignoring invalid attribute: %s" % k)
return super(Task, self).preprocess_data(new_ds)
def _load_loop_control(self, attr, ds):
if not isinstance(ds, dict):
raise AnsibleParserError(
"the `loop_control` value must be specified as a dictionary and cannot "
"be a variable itself (though it can contain variables)",
obj=ds,
)
return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader)
def _validate_attributes(self, ds):
try:
super(Task, self)._validate_attributes(ds)
except AnsibleParserError as e:
e.message += '\nThis error can be suppressed as a warning using the "invalid_task_attribute_failed" configuration'
raise e
def post_validate(self, templar):
'''
Override of base class post_validate, to also do final validation on
the block and task include (if any) to which this task belongs.
'''
if self._parent:
self._parent.post_validate(templar)
if AnsibleCollectionLoader().default_collection:
pass
super(Task, self).post_validate(templar)
def _post_validate_loop(self, attr, value, templar):
'''
Override post validation for the loop field, which is templated
specially in the TaskExecutor class when evaluating loops.
'''
return value
def _post_validate_environment(self, attr, value, templar):
'''
Override post validation of vars on the play, as we don't want to
template these too early.
'''
env = {}
if value is not None:
def _parse_env_kv(k, v):
try:
env[k] = templar.template(v, convert_bare=False)
except AnsibleUndefinedVariable as e:
error = to_native(e)
if self.action in ('setup', 'gather_facts') and 'ansible_facts.env' in error or 'ansible_env' in error:
# ignore as fact gathering is required for 'env' facts
return
raise
if isinstance(value, list):
for env_item in value:
if isinstance(env_item, dict):
for k in env_item:
_parse_env_kv(k, env_item[k])
else:
isdict = templar.template(env_item, convert_bare=False)
if isinstance(isdict, dict):
env.update(isdict)
else:
display.warning("could not parse environment value, skipping: %s" % value)
elif isinstance(value, dict):
# should not really happen
env = dict()
for env_item in value:
_parse_env_kv(env_item, value[env_item])
else:
# at this point it should be a simple string, also should not happen
env = templar.template(value, convert_bare=False)
return env
def _post_validate_changed_when(self, attr, value, templar):
'''
changed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def _post_validate_failed_when(self, attr, value, templar):
'''
failed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def _post_validate_until(self, attr, value, templar):
'''
until is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def get_vars(self):
all_vars = dict()
if self._parent:
all_vars.update(self._parent.get_vars())
all_vars.update(self.vars)
if 'tags' in all_vars:
del all_vars['tags']
if 'when' in all_vars:
del all_vars['when']
return all_vars
def get_include_params(self):
all_vars = dict()
if self._parent:
all_vars.update(self._parent.get_include_params())
if self.action in ('include', 'include_tasks', 'include_role'):
all_vars.update(self.vars)
return all_vars
def copy(self, exclude_parent=False, exclude_tasks=False):
new_me = super(Task, self).copy()
new_me._parent = None
if self._parent and not exclude_parent:
new_me._parent = self._parent.copy(exclude_tasks=exclude_tasks)
new_me._role = None
if self._role:
new_me._role = self._role
return new_me
def serialize(self):
data = super(Task, self).serialize()
if not self._squashed and not self._finalized:
if self._parent:
data['parent'] = self._parent.serialize()
data['parent_type'] = self._parent.__class__.__name__
if self._role:
data['role'] = self._role.serialize()
return data
def deserialize(self, data):
# import is here to avoid import loops
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler_task_include import HandlerTaskInclude
parent_data = data.get('parent', None)
if parent_data:
parent_type = data.get('parent_type')
if parent_type == 'Block':
p = Block()
elif parent_type == 'TaskInclude':
p = TaskInclude()
elif parent_type == 'HandlerTaskInclude':
p = HandlerTaskInclude()
p.deserialize(parent_data)
self._parent = p
del data['parent']
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
del data['role']
super(Task, self).deserialize(data)
def set_loader(self, loader):
'''
Sets the loader on this object and recursively on parent, child objects.
This is used primarily after the Task has been serialized/deserialized, which
does not preserve the loader.
'''
self._loader = loader
if self._parent:
self._parent.set_loader(loader)
def _get_parent_attribute(self, attr, extend=False, prepend=False):
'''
Generic logic to get the attribute or parent attribute for a task value.
'''
extend = self._valid_attrs[attr].extend
prepend = self._valid_attrs[attr].prepend
try:
value = self._attributes[attr]
# If parent is static, we can grab attrs from the parent
# otherwise, defer to the grandparent
if getattr(self._parent, 'statically_loaded', True):
_parent = self._parent
else:
_parent = self._parent._parent
if _parent and (value is Sentinel or extend):
if getattr(_parent, 'statically_loaded', True):
# vars are always inheritable, other attributes might not be for the parent but still should be for other ancestors
if attr != 'vars' and hasattr(_parent, '_get_parent_attribute'):
parent_value = _parent._get_parent_attribute(attr)
else:
parent_value = _parent._attributes.get(attr, Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
except KeyError:
pass
return value
def get_dep_chain(self):
if self._parent:
return self._parent.get_dep_chain()
else:
return None
def get_search_path(self):
'''
Return the list of paths you should search for files, in order.
This follows role/playbook dependency chain.
'''
path_stack = []
dep_chain = self.get_dep_chain()
# inside role: add the dependency chain from current to dependent
if dep_chain:
path_stack.extend(reversed([x._role_path for x in dep_chain]))
# add path of task itself, unless it is already in the list
task_dir = os.path.dirname(self.get_path())
if task_dir not in path_stack:
path_stack.append(task_dir)
return path_stack
def all_parents_static(self):
if self._parent:
return self._parent.all_parents_static()
return True
def get_first_parent_include(self):
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude):
return self._parent
return self._parent.get_first_parent_include()
return None
|
ilpianista/ansible
|
lib/ansible/playbook/task.py
|
Python
|
gpl-3.0
| 20,951
| 0.002148
|
from django.conf import settings
def posthog_configurations(request):
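    # Expose the PostHog key and URL from settings to every template rendered with a RequestContext.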
return {
'POSTHOG_API_KEY': settings.POSTHOG_API_KEY,
'POSTHOG_API_URL': settings.POSTHOG_API_URL,
}
|
pythonprobr/pythonpro-website
|
pythonpro/analytics/context_processors.py
|
Python
|
agpl-3.0
| 197
| 0
|
import parole
from parole.colornames import colors
from parole.display import interpolateRGB
import pygame, random
import sim_creatures, main, random
from util import *
description = \
"""
This guy should really look into another line of work.
"""
nagLines = [
'*sigh*',
"It's not been the same 'round 'ere.",
"Ain't been no work since the mines... changed.",
"We been in for some rough times.",
"I pray they don't get to the wells.",
]
class NPCClass(sim_creatures.NPC):
def __init__(self):
sim_creatures.NPC.__init__(
self,
'retired miner', # name
parole.map.AsciiTile('@', colors['Gray']), # symbol, color
11, # str
8, # dex
11, # con
11, # per
10, # spd
1, # level
description=description,
)
parole.info('New NPC: retiredminer.')
main.schedule.listen('enter tile', self)
def listen(self, event):
super(NPCClass, self).listen(event)
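        # React only ~10% of the time, so the miner doesn't nag on every event.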
if random.random() < 0.9:
return
if not visible(self):
return
if event.id == 'enter tile':
eObj, ePos, eMap = event.args
if eMap is self.parentTile.map and eObj is main.player:
self.say(random.choice(nagLines))
#========================================
thingClass = NPCClass
|
tectronics/nyctos
|
src/data.res/scripts/npcs/retiredminer.py
|
Python
|
gpl-2.0
| 1,428
| 0.008403
|
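# SPOJ "UJ": read pairs "k n" per line and print k**n, stopping at the sentinel "0 0".
# Python's arbitrary-precision ints make the large-power arithmetic trivial.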
while True:
    k, n = input().split()
    if k == '0' and n == '0':
        break
    print(int(k) ** int(n))
|
ProgDan/maratona
|
SPOJ/UJ.py
|
Python
|
gpl-3.0
| 129
| 0.124031
|
"""Ensure credentials are preserved through the authorization.
The Authorization Code Grant will need to preserve state as well as redirect
uri and the Implicit Grant will need to preserve state.
"""
from __future__ import absolute_import, unicode_literals
import json
import mock
from .test_utils import get_query_credentials, get_fragment_credentials
from ....unittest import TestCase
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2 import WebApplicationServer, MobileApplicationServer
from oauthlib.oauth2.rfc6749 import errors
class PreservationTest(TestCase):
DEFAULT_REDIRECT_URI = 'http://i.b./path'
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = self.DEFAULT_REDIRECT_URI
self.validator.authenticate_client.side_effect = self.set_client
self.web = WebApplicationServer(self.validator)
self.mobile = MobileApplicationServer(self.validator)
def set_state(self, state):
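        # Build a validate_code side_effect that records the given state on the request.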
def set_request_state(client_id, code, client, request):
request.state = state
return True
return set_request_state
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_state_preservation(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc&response_type='
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + 'code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
code = get_query_credentials(h['Location'])['code'][0]
self.validator.validate_code.side_effect = self.set_state('xyz')
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['state'], 'xyz')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + 'token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertEqual(get_fragment_credentials(h['Location'])['state'][0], 'xyz')
def test_redirect_uri_preservation(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
redirect_uri = 'http://i.b/path'
token_uri = 'http://example.com/path'
# authorization grant
h, _, s = self.web.create_authorization_response(
auth_uri + '&response_type=code', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
# confirm_redirect_uri should return false if the redirect uri
# was given in the authorization but not in the token request.
self.validator.confirm_redirect_uri.return_value = False
code = get_query_credentials(h['Location'])['code'][0]
_, body, _ = self.web.create_token_response(token_uri,
body='grant_type=authorization_code&code=%s' % code)
self.assertEqual(json.loads(body)['error'], 'access_denied')
# implicit grant
h, _, s = self.mobile.create_authorization_response(
auth_uri + '&response_type=token', scopes=['random'])
self.assertEqual(s, 302)
self.assertIn('Location', h)
self.assertTrue(h['Location'].startswith(redirect_uri))
def test_invalid_redirect_uri(self):
auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'
self.validator.validate_redirect_uri.return_value = False
# authorization grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MismatchingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
def test_default_uri(self):
auth_uri = 'http://example.com/path?state=xyz&client_id=abc'
self.validator.get_default_redirect_uri.return_value = None
# authorization grant
self.assertRaises(errors.MissingRedirectURIError,
self.web.create_authorization_response,
auth_uri + '&response_type=code', scopes=['random'])
# implicit grant
self.assertRaises(errors.MissingRedirectURIError,
self.mobile.create_authorization_response,
auth_uri + '&response_type=token', scopes=['random'])
|
nirmeshk/oh-mainline
|
vendor/packages/oauthlib/tests/oauth2/rfc6749/endpoints/test_credentials_preservation.py
|
Python
|
agpl-3.0
| 4,860
| 0.003086
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
class BaseFFTOpsTest(test.TestCase):
def _use_eigen_kernels(self):
use_eigen_kernels = False # Eigen kernels are default
if test.is_gpu_available(cuda_only=True):
use_eigen_kernels = False
return use_eigen_kernels
def _fft_kernel_label_map(self):
"""Returns a generator overriding kernel selection.
This is used to force testing of the eigen kernels, even
when they are not the default registered kernels.
Returns:
A generator in which to wrap every test.
"""
if self._use_eigen_kernels():
d = dict([(op, "eigen")
for op in [
"FFT", "FFT2D", "FFT3D", "IFFT", "IFFT2D", "IFFT3D",
"IRFFT", "IRFFT2D", "IRFFT3D", "RFFT", "RFFT2D", "RFFT3D"
]])
return ops.get_default_graph()._kernel_label_map(d)
else:
return ops.get_default_graph()._kernel_label_map({})
def _Compare(self, x, rank, fft_length=None, use_placeholder=False):
self._CompareForward(x, rank, fft_length, use_placeholder)
self._CompareBackward(x, rank, fft_length, use_placeholder)
def _CompareForward(self, x, rank, fft_length=None, use_placeholder=False):
x_np = self._npFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=1e-4, atol=1e-4)
def _CompareBackward(self, x, rank, fft_length=None, use_placeholder=False):
x_np = self._npIFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfIFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfIFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=1e-4, atol=1e-4)
def _checkGradComplex(self, func, x, y, result_is_complex=True):
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
# func is a forward or inverse, real or complex, batched or unbatched FFT
# function with a complex input.
z = func(math_ops.complex(inx, iny))
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [list(x.shape), list(y.shape)],
loss, [1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
def _checkGradReal(self, func, x):
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
# func is a forward RFFT function (batched or unbatched).
z = func(inx)
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
x_jacob_t, x_jacob_n = test.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
class FFTOpsTest(BaseFFTOpsTest):
def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.test_session(use_gpu=True):
return self._tfFFTForRank(rank)(x).eval(feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.test_session(use_gpu=True):
return self._tfIFFTForRank(rank)(x).eval(feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.fft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.fft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.ifft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return spectral_ops.fft
elif rank == 2:
return spectral_ops.fft2d
elif rank == 3:
return spectral_ops.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return spectral_ops.ifft
elif rank == 2:
return spectral_ops.ifft2d
elif rank == 3:
return spectral_ops.ifft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
def testBasic(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np.complex64), rank)
def testBasicPlaceholder(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np.complex64),
rank,
use_placeholder=True)
def testRandom(self):
with self._fft_kernel_label_map():
np.random.seed(12345)
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(gen((4,) * dims), rank)
def testError(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfIFFT(x, rank)
def testGrad_Simple(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np.float32) / 10.0
im = np.zeros(shape=(4,) * dims, dtype=np.float32)
self._checkGradComplex(self._tfFFTForRank(rank), re, im)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im)
def testGrad_Random(self):
with self._fft_kernel_label_map():
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
self._checkGradComplex(self._tfFFTForRank(rank), re, im)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im)
class RFFTOpsTest(BaseFFTOpsTest):
def _CompareBackward(self, x, rank, fft_length=None, use_placeholder=False):
super(RFFTOpsTest, self)._CompareBackward(x, rank, fft_length,
use_placeholder)
def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
with self.test_session(use_gpu=True):
return self._tfFFTForRank(rank)(x, fft_length).eval(feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
with self.test_session(use_gpu=True):
return self._tfIFFTForRank(rank)(x, fft_length).eval(feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.rfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.irfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return spectral_ops.rfft
elif rank == 2:
return spectral_ops.rfft2d
elif rank == 3:
return spectral_ops.rfft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return spectral_ops.irfft
elif rank == 2:
return spectral_ops.irfft2d
elif rank == 3:
return spectral_ops.irfft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.float32)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
def testBasic(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._CompareForward(r2c.astype(np.float32), rank, (size,) * rank)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._CompareBackward(
c2r.astype(np.complex64), rank, (size,) * rank)
def testBasicPlaceholder(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._CompareForward(
r2c.astype(np.float32),
rank, (size,) * rank,
use_placeholder=True)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._CompareBackward(
c2r.astype(np.complex64),
rank, (size,) * rank,
use_placeholder=True)
def testFftLength(self):
if test.is_gpu_available(cuda_only=True):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
# Test truncation (FFT size < dimensions).
fft_length = (size - 2,) * rank
self._CompareForward(r2c.astype(np.float32), rank, fft_length)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._CompareForward(r2c.astype(np.float32), rank, fft_length,
use_placeholder=True)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length,
use_placeholder=True)
# Test padding (FFT size > dimensions).
fft_length = (size + 2,) * rank
self._CompareForward(r2c.astype(np.float32), rank, fft_length)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._CompareForward(r2c.astype(np.float32), rank, fft_length,
use_placeholder=True)
self._CompareBackward(c2r.astype(np.complex64), rank, fft_length,
use_placeholder=True)
def testRandom(self):
with self._fft_kernel_label_map():
np.random.seed(12345)
def gen_real(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
ret = re.reshape(shape)
return ret
def gen_complex(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
ret = (re + im * 1j).reshape(shape)
return ret
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
self._CompareForward(gen_real((size,) * dims), rank, (size,) * rank)
complex_dims = (size,) * (dims - 1) + (inner_dim,)
self._CompareBackward(
gen_complex(complex_dims), rank, (size,) * rank)
def testError(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfIFFT(x, rank)
for dims in xrange(rank, rank + 2):
x = np.zeros((1,) * rank)
# Test non-rank-1 fft_length produces an error.
fft_length = np.zeros((1, 1)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfIFFT(x, rank, fft_length)
# Test wrong fft_length length.
fft_length = np.zeros((rank + 1,)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfIFFT(x, rank, fft_length)
# Test that calling the kernel directly without padding to fft_length
# produces an error.
rffts_for_rank = {
1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],
2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],
3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]
}
rfft_fn, irfft_fn = rffts_for_rank[rank]
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least 6 but got: 5"):
x = np.zeros((5,) * rank).astype(np.float32)
fft_length = [6] * rank
with self.test_session():
rfft_fn(x, fft_length).eval()
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least .* but got: 3"):
x = np.zeros((3,) * rank).astype(np.complex64)
fft_length = [6] * rank
with self.test_session():
irfft_fn(x, fft_length).eval()
def testGrad_Simple(self):
with self._fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.ones(shape=(size,) * dims, dtype=np.float32)
im = -np.ones(shape=(size,) * dims, dtype=np.float32)
self._checkGradReal(self._tfFFTForRank(rank), re)
self._checkGradComplex(
self._tfIFFTForRank(rank), re, im, result_is_complex=False)
def testGrad_Random(self):
with self._fft_kernel_label_map():
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
self._checkGradReal(self._tfFFTForRank(rank), re)
self._checkGradComplex(
self._tfIFFTForRank(rank), re, im, result_is_complex=False)
if __name__ == "__main__":
test.main()
|
Bulochkin/tensorflow_pack
|
tensorflow/python/kernel_tests/fft_ops_test.py
|
Python
|
apache-2.0
| 18,811
| 0.010207
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
faq_en = '''
<p>
<b>Why statistics on the site does not coincide with the statistics in the game?</b>
</p>
<p>
The statistics collection algorithms of IL2 stats differ from those used in-game. As a consequence, these statistics will not coincide with the in-game ones.
</p>
<br>
<p>
<b>How is the rating calculated?</b>
</p>
<p>
1) calculate how many score points the player earns per life - score / (deaths + captures) = SD<br>
2) calculate how many score points the player earns per hour of flight time - score / flight time = SH<br>
3) calculate the rating by the formula: (SD * SH * score) / 1000
</p>
<br>
<p>
<b>Why is my profile not displayed in the table of players?</b>
</p>
<p>
The statistics exclude inactive players from the overall rating. By default, players inactive for more than 7 days do not participate in the rating.
</p>
<br>
<p>
<b>I landed at an airfield, but the sortie status says the landing was not on an airfield. Why?</b>
</p>
<p>
Landings are only counted on an active airfield. Usually the active airfield is the one where you can respawn.
</p>
<br>
<p>
<b>What is the Fairplay Index?</b>
</p>
<p>
The Fairplay Index is an indicator of a player's correct behavior, and it affects the score. The maximum value of 100% indicates that the player does not violate the rules; such a player receives 100% of the score and all bonuses. If the index is less than 100%, the player receives only the percentage of the score corresponding to the current index. Also, in this case, the player does not receive any bonuses.<br>
Violations that reduce the index:<br>
Disconnection -10%<br>
Shooting down a friendly aircraft -10%<br>
Destroying a friendly ground target -5%<br>
The index recovers by 5% per flying hour if the player does not violate the rules.<br>
The idea was borrowed from the project Bellum War.
</p>
<br>
'''
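# Worked example of the rating formula from the FAQ above, using hypothetical
# numbers: score=12000, deaths+captures=4, flight time=10 h
#   SD = 12000 / 4  = 3000
#   SH = 12000 / 10 = 1200
#   rating = (3000 * 1200 * 12000) / 1000 = 43200000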
faq_ru = '''
<p>
<b>Почему статистика на сайте не совпадает со статистикой внутри игры?</b>
</p>
<p>
Алгоритмы сбора статистики IL2 stats отличаются от статистики в игре. Как следствие данная статистика не будет совпадать с игровой.
</p>
<br>
<p>
<b>Как рассчитывается рейтинг?</b>
</p>
<p>
Рейтинг пилота рассчитывается на основе заработанных пилотом очков, среднего количества очков за жизнь и за час. Такой способ расчета рейтинга учитывает не только количественные, но и качественные показатели пилота, а так же сводит в единую систему оценки пилотов разных специализаций.<br>
Как именно рассчитывается рейтинг:<br>
1) вычисляем сколько игрок зарабатывает очков за одну жизнь - очки / (смерти + плен) = ОС<br>
2) вычисляем сколько игрок зарабатывает очков за один час налета - очки / налет часов = ОЧ<br>
3) вычисляем рейтинг по формуле: (ОС * ОЧ * очки) / 1000
</p>
<br>
<p>
<b>Почему мой профиль не отображается в общей таблице игроков?</b>
</p>
<p>
В статистике включена опция которая исключает неактивных игроков из общего рейтинга. По умолчанию игроки неактивные более 7 дней - не участвуют в рейтинге.
</p>
<br>
<p>
<b>Я приземлился на аэродром, но в статусе вылета указана посадка в поле. Почему?</b>
</p>
<p>
Посадка засчитывается только на активный аэродром. Как правило активный аэродром это тот на котором вы можете начать вылет.
</p>
<br>
<p>
<b>Что такое "Индекс честной игры"?</b>
</p>
<p>
Индекс честной игры (Fairplay) это показатель правильного поведения игрока, влияющий на получаемые им очки. Максимальное значение - 100% говорит о том, что игрок не нарушает правила, такой игрок получает 100% очков и все полагающиеся ему бонусы. Если индекс меньше 100%, то игрок получает не всю сумму заработанных очков, а лишь процент от них, соответствующий текущему индексу честной игры. Так же, в этом случае, игрок не получает ни каких бонусов.<br>
Нарушения уменьшающие индекс честной игры:<br>
Дисконнект -10%<br>
Уничтожение союзного самолета -10%<br>
Уничтожение союзной техники -5%<br>
Индекс восстанавливается по 5% за час налета, при условии игры без нарушений.<br>
Идея заимствована из проекта Bellum War.
</p>
<br>
'''
def default_chunks(apps, schema_editor):
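    # Create the 'faq' chunk if it doesn't exist yet and fill in the localized titles and bodies defined above.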
Chunk = apps.get_model('chunks', 'Chunk')
faq = Chunk.objects.get_or_create(key='faq')[0]
faq.title_en = 'FAQ'
faq.title_ru = 'FAQ'
faq.content_en = faq_en
faq.content_ru = faq_ru
faq.save()
class Migration(migrations.Migration):
dependencies = [
('chunks', '0003_auto_20151107_2007'),
]
operations = [
migrations.RunPython(default_chunks),
]
|
Flyingfox646/flyingfox
|
src/chunks/migrations/0004_faq_fairplay.py
|
Python
|
mit
| 5,970
| 0.004275
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Project.slug'
db.alter_column('teams_project', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50, blank=True))
# Changing field 'Task.assignee'
db.alter_column('teams_task', 'assignee_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.CustomUser'], null=True, blank=True))
def backwards(self, orm):
# Changing field 'Project.slug'
db.alter_column('teams_project', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50))
# Changing field 'Task.assignee'
db.alter_column('teams_task', 'assignee_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['teams.TeamMember'], null=True, blank=True))
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'follow_new_video': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.invite': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Invite'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"})
},
'teams.task': {
'Meta': {'object_name': 'Task'},
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '16'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.teamvideolanguage': {
'Meta': {'unique_together': "(('team_video', 'subtitle_language'),)", 'object_name': 'TeamVideoLanguage'},
'forked': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_lingua_franca': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'languages'", 'to': "orm['teams.TeamVideo']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.teamvideolanguagepair': {
'Meta': {'object_name': 'TeamVideoLanguagePair'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_0': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'language_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'language_pair': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'percent_complete': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'subtitle_language_0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_video_language_pairs_0'", 'to': "orm['videos.SubtitleLanguage']"}),
'subtitle_language_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_video_language_pairs_1'", 'null': 'True', 'to': "orm['videos.SubtitleLanguage']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.workflow': {
'Meta': {'unique_together': "(('team', 'project', 'team_video'),)", 'object_name': 'Workflow'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'perm_approve': ('django.db.models.fields.PositiveIntegerField', [], {'default': '60'}),
'perm_review': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40'}),
'perm_subtitle': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20'}),
'perm_translate': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['teams']
|
ofer43211/unisubs
|
apps/teams/migrations/0057_auto__chg_field_project_slug__chg_field_task_assignee.py
|
Python
|
agpl-3.0
| 23,628
| 0.008507
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:18:58 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
import pickle
import time
from datetime import datetime
from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
# Internal imports
#from solarbextrapolation.utilities import *
class Preprocessors(object):
"""
A common class for all 2D pre-processing routines, tools used to pre-process
the 2D sunpy map data for use in extrapolations.
    Usage can include basic filters for noise/contrast or algorithms to
    compensate for extrapolator assumptions, such as the force-free assumption
    made by many extrapolations, which doesn't hold in the photosphere
    where magnetogram observations are generally taken.
Parameters
----------
map_data : `sunpy.map.GenericMap`
The sunpy map containing the data to be processed.
filepath : `string`
The optional filepath for automatic saving of preprocessed results.
notes : `string`
User specified notes that will be added to the metadata.
"""
def __init__(self, map_data, **kwargs):
"""
Method for creating a preprocessor object, using a sunpy map.
"""
# Add some type checking, we want a map object, check for .unit attribute.
self.map_input = map_data
self.routine = kwargs.get('preprocessor_routine', type(self))
self.meta = self.map_input.meta
self.meta['preprocessor_notes'] = kwargs.get('notes', '')
self.meta['preprocessor_routine'] = self.routine
self.filepath = kwargs.get('filepath', None)
def _preprocessor(self, **kwargs):
"""
        Method running the preprocessing routine and returning a sunpy map.
        For traceability this should add entries into the metadata that
include any parameters used for the given run.
"""
map_output = sunpy.map.Map(self.map_input.data, self.meta)
return map_output
def preprocess(self, **kwargs):
"""
Method to be called to run the preprocessor.
Times the process and saves output where applicable.
"""
dt_start = datetime.now()
tim_start = time.time()
map_output = self._preprocessor()
tim_duration = time.time() - tim_start
map_output.meta['preprocessor_start_time'] = dt_start.isoformat()
map_output.meta['preprocessor_duration'] = tim_duration
if self.filepath:
map_output.save(self.filepath)
return map_output
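# Illustrative sketch only (not part of the original module): a concrete
# preprocessor subclasses Preprocessors and overrides _preprocessor().
# The class name and the 'clip' parameter are hypothetical.
class ClipPreprocessor(Preprocessors):
    def __init__(self, map_data, clip=1000.0, **kwargs):
        super(ClipPreprocessor, self).__init__(map_data, **kwargs)
        self.clip = clip
    def _preprocessor(self, **kwargs):
        # Record the parameter used so the run stays traceable in the metadata.
        self.meta['preprocessor_clip'] = self.clip
        data = np.clip(self.map_input.data, -self.clip, self.clip)
        return sunpy.map.Map(data, self.meta)
# ClipPreprocessor(some_map).preprocess() would then time the run, save the
# result if a filepath was given, and return the processed map.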
|
Alex-Ian-Hamilton/solarbextrapolation
|
solarbextrapolation/preprocessors/base.py
|
Python
|
mit
| 2,867
| 0.006278
|
import os
import sys
import getopt
import json
def main(argv):
"""Specify input to generator with:
-s : save path
-f : model_def folder
"""
opts, args = getopt.getopt(argv,"s:f:")
save_location = "models/ddpg_models/"
model_def_folder = ""
print(opts)
for opt, arg in opts:
if opt == "-s":
save_location = arg
elif opt == "-f":
model_def_folder = arg
    # Read the model definition config; a with-block ensures the file is closed.
    with open(os.path.join(model_def_folder, 'config.json')) as json_file:
        config_dict = json.load(json_file)
    print(config_dict)
    # Run the follow-up script through the interpreter explicitly, so the call
    # doesn't depend on script2.py being marked executable.
    os.system("python script2.py 1")
if __name__ == "__main__":
main(sys.argv[1:])
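# Hypothetical usage (folder and save paths are placeholders):
#   python model_generator.py -f model_defs/example -s models/ddpg_models/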
|
fizz-ml/pytorch-aux-reward-rl
|
model_generator.py
|
Python
|
mit
| 671
| 0.011923
|
#40/40
#Part 1: Terminology (15 points) --> 15/15
#1 1pt) What is the symbol "=" used for?
#to assign values to variables, i.e. to store a value in a variable
# 1pt
#
#2 3pts) Write a technical definition for 'function'
#a named sequence of calculations which takes input and returns output
# 3pts
#
#3 1pt) What does the keyword "return" do?
#it gives back the output or result of the function
# 1pt
#
#4 5pts) We know 5 basic data types. Write the name for each one and provide two
# examples of each below
# 1: integer ex: 1, 2
# 2: floating point ex: 1.2, 1.3
# 3: string ex: "hi", "hello"
# 4: boolean ex: True, False
# 5: tuple ex: ("Hello", 3), ("Bob", 10, "fat")
# 5pts
#
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
#a function definition does not produce any output by itself; it simply defines a set of calculations which are run if and only if they are invoked by a function call
# 2pts
#
#
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1:input (the program takes some input values, most often from the user)
# 2:processing (the program does something with those input values, for instance to calculate something)
# 3:output (the program returns the product of its processing, often as something printed)
# 3pts
#
#Part 2: Programming (25 points) --> 25/25
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
# Hint: Radius is the square root of the area divided by pi (a = pi(r)^2) so r = sqrt(a/pi)
import math
#1 pt for header line
#3 pt for correct formula
#1 pt for return value
#1 pt for parameter name
#1 pt for function name
def circarea_to_diameter(circarea):
    return 2 * (math.sqrt(circarea/math.pi)) #finds radius and multiplies by 2 to get diameter
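#e.g. an area of math.pi gives radius sqrt(math.pi/math.pi) = 1.0, so the diameter is 2.0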
def sum_three(x, y, z): #takes three values and adds them
return x + y + z
#1pt for header line
#1pt for parameter names
#1pt for return value
#1pt for correct output format
#3pt for correct use of format function
def output(d1, d2, d3, total):
return """
Circle Diameter
C1 {}
C2 {}
C3 {}
Totals {}
""".format(d1, d2, d3, total)
#1pt header line
#1pt getting input
#1pt converting input
#1pt for calling output function
#2pt for correct diameter formula
#1pt for variable names
def main():
#input
C1 = raw_input("Area of C1: ")
C2 = raw_input("Area of C2: ")
C3 = raw_input("Area of C3: ")
#processing
d1 = circarea_to_diameter(float(C1))
d2 = circarea_to_diameter(float(C2))
d3 = circarea_to_diameter(float(C3))
total = sum_three(d1, d2, d3)
#output
print output(d1, d2, d3, total)
#1pt for calling main
main()
#1pt explanatory comments
#1pt code format
|
ieuan1630-cmis/ieuan1630-cmis-cs2
|
cs2quiz1.py
|
Python
|
cc0-1.0
| 2,934
| 0.02454
|
try:
from pygments.lexer import bygroups, include, using
from pygments.lexers.agile import PythonLexer, PythonTracebackLexer
from pygments.token import Text, Name, Number, Generic, String, Operator
except ImportError: # pragma: no cover
# this is for nose coverage which does a recursive import on the package
pass
else:
BASE_NAME = r"[a-zA-Z_][a-zA-Z0-9_]*"
class XPythonLexer(PythonLexer):
tokens = PythonLexer.tokens.copy()
tokens["classname"] = [
("'?[a-zA-Z_][a-zA-Z0-9_.]*'?", Name.Class, "#pop")
]
# Marker __repr__
        # raw string avoids invalid escape sequence warnings for \- and \d
        ref = r"(<ref offset)(=)(\-\d+)( ?)((?:name)?)(=?)((?:%s)?)(>?)" % BASE_NAME
tokens["root"].insert(0, (ref, bygroups(Name.Builtin, Name.Operator,
Number, Text, Name.Builtin,
Name.Operator, Name.Variable,
Name.Builtin)))
class PythonXTracebackLexer(PythonTracebackLexer):
tokens = {
"root": [
include("entry"),
include("exception"),
(r"^.*\n", Generic.Error),
],
"entry": [
(r"^Traceback \(most recent call last\):\n",
Generic.Error,
"frame"),
# file - path is colored differently if under working directory
            (r'^( File )((?:"[./<][^"]+")?)((?:"[^"]+")?)' \
             r'(, line )(\d+)((?:, in )?)(.*)(\n)',
bygroups(Generic.Error, Name.Builtin, Operator.Word,
Generic.Error, Number, Generic.Error, Name.Function,
Text),
"frame"),
],
"exception": [
(r"^(AssertionError: )(.+\n)", bygroups(Generic.Error,
using(XPythonLexer))),
(r"^(%s:?)(.+\n)" % BASE_NAME, bygroups(Generic.Error, String)),
],
"frame": [
include("entry"),
include("exception"),
# line of python code
(r"^((?:-+>)?)( +)(\d+)(.+\n)",
bygroups(Generic.Error, Text, Number, using(XPythonLexer))),
# variable continuation
(r"^([ ]+)('[^']+')(: )(.*)([,}]?\n)",
bygroups(Text, String, Name.Operator, using(XPythonLexer), Text)),
# variable
(r"^([ ]+)((?:g:)?)(\**%s)( = )(.+\n)" % BASE_NAME,
bygroups(Text, Name.Builtin, Name.Variable, Name.Operator,
using(XPythonLexer))),
# plain python
(r"^( )(.+)(\n)",
bygroups(Text, using(XPythonLexer), Text)),
],
}
|
g2p/xtraceback
|
xtraceback/lexer.py
|
Python
|
mit
| 2,860
| 0.006643
|
# coding:utf-8
import logging
import regex as re
import email.quoprimime
import email.base64mime
from base64 import b64encode
from flanker.mime.message import charsets, errors
log = logging.getLogger(__name__)
#deal with unfolding
foldingWhiteSpace = re.compile(r"(\n\r?|\r\n?)(\s*)")
def unfold(value):
"""
Unfolding is accomplished by simply removing any CRLF
that is immediately followed by WSP. Each header field should be
treated in its unfolded form for further syntactic and semantic
evaluation.
"""
return re.sub(foldingWhiteSpace, r"\2", value)
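# Illustrative example: unfold("Subject: hello\r\n world") returns
# "Subject: hello world" - the CRLF is removed, the whitespace is kept.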
def decode(header):
return mime_to_unicode(header)
def mime_to_unicode(header):
"""
Takes a header value and returns a fully decoded unicode string.
    It differs from standard Python's email.header.decode_header() because:
    - it is higher level, i.e. returns a unicode string instead of
      an array of tuples
    - it accepts Unicode and non-ASCII strings as well
    >>> mime_to_unicode("=?UTF-8?B?UmVbMl06INCX0LXQvNC70Y/QutC4?=")
    u"Re[2]: Земляки"
    >>> mime_to_unicode("hello")
    u"hello"
"""
try:
header = unfold(header)
decoded = [] # decoded parts
while header:
match = encodedWord.search(header)
if match:
start = match.start()
if start != 0:
# decodes unencoded ascii part to unicode
value = charsets.convert_to_unicode(ascii, header[0:start])
if value.strip():
decoded.append(value)
# decode a header =?...?= of encoding
charset, value = decode_part(
match.group('charset').lower(),
match.group('encoding').lower(),
match.group('encoded'))
decoded.append(charsets.convert_to_unicode(charset, value))
header = header[match.end():]
else:
# no match? append the remainder
# of the string to the list of chunks
decoded.append(charsets.convert_to_unicode(ascii, header))
break
return u"".join(decoded)
except Exception:
try:
log.warning(
u"HEADER-DECODE-FAIL: ({0}) - b64encoded".format(
b64encode(header)))
except Exception:
log.exception("Failed to log exception")
return header
ascii = 'ascii'
#this spec refers to
#http://tools.ietf.org/html/rfc2047
encodedWord = re.compile(r'''(?P<encodedWord>
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
)''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
def decode_part(charset, encoding, value):
"""
Attempts to decode part, understands
'q' - quoted encoding
'b' - base64 mime encoding
Returns (charset, decoded-string)
"""
if encoding == 'q':
return (charset, email.quoprimime.header_decode(str(value)))
elif encoding == 'b':
# Postel's law: add missing padding
paderr = len(value) % 4
if paderr:
value += '==='[:4 - paderr]
return (charset, email.base64mime.decode(value))
elif not encoding:
return (charset, value)
else:
raise errors.DecodingError(
"Unknown encoding: {0}".format(encoding))
|
alex/flanker
|
flanker/mime/message/headers/encodedword.py
|
Python
|
apache-2.0
| 3,690
| 0.000815
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# function: crop an image down to a custom size
import os
import os.path
import sys
import argparse
from PIL import Image
def CutImage(filein, fileout, width, height, type):
    '''
    # Crop a width*height image starting from the top-left corner
    filein: input image
    fileout: output image
    width: output image width
    height: output image height
    type: output image format (png, gif, jpeg...)
    '''
img = Image.open(filein)
    out = img.crop((0, 0, width, height))  # crop box starts at the top-left pixel (0, 0)
out.save(fileout, type)
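# Illustrative call (filenames are placeholders):
#   CutImage('input.png', 'input_100_100.png', 100, 100, 'png')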
if __name__ == "__main__":
argc = len(sys.argv)
cmdargs = str(sys.argv)
parser = argparse.ArgumentParser(description="Tool for change the picture to custom size")
parser.add_argument('-f', '--file', required=True, help='the file path of the input file')
parser.add_argument('-H', '--height', type=int, required=True, help='height of the output file')
parser.add_argument('-W', '--width', type=int, required=True, help='width of the output file')
    parser.add_argument('-T', '--type', required=False, help='the type of the output file: jpeg, gif, png, etc')
args = parser.parse_args()
filein = args.file
width = args.width
height = args.height
f, e = os.path.splitext(filein)
if args.type is None:
type = 'png'
else:
type = args.type
fileout = f + "_" + str(width) + "_" + str(height) + '.' + type
CutImage(filein, fileout, width, height, type)
|
congminghaoxue/learn_python
|
change_pic_size_by_cut.py
|
Python
|
apache-2.0
| 1,474
| 0.003634
|